avoid redundant use of boolean literals

This commit is contained in:
Axel Kohlmeyer
2022-04-10 20:29:54 -04:00
parent 74f2b67b1a
commit d6f7570d57
22 changed files with 64 additions and 82 deletions

View File

@ -94,13 +94,13 @@ bool AnswerT::init(const int inum, const bool charge, const bool rot,
template <class numtyp, class acctyp>
bool AnswerT::add_fields(const bool charge, const bool rot) {
bool realloc=false;
if (charge && _charge==false) {
if (charge && !_charge) {
_charge=true;
_e_fields++;
_ev_fields++;
realloc=true;
}
if (rot && _rot==false) {
if (rot && !_rot) {
_rot=true;
realloc=true;
}
@ -163,10 +163,8 @@ void AnswerT::copy_answers(const bool eflag, const bool vflag,
#endif
int csize=_ev_fields;
if (!eflag)
csize-=_e_fields;
if (!vflag)
csize-=6;
if (!eflag) csize-=_e_fields;
if (!vflag) csize-=6;
if (csize>0)
engv.update_host(_ev_stride*csize,true);
@ -192,8 +190,7 @@ void AnswerT::copy_answers(const bool eflag, const bool vflag,
template <class numtyp, class acctyp>
double AnswerT::energy_virial(double *eatom, double **vatom,
double *virial) {
if (_eflag==false && _vflag==false)
return 0.0;
if (!_eflag && !_vflag) return 0.0;
double evdwl=0.0;
int vstart=0;
@ -241,11 +238,9 @@ double AnswerT::energy_virial(double *eatom, double **vatom,
template <class numtyp, class acctyp>
double AnswerT::energy_virial(double *eatom, double **vatom,
double *virial, double &ecoul) {
if (_eflag==false && _vflag==false)
return 0.0;
if (!_eflag && !_vflag) return 0.0;
if (_charge==false)
return energy_virial(eatom,vatom,virial);
if (!_charge) return energy_virial(eatom,vatom,virial);
double evdwl=0.0;
int vstart=0, iend=_ev_stride;

View File

@ -107,17 +107,17 @@ bool AtomT::alloc(const int nall) {
gpu_bytes+=x_cast.device.row_bytes()+type_cast.device.row_bytes();
#endif
if (_charge && _host_view==false) {
if (_charge && !_host_view) {
success=success && (q.alloc(_max_atoms,*dev,UCL_WRITE_ONLY,
UCL_READ_ONLY)==UCL_SUCCESS);
gpu_bytes+=q.device.row_bytes();
}
if (_rot && _host_view==false) {
if (_rot && !_host_view) {
success=success && (quat.alloc(_max_atoms*4,*dev,UCL_WRITE_ONLY,
UCL_READ_ONLY)==UCL_SUCCESS);
gpu_bytes+=quat.device.row_bytes();
}
if (_vel && _host_view==false) {
if (_vel && !_host_view) {
success=success && (v.alloc(_max_atoms*4,*dev,UCL_WRITE_ONLY,
UCL_READ_ONLY)==UCL_SUCCESS);
gpu_bytes+=v.device.row_bytes();
@ -161,37 +161,37 @@ bool AtomT::add_fields(const bool charge, const bool rot,
// Ignore host/device transfers?
int gpu_bytes=0;
if (charge && _charge==false) {
if (charge && !_charge) {
_charge=true;
_other=true;
if (_host_view==false) {
if (!_host_view) {
success=success && (q.alloc(_max_atoms,*dev,UCL_WRITE_ONLY,
UCL_READ_ONLY)==UCL_SUCCESS);
gpu_bytes+=q.device.row_bytes();
}
}
if (rot && _rot==false) {
if (rot && !_rot) {
_rot=true;
_other=true;
if (_host_view==false) {
if (!_host_view) {
success=success && (quat.alloc(_max_atoms*4,*dev,UCL_WRITE_ONLY,
UCL_READ_ONLY)==UCL_SUCCESS);
gpu_bytes+=quat.device.row_bytes();
}
}
if (vel && _vel==false) {
if (vel && !_vel) {
_vel=true;
_other=true;
if (_host_view==false) {
if (!_host_view) {
success=success && (v.alloc(_max_atoms*4,*dev,UCL_WRITE_ONLY,
UCL_READ_ONLY)==UCL_SUCCESS);
gpu_bytes+=v.device.row_bytes();
}
}
if (bonds && _bonds==false) {
if (bonds && !_bonds) {
_bonds=true;
if (_bonds && _gpu_nbor>0) {
success=success && (dev_tag.alloc(_max_atoms,*dev,

View File

@ -201,9 +201,9 @@ int DeviceT::init_device(MPI_Comm world, MPI_Comm replica, const int ngpu,
unsigned best_cus = gpu->cus(0);
bool type_match = (gpu->device_type(0) == type);
for (int i = 1; i < gpu->num_devices(); i++) {
if (type_match==true && gpu->device_type(i)!=type)
if (type_match && gpu->device_type(i)!=type)
continue;
if (type_match == false && gpu->device_type(i) == type) {
if (!type_match && gpu->device_type(i) == type) {
type_match = true;
best_cus = gpu->cus(i);
best_device = i;
@ -280,7 +280,7 @@ int DeviceT::init_device(MPI_Comm world, MPI_Comm replica, const int ngpu,
MPI_Comm_rank(_comm_gpu,&_gpu_rank);
#if !defined(CUDA_PROXY) && !defined(CUDA_MPS_SUPPORT)
if (_procs_per_gpu>1 && gpu->sharing_supported(my_gpu)==false)
if (_procs_per_gpu>1 && !gpu->sharing_supported(my_gpu))
return -7;
#endif
@ -400,7 +400,7 @@ int DeviceT::set_ocl_params(std::string s_config, const std::string &extra_args)
_ocl_compile_string += " -DCONFIG_ID="+params[0]+
" -DSIMD_SIZE="+params[1]+
" -DMEM_THREADS="+params[2];
if (gpu->has_shuffle_support()==false)
if (!gpu->has_shuffle_support())
_ocl_compile_string+=" -DSHUFFLE_AVAIL=0";
else
_ocl_compile_string+=" -DSHUFFLE_AVAIL="+params[3];
@ -443,7 +443,7 @@ int DeviceT::init(Answer<numtyp,acctyp> &ans, const bool charge,
const bool vel) {
if (!_device_init)
return -1;
if (sizeof(acctyp)==sizeof(double) && gpu->double_precision()==false)
if (sizeof(acctyp)==sizeof(double) && !gpu->double_precision())
return -5;
// Counts of data transfers for timing overhead estimates
@ -480,11 +480,11 @@ int DeviceT::init(Answer<numtyp,acctyp> &ans, const bool charge,
if (vel)
_data_in_estimate++;
} else {
if (atom.charge()==false && charge)
if (!atom.charge() && charge)
_data_in_estimate++;
if (atom.quaternion()==false && rot)
if (!atom.quaternion() && rot)
_data_in_estimate++;
if (atom.velocity()==false && vel)
if (!atom.velocity() && vel)
_data_in_estimate++;
if (!atom.add_fields(charge,rot,gpu_nbor,gpu_nbor>0 && maxspecial,vel))
return -3;
@ -502,7 +502,7 @@ int DeviceT::init(Answer<numtyp,acctyp> &ans, const int nlocal,
const int nall) {
if (!_device_init)
return -1;
if (sizeof(acctyp)==sizeof(double) && gpu->double_precision()==false)
if (sizeof(acctyp)==sizeof(double) && !gpu->double_precision())
return -5;
if (_init_count==0) {

View File

@ -113,7 +113,7 @@ bool Neighbor::init(NeighborShared *shared, const int inum,
if (!success)
return false;
if (_use_packing==false) {
if (!_use_packing) {
#ifndef LAL_USE_OLD_NEIGHBOR
_shared->compile_kernels(devi, gpu_nbor, compile_flags+
" -DMAX_SUBGROUPS_PER_BLOCK="+toa(_block_nbor_build/_simd_size));
@ -153,7 +153,7 @@ void Neighbor::alloc(bool &success) {
int nt=_max_atoms+_max_host;
if (_max_nbors)
_max_nbors = ((_max_nbors-1)/_threads_per_atom+1)*_threads_per_atom;
if (_use_packing==false || _gpu_nbor>0) {
if (!_use_packing || _gpu_nbor>0) {
if (_max_nbors)
success=success &&
(dev_nbor.alloc((_max_nbors+2)*_max_atoms,*dev)==UCL_SUCCESS);
@ -166,7 +166,7 @@ void Neighbor::alloc(bool &success) {
_c_bytes=dev_nbor.row_bytes();
if (_alloc_packed) {
if (_use_packing==false) {
if (!_use_packing) {
dev_packed_begin.clear();
success=success && (dev_packed_begin.alloc(_max_atoms,*dev,
_packed_permissions)==UCL_SUCCESS);
@ -373,7 +373,7 @@ void Neighbor::get_host(const int inum, int *ilist, int *numj,
time_nbor.stop();
if (_use_packing==false) {
if (!_use_packing) {
time_kernel.start();
int GX=static_cast<int>(ceil(static_cast<double>(inum)*_threads_per_atom/
block_size));
@ -450,7 +450,7 @@ void Neighbor::get_host3(const int inum, const int nlist, int *ilist, int *numj,
}
time_nbor.stop();
if (_use_packing==false) {
if (!_use_packing) {
time_kernel.start();
int GX=static_cast<int>(ceil(static_cast<double>(inum)*_threads_per_atom/
block_size));

View File

@ -303,7 +303,7 @@ int PPPMT::spread(const int ago, const int nlocal, const int nall,
double *host_q, double *boxlo,
const double delxinv, const double delyinv,
const double delzinv) {
if (_precompute_done==false) {
if (!_precompute_done) {
atom->acc_timers();
_precompute(ago,nlocal,nall,host_x,host_type,success,host_q,boxlo,delxinv,
delyinv,delzinv);
@ -359,7 +359,7 @@ void PPPMT::interp(const grdtyp qqrd2e_scale) {
time_interp.stop();
ans->copy_answers(false,false,false,false,0);
if (_kspace_split==false)
if (!_kspace_split)
device->add_ans_object(ans);
}

View File

@ -101,7 +101,7 @@ float * pppm_gpu_init_f(const int nlocal, const int nall, FILE *screen,
float *b=pppm_gpu_init(PPPMF,nlocal,nall,screen,order,nxlo_out,nylo_out,
nzlo_out,nxhi_out,nyhi_out,nzhi_out,rho_coeff,vd_brick,
slab_volfactor,nx_pppm,ny_pppm,nz_pppm,split,success);
if (split==false && respa==false)
if (!split && !respa)
PPPMF.device->set_single_precompute(&PPPMF);
return b;
}
@ -146,7 +146,7 @@ double * pppm_gpu_init_d(const int nlocal, const int nall, FILE *screen,
nzlo_out,nxhi_out,nyhi_out,nzhi_out,rho_coeff,
vd_brick,slab_volfactor,nx_pppm,ny_pppm,nz_pppm,
split,success);
if (split==false && respa==false)
if (!split && !respa)
PPPMD.device->set_double_precompute(&PPPMD);
return b;
}

View File

@ -69,7 +69,7 @@ int YukawaColloidT::init(const int ntypes,
_max_rad_size=static_cast<int>(static_cast<double>(ef_nall)*1.10);
if (_shared_view==false)
if (!_shared_view)
c_rad.alloc(_max_rad_size,*(this->ucl_device),UCL_WRITE_ONLY,UCL_READ_ONLY);
rad_tex.get_texture(*(this->pair_program),"rad_tex");
@ -157,7 +157,7 @@ void YukawaColloidT::compute(const int f_ago, const int inum_full,
if (nall>_max_rad_size) {
_max_rad_size=static_cast<int>(static_cast<double>(nall)*1.10);
if (_shared_view==false) {
if (!_shared_view) {
c_rad.resize(_max_rad_size);
rad_tex.bind_float(c_rad,1);
}
@ -229,7 +229,7 @@ int** YukawaColloidT::compute(const int ago, const int inum_full,
if (nall>_max_rad_size) {
_max_rad_size=static_cast<int>(static_cast<double>(nall)*1.10);
if (_shared_view==false) {
if (!_shared_view) {
c_rad.resize(_max_rad_size);
rad_tex.bind_float(c_rad,1);
}