avoid redundant use of boolean literals
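
The change set is mechanical: comparisons against boolean literals (x == false, x == true) become direct tests (!x, x), and ternaries that only select true/false become the condition itself, or an explicit comparison when the operand is a pointer or an integer flag. A minimal hedged illustration of the patterns, using made-up identifiers rather than code from any of the touched files:

    // hypothetical example, not taken from the LAMMPS sources
    bool add_field(bool charge, bool have_charge, const char *fix) {
      // before: if (charge && have_charge == false) { ... }
      if (charge && !have_charge)            // negate instead of comparing to false
        have_charge = true;
      // before: bool has_fix = (fix) ? true : false;
      const bool has_fix = (fix != nullptr); // spell out the pointer test
      return have_charge && has_fix;
    }
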
@@ -94,13 +94,13 @@ bool AnswerT::init(const int inum, const bool charge, const bool rot,
 template <class numtyp, class acctyp>
 bool AnswerT::add_fields(const bool charge, const bool rot) {
   bool realloc=false;
-  if (charge && _charge==false) {
+  if (charge && !_charge) {
     _charge=true;
     _e_fields++;
     _ev_fields++;
     realloc=true;
   }
-  if (rot && _rot==false) {
+  if (rot && !_rot) {
     _rot=true;
     realloc=true;
   }
@@ -163,10 +163,8 @@ void AnswerT::copy_answers(const bool eflag, const bool vflag,
 #endif

   int csize=_ev_fields;
-  if (!eflag)
-    csize-=_e_fields;
-  if (!vflag)
-    csize-=6;
+  if (!eflag) csize-=_e_fields;
+  if (!vflag) csize-=6;

   if (csize>0)
     engv.update_host(_ev_stride*csize,true);
@@ -192,8 +190,7 @@ void AnswerT::copy_answers(const bool eflag, const bool vflag,
 template <class numtyp, class acctyp>
 double AnswerT::energy_virial(double *eatom, double **vatom,
                               double *virial) {
-  if (_eflag==false && _vflag==false)
-    return 0.0;
+  if (!_eflag && !_vflag) return 0.0;

   double evdwl=0.0;
   int vstart=0;
@@ -241,11 +238,9 @@ double AnswerT::energy_virial(double *eatom, double **vatom,
 template <class numtyp, class acctyp>
 double AnswerT::energy_virial(double *eatom, double **vatom,
                               double *virial, double &ecoul) {
-  if (_eflag==false && _vflag==false)
-    return 0.0;
+  if (!_eflag && !_vflag) return 0.0;

-  if (_charge==false)
-    return energy_virial(eatom,vatom,virial);
+  if (!_charge) return energy_virial(eatom,vatom,virial);

   double evdwl=0.0;
   int vstart=0, iend=_ev_stride;
@@ -107,17 +107,17 @@ bool AtomT::alloc(const int nall) {
   gpu_bytes+=x_cast.device.row_bytes()+type_cast.device.row_bytes();
 #endif

-  if (_charge && _host_view==false) {
+  if (_charge && !_host_view) {
     success=success && (q.alloc(_max_atoms,*dev,UCL_WRITE_ONLY,
                                 UCL_READ_ONLY)==UCL_SUCCESS);
     gpu_bytes+=q.device.row_bytes();
   }
-  if (_rot && _host_view==false) {
+  if (_rot && !_host_view) {
     success=success && (quat.alloc(_max_atoms*4,*dev,UCL_WRITE_ONLY,
                                    UCL_READ_ONLY)==UCL_SUCCESS);
     gpu_bytes+=quat.device.row_bytes();
   }
-  if (_vel && _host_view==false) {
+  if (_vel && !_host_view) {
     success=success && (v.alloc(_max_atoms*4,*dev,UCL_WRITE_ONLY,
                                 UCL_READ_ONLY)==UCL_SUCCESS);
     gpu_bytes+=v.device.row_bytes();
@@ -161,37 +161,37 @@ bool AtomT::add_fields(const bool charge, const bool rot,
   // Ignore host/device transfers?
   int gpu_bytes=0;

-  if (charge && _charge==false) {
+  if (charge && !_charge) {
     _charge=true;
     _other=true;
-    if (_host_view==false) {
+    if (!_host_view) {
       success=success && (q.alloc(_max_atoms,*dev,UCL_WRITE_ONLY,
                                   UCL_READ_ONLY)==UCL_SUCCESS);
       gpu_bytes+=q.device.row_bytes();
     }
   }

-  if (rot && _rot==false) {
+  if (rot && !_rot) {
     _rot=true;
     _other=true;
-    if (_host_view==false) {
+    if (!_host_view) {
       success=success && (quat.alloc(_max_atoms*4,*dev,UCL_WRITE_ONLY,
                                      UCL_READ_ONLY)==UCL_SUCCESS);
       gpu_bytes+=quat.device.row_bytes();
     }
   }

-  if (vel && _vel==false) {
+  if (vel && !_vel) {
     _vel=true;
     _other=true;
-    if (_host_view==false) {
+    if (!_host_view) {
       success=success && (v.alloc(_max_atoms*4,*dev,UCL_WRITE_ONLY,
                                   UCL_READ_ONLY)==UCL_SUCCESS);
       gpu_bytes+=v.device.row_bytes();
     }
   }

-  if (bonds && _bonds==false) {
+  if (bonds && !_bonds) {
     _bonds=true;
     if (_bonds && _gpu_nbor>0) {
       success=success && (dev_tag.alloc(_max_atoms,*dev,
@@ -201,9 +201,9 @@ int DeviceT::init_device(MPI_Comm world, MPI_Comm replica, const int ngpu,
     unsigned best_cus = gpu->cus(0);
     bool type_match = (gpu->device_type(0) == type);
     for (int i = 1; i < gpu->num_devices(); i++) {
-      if (type_match==true && gpu->device_type(i)!=type)
+      if (type_match && gpu->device_type(i)!=type)
         continue;
-      if (type_match == false && gpu->device_type(i) == type) {
+      if (!type_match && gpu->device_type(i) == type) {
         type_match = true;
         best_cus = gpu->cus(i);
         best_device = i;
@@ -280,7 +280,7 @@ int DeviceT::init_device(MPI_Comm world, MPI_Comm replica, const int ngpu,
   MPI_Comm_rank(_comm_gpu,&_gpu_rank);

 #if !defined(CUDA_PROXY) && !defined(CUDA_MPS_SUPPORT)
-  if (_procs_per_gpu>1 && gpu->sharing_supported(my_gpu)==false)
+  if (_procs_per_gpu>1 && !gpu->sharing_supported(my_gpu))
     return -7;
 #endif

@@ -400,7 +400,7 @@ int DeviceT::set_ocl_params(std::string s_config, const std::string &extra_args)
   _ocl_compile_string += " -DCONFIG_ID="+params[0]+
                          " -DSIMD_SIZE="+params[1]+
                          " -DMEM_THREADS="+params[2];
-  if (gpu->has_shuffle_support()==false)
+  if (!gpu->has_shuffle_support())
     _ocl_compile_string+=" -DSHUFFLE_AVAIL=0";
   else
     _ocl_compile_string+=" -DSHUFFLE_AVAIL="+params[3];
@@ -443,7 +443,7 @@ int DeviceT::init(Answer<numtyp,acctyp> &ans, const bool charge,
                   const bool vel) {
   if (!_device_init)
     return -1;
-  if (sizeof(acctyp)==sizeof(double) && gpu->double_precision()==false)
+  if (sizeof(acctyp)==sizeof(double) && !gpu->double_precision())
     return -5;

   // Counts of data transfers for timing overhead estimates
@@ -480,11 +480,11 @@ int DeviceT::init(Answer<numtyp,acctyp> &ans, const bool charge,
     if (vel)
       _data_in_estimate++;
   } else {
-    if (atom.charge()==false && charge)
+    if (!atom.charge() && charge)
       _data_in_estimate++;
-    if (atom.quaternion()==false && rot)
+    if (!atom.quaternion() && rot)
      _data_in_estimate++;
-    if (atom.velocity()==false && vel)
+    if (!atom.velocity() && vel)
       _data_in_estimate++;
     if (!atom.add_fields(charge,rot,gpu_nbor,gpu_nbor>0 && maxspecial,vel))
       return -3;
@@ -502,7 +502,7 @@ int DeviceT::init(Answer<numtyp,acctyp> &ans, const int nlocal,
                   const int nall) {
   if (!_device_init)
     return -1;
-  if (sizeof(acctyp)==sizeof(double) && gpu->double_precision()==false)
+  if (sizeof(acctyp)==sizeof(double) && !gpu->double_precision())
     return -5;

   if (_init_count==0) {
@@ -113,7 +113,7 @@ bool Neighbor::init(NeighborShared *shared, const int inum,
   if (!success)
     return false;

-  if (_use_packing==false) {
+  if (!_use_packing) {
 #ifndef LAL_USE_OLD_NEIGHBOR
     _shared->compile_kernels(devi, gpu_nbor, compile_flags+
       " -DMAX_SUBGROUPS_PER_BLOCK="+toa(_block_nbor_build/_simd_size));
@@ -153,7 +153,7 @@ void Neighbor::alloc(bool &success) {
   int nt=_max_atoms+_max_host;
   if (_max_nbors)
     _max_nbors = ((_max_nbors-1)/_threads_per_atom+1)*_threads_per_atom;
-  if (_use_packing==false || _gpu_nbor>0) {
+  if (!_use_packing || _gpu_nbor>0) {
     if (_max_nbors)
       success=success &&
         (dev_nbor.alloc((_max_nbors+2)*_max_atoms,*dev)==UCL_SUCCESS);
@@ -166,7 +166,7 @@ void Neighbor::alloc(bool &success) {

   _c_bytes=dev_nbor.row_bytes();
   if (_alloc_packed) {
-    if (_use_packing==false) {
+    if (!_use_packing) {
       dev_packed_begin.clear();
       success=success && (dev_packed_begin.alloc(_max_atoms,*dev,
                                                  _packed_permissions)==UCL_SUCCESS);
@@ -373,7 +373,7 @@ void Neighbor::get_host(const int inum, int *ilist, int *numj,

   time_nbor.stop();

-  if (_use_packing==false) {
+  if (!_use_packing) {
     time_kernel.start();
     int GX=static_cast<int>(ceil(static_cast<double>(inum)*_threads_per_atom/
                                  block_size));
@@ -450,7 +450,7 @@ void Neighbor::get_host3(const int inum, const int nlist, int *ilist, int *numj,
   }
   time_nbor.stop();

-  if (_use_packing==false) {
+  if (!_use_packing) {
     time_kernel.start();
     int GX=static_cast<int>(ceil(static_cast<double>(inum)*_threads_per_atom/
                                  block_size));
@@ -303,7 +303,7 @@ int PPPMT::spread(const int ago, const int nlocal, const int nall,
                   double *host_q, double *boxlo,
                   const double delxinv, const double delyinv,
                   const double delzinv) {
-  if (_precompute_done==false) {
+  if (!_precompute_done) {
     atom->acc_timers();
     _precompute(ago,nlocal,nall,host_x,host_type,success,host_q,boxlo,delxinv,
                 delyinv,delzinv);
@@ -359,7 +359,7 @@ void PPPMT::interp(const grdtyp qqrd2e_scale) {
   time_interp.stop();

   ans->copy_answers(false,false,false,false,0);
-  if (_kspace_split==false)
+  if (!_kspace_split)
     device->add_ans_object(ans);
 }

@@ -101,7 +101,7 @@ float * pppm_gpu_init_f(const int nlocal, const int nall, FILE *screen,
   float *b=pppm_gpu_init(PPPMF,nlocal,nall,screen,order,nxlo_out,nylo_out,
                          nzlo_out,nxhi_out,nyhi_out,nzhi_out,rho_coeff,vd_brick,
                          slab_volfactor,nx_pppm,ny_pppm,nz_pppm,split,success);
-  if (split==false && respa==false)
+  if (!split && !respa)
     PPPMF.device->set_single_precompute(&PPPMF);
   return b;
 }
@@ -146,7 +146,7 @@ double * pppm_gpu_init_d(const int nlocal, const int nall, FILE *screen,
                          nzlo_out,nxhi_out,nyhi_out,nzhi_out,rho_coeff,
                          vd_brick,slab_volfactor,nx_pppm,ny_pppm,nz_pppm,
                          split,success);
-  if (split==false && respa==false)
+  if (!split && !respa)
     PPPMD.device->set_double_precompute(&PPPMD);
   return b;
 }
@@ -69,7 +69,7 @@ int YukawaColloidT::init(const int ntypes,

   _max_rad_size=static_cast<int>(static_cast<double>(ef_nall)*1.10);

-  if (_shared_view==false)
+  if (!_shared_view)
     c_rad.alloc(_max_rad_size,*(this->ucl_device),UCL_WRITE_ONLY,UCL_READ_ONLY);

   rad_tex.get_texture(*(this->pair_program),"rad_tex");
@@ -157,7 +157,7 @@ void YukawaColloidT::compute(const int f_ago, const int inum_full,

   if (nall>_max_rad_size) {
     _max_rad_size=static_cast<int>(static_cast<double>(nall)*1.10);
-    if (_shared_view==false) {
+    if (!_shared_view) {
       c_rad.resize(_max_rad_size);
       rad_tex.bind_float(c_rad,1);
     }
@@ -229,7 +229,7 @@ int** YukawaColloidT::compute(const int ago, const int inum_full,

   if (nall>_max_rad_size) {
     _max_rad_size=static_cast<int>(static_cast<double>(nall)*1.10);
-    if (_shared_view==false) {
+    if (!_shared_view) {
       c_rad.resize(_max_rad_size);
       rad_tex.bind_float(c_rad,1);
     }
@@ -95,10 +95,7 @@ struct cmp_x{
       else if (d>tol)
         return false;
       d=xx[left.second][2]-xx[right.second][2];
-      if (d<-tol)
-        return true;
-      else
-        return false;
+      return d < -tol;
     }
     else
       return left.first<right.first;
@@ -212,7 +212,7 @@ double PairEAMAlloyGPU::single(int i, int j, int itype, int jtype, double rsq,
   z2 = ((coeff[3] * p + coeff[4]) * p + coeff[5]) * p + coeff[6];

   double fp_i, fp_j;
-  if (fp_single == false) {
+  if (!fp_single) {
     fp_i = ((double *) fp_pinned)[i];
     fp_j = ((double *) fp_pinned)[j];
   } else {
@@ -212,7 +212,7 @@ double PairEAMFSGPU::single(int i, int j, int itype, int jtype, double rsq,
   z2 = ((coeff[3] * p + coeff[4]) * p + coeff[5]) * p + coeff[6];

   double fp_i, fp_j;
-  if (fp_single == false) {
+  if (!fp_single) {
     fp_i = ((double *) fp_pinned)[i];
     fp_j = ((double *) fp_pinned)[j];
   } else {
@@ -209,7 +209,7 @@ double PairEAMGPU::single(int i, int j, int itype, int jtype, double rsq, double
   z2 = ((coeff[3] * p + coeff[4]) * p + coeff[5]) * p + coeff[6];

   double fp_i, fp_j;
-  if (fp_single == false) {
+  if (!fp_single) {
     fp_i = ((double *) fp_pinned)[i];
     fp_j = ((double *) fp_pinned)[j];
   } else {
@@ -188,7 +188,7 @@ void PairGranular::compute(int eflag, int vflag)
   double *history,*allhistory,**firsthistory;

   bool touchflag = false;
-  const bool historyupdate = (update->setupflag) ? false : true;
+  const bool historyupdate = (update->setupflag == 0);

   ev_init(eflag,vflag);

@@ -181,20 +181,17 @@ void KimParam::command(int narg, char **arg)

   std::string atom_type_list;

-  bool isPairStyleAssigned = force->pair ? true : false;
-  if (isPairStyleAssigned) {
+  if (force->pair) {
     Pair *pair = force->pair_match("kim", 1, 0);
     if (pair) {
       auto pairKIM = reinterpret_cast<PairKIM *>(pair);

       pkim = pairKIM->get_kim_model();
-      if (!pkim)
-        error->all(FLERR, "Unable to get the KIM Portable Model");
+      if (!pkim) error->all(FLERR, "Unable to get the KIM Portable Model");

       if (kim_param_get_set == "set") {
         atom_type_list = pairKIM->get_atom_type_list();
-        if (atom_type_list.empty())
-          error->all(FLERR, "The requested atom type list is empty");
+        if (atom_type_list.empty()) error->all(FLERR, "The requested atom type list is empty");
       }
     } else
       error->all(FLERR, "Pair style is defined, but there is "
@@ -1057,18 +1057,15 @@ int PairKIM::check_for_routine_compatibility()
         pkim, modelRoutineName, &present, &required);
     if (error) return true;

-    if ((present == true) && (required == true)) {
-      if (!(KIM_ModelRoutineName_Equal(modelRoutineName,
-                                       KIM_MODEL_ROUTINE_NAME_Create)
-            || KIM_ModelRoutineName_Equal(
-                modelRoutineName,
-                KIM_MODEL_ROUTINE_NAME_ComputeArgumentsCreate)
+    if (present && required) {
+      if (!(KIM_ModelRoutineName_Equal(modelRoutineName, KIM_MODEL_ROUTINE_NAME_Create)
+            || KIM_ModelRoutineName_Equal(modelRoutineName,
+                                          KIM_MODEL_ROUTINE_NAME_ComputeArgumentsCreate)
             || KIM_ModelRoutineName_Equal(modelRoutineName,
                                           KIM_MODEL_ROUTINE_NAME_Compute)
             || KIM_ModelRoutineName_Equal(modelRoutineName,
                                           KIM_MODEL_ROUTINE_NAME_Refresh)
-            || KIM_ModelRoutineName_Equal(
-                modelRoutineName,
-                KIM_MODEL_ROUTINE_NAME_ComputeArgumentsDestroy)
+            || KIM_ModelRoutineName_Equal(modelRoutineName,
+                                          KIM_MODEL_ROUTINE_NAME_ComputeArgumentsDestroy)
             || KIM_ModelRoutineName_Equal(modelRoutineName,
                                           KIM_MODEL_ROUTINE_NAME_Destroy))) {
@@ -198,10 +198,8 @@ void FixOMP::init()
       && !utils::strmatch(update->integrate_style,"^respa/omp"))
     error->all(FLERR,"Must use respa/omp for r-RESPA with /omp styles");

-  if (force->pair && force->pair->compute_flag) _pair_compute_flag = true;
-  else _pair_compute_flag = false;
-  if (force->kspace && force->kspace->compute_flag) _kspace_compute_flag = true;
-  else _kspace_compute_flag = false;
+  _pair_compute_flag = force->pair && force->pair->compute_flag;
+  _kspace_compute_flag = force->kspace && force->kspace->compute_flag;

   int check_hybrid, kspace_split;
   last_pair_hybrid = nullptr;
@@ -427,8 +427,7 @@ void DynamicalMatrix::displace_atom(int local_idx, int direction, int magnitude)
 void DynamicalMatrix::update_force()
 {
   neighbor->ago = 0;
-  if ((modify->get_fix_by_id("package_intel")) ? true : false)
-    neighbor->decide();
+  if (modify->get_fix_by_id("package_intel")) neighbor->decide();
   force_clear();
   int n_pre_force = modify->n_pre_force;
   int n_pre_reverse = modify->n_pre_reverse;
@@ -485,8 +485,7 @@ void ThirdOrder::displace_atom(int local_idx, int direction, int magnitude)
 void ThirdOrder::update_force()
 {
   neighbor->ago = 0;
-  if ((modify->get_fix_by_id("package_intel")) ? true : false)
-    neighbor->decide();
+  if (modify->get_fix_by_id("package_intel")) neighbor->decide();
   force_clear();
   int n_post_force = modify->n_post_force;
   int n_pre_force = modify->n_pre_force;
@@ -2387,7 +2387,7 @@ void AtomVec::setup_fields()
   else threads = nullptr;
   for (int i = 0; i < ngrow; i++) {
     Atom::PerAtom *field = &atom->peratom[mgrow.index[i]];
-    threads[i] = (field->threadflag) ? true : false;
+    threads[i] = field->threadflag == 1;
   }

   // set style-specific sizes
@@ -105,7 +105,7 @@ void AtomVecBody::process_args(int narg, char **arg)

   if (narg < 1) error->all(FLERR,"Invalid atom_style body command");

-  if (false) {
+  if (false) { // NOLINT
     bptr = nullptr;

 #define BODY_CLASS
@@ -785,13 +785,13 @@ bool Info::is_active(const char *category, const char *name)

   if (strcmp(category,"package") == 0) {
     if (strcmp(name,"gpu") == 0) {
-      return (modify->get_fix_by_id("package_gpu")) ? true : false;
+      return modify->get_fix_by_id("package_gpu") != nullptr;
     } else if (strcmp(name,"intel") == 0) {
-      return (modify->get_fix_by_id("package_intel")) ? true : false;
+      return modify->get_fix_by_id("package_intel") != nullptr;
     } else if (strcmp(name,"kokkos") == 0) {
-      return (lmp->kokkos && lmp->kokkos->kokkos_exists) ? true : false;
+      return lmp->kokkos && lmp->kokkos->kokkos_exists;
     } else if (strcmp(name,"omp") == 0) {
-      return (modify->get_fix_by_id("package_omp")) ? true : false;
+      return modify->get_fix_by_id("package_omp") != nullptr;
     } else error->all(FLERR,"Unknown name for info package category: {}", name);

   } else if (strcmp(category,"newton") == 0) {
@@ -233,7 +233,7 @@ void ReadDump::setup_reader(int narg, char **arg)
   // create Nreader reader classes per reader
   // match readerstyle to options in style_reader.h

-  if (false) {
+  if (false) { // NOLINT
     return; // dummy line to enable else-if macro expansion

 #define READER_CLASS
@@ -56,7 +56,7 @@ void WriteDump::command(int narg, char **arg)
   for (int i = 2; i < modindex; ++i)
     dumpargs[i+2] = arg[i];

-  if (false) {
+  if (false) { // NOLINT
     return; // dummy branch to enable else-if macro expansion

 #define DUMP_CLASS