Adding a redundant call to sync Intel package arrays with native arrays for methods, such as MC, that do not need pre_reverse.
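For context, a minimal standalone sketch of the problem this commit addresses (mock types only; MockFixIntel, verlet_step, and mc_energy_full are invented names, not LAMMPS source): the INTEL package keeps its own force arrays and normally copies them back to the native LAMMPS arrays in pre_reverse(), but Monte Carlo style energy evaluations call the force computes and post_force() without ever issuing pre_reverse(), so the sync must also happen in post_force().

// Mock illustration only -- these types and functions are NOT LAMMPS code.
#include <cstdio>

struct MockFixIntel {
  bool synced = false;                                 // were package forces copied back?
  void sync_to_native() { synced = true; std::puts("  intel -> native sync"); }
  void pre_reverse(int, int) { sync_to_native(); }     // normal timestep hook
  void post_force(int)       { sync_to_native(); }     // new: redundant sync here too
};

// Verlet-style timestep: force computes, then pre_reverse, then post_force.
void verlet_step(MockFixIntel &fix) {
  std::puts("verlet_step:");
  fix.pre_reverse(0, 0);
  fix.post_force(0);        // sync now runs twice per step -- "redundant" but safe
}

// MC-style energy evaluation (FixGCMC/FixWidom::energy_full): only post_force.
void mc_energy_full(MockFixIntel &fix) {
  std::puts("mc_energy_full:");
  fix.post_force(0);        // without the new sync, the native arrays stay stale on this path
}

int main() {
  MockFixIntel a, b;
  verlet_step(a);
  mc_energy_full(b);
  std::printf("synced: verlet=%d mc=%d\n", (int)a.synced, (int)b.synced);
  return 0;
}

In the actual change below, the copy is performed by _sync_main_arrays() inside the new FixIntel::post_force().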
@@ -275,10 +275,8 @@ int FixIntel::setmask()
   int mask = 0;
   mask |= PRE_REVERSE;
   mask |= MIN_PRE_REVERSE;
-#ifdef _LMP_INTEL_OFFLOAD
   mask |= POST_FORCE;
   mask |= MIN_POST_FORCE;
-#endif
   mask |= POST_RUN;
   return mask;
 }
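A fix's callbacks only fire for the bits it returns from setmask(), so moving POST_FORCE and MIN_POST_FORCE out of the _LMP_INTEL_OFFLOAD guard is what lets the new post_force() run in non-offload builds as well. A toy sketch of that mask-based dispatch (mock types and invented names, not the real Modify machinery):

// Mock illustration of mask-based callback dispatch -- not LAMMPS source.
#include <cstdio>
#include <vector>

enum { PRE_REVERSE = 1 << 0, POST_FORCE = 1 << 1 };

struct ToyFix {
  virtual ~ToyFix() = default;
  virtual int setmask() = 0;
  virtual void post_force(int) {}
};

struct ToyIntelFix : ToyFix {
  // POST_FORCE is now requested unconditionally, matching the hunk above.
  int setmask() override { return PRE_REVERSE | POST_FORCE; }
  void post_force(int vflag) override { std::printf("FixIntel-style sync, vflag=%d\n", vflag); }
};

// Dispatcher in the spirit of Modify::post_force(): only fixes whose mask
// requested POST_FORCE get the callback.
void dispatch_post_force(const std::vector<ToyFix *> &fixes, int vflag) {
  for (ToyFix *f : fixes)
    if (f->setmask() & POST_FORCE) f->post_force(vflag);
}

int main() {
  ToyIntelFix fix;
  std::vector<ToyFix *> fixes = {&fix};
  dispatch_post_force(fixes, 0);
  return 0;
}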
@@ -597,6 +595,19 @@ void FixIntel::pre_reverse(int /*eflag*/, int /*vflag*/)
 
 /* ---------------------------------------------------------------------- */
 
+void FixIntel::post_force(int vflag)
+{
+  // Redundant call to sync Intel data structs with native for methods that
+  // call force compute but do not call prereverse
+  _sync_main_arrays(1);
+
+#ifdef _LMP_INTEL_OFFLOAD
+  if (_sync_mode == 2) sync_coprocessor();
+#endif
+}
+
+/* ---------------------------------------------------------------------- */
+
 template <class acc_t>
 void FixIntel::reduce_results(acc_t * _noalias const f_scalar)
 {
@@ -883,13 +894,6 @@ double FixIntel::memory_usage()
 
 /* ---------------------------------------------------------------------- */
 
-void FixIntel::post_force(int vflag)
-{
-  if (_sync_mode == 2) sync_coprocessor();
-}
-
-/* ---------------------------------------------------------------------- */
-
 template <class ft, class acc_t>
 void FixIntel::add_off_results(const ft * _noalias const f_in,
                                const acc_t * _noalias const ev_global) {
@@ -55,6 +55,7 @@ class FixIntel : public Fix {
   void pre_reverse(int eflag = 0, int vflag = 0) override;
   inline void min_pre_reverse(int eflag = 0, int vflag = 0) override { pre_reverse(eflag, vflag); }
 
+  void post_force(int vflag) override;
   void post_run() override { _print_pkg_info = 1; }
 
   // Get all forces, calculation results from coprocesser
@@ -132,7 +133,6 @@ class FixIntel : public Fix {
   inline void get_buffern(const int offload, int &nlocal, int &nall, int &minlocal);
 
 #ifdef _LMP_INTEL_OFFLOAD
-  void post_force(int vflag);
   inline int coprocessor_number() { return _cop; }
   inline int full_host_list() { return _full_host_list; }
   void set_offload_affinity();
@@ -2352,12 +2352,6 @@ double FixGCMC::energy_full()
 
   if (force->kspace) force->kspace->compute(eflag,vflag);
 
-  // unlike Verlet, not performing a reverse_comm() or forces here
-  // b/c GCMC does not care about forces
-  // don't think it will mess up energy due to any post_force() fixes
-  // but Modify::pre_reverse() is needed for INTEL
-
-  if (modify->n_pre_reverse) modify->pre_reverse(eflag,vflag);
   if (modify->n_post_force_any) modify->post_force(vflag);
 
   // NOTE: all fixes with energy_global_flag set and which
@@ -1050,13 +1050,7 @@ double FixWidom::energy_full()
 
   if (force->kspace) force->kspace->compute(eflag,vflag);
 
-  // unlike Verlet, not performing a reverse_comm() or forces here
-  // b/c Widom does not care about forces
-  // don't think it will mess up energy due to any post_force() fixes
-  // but Modify::pre_reverse() is needed for INTEL
-
-  if (modify->n_pre_reverse) modify->pre_reverse(eflag,vflag);
-  if (modify->n_pre_force) modify->pre_force(vflag);
+  if (modify->n_post_force_any) modify->post_force(vflag);
 
   // NOTE: all fixes with energy_global_flag set and which
   // operate at pre_force() or post_force()
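The net effect on the MC fixes: FixGCMC::energy_full() and FixWidom::energy_full() no longer need the explicit Modify::pre_reverse() workaround, because the sync now happens inside FixIntel::post_force(). A hedged sketch of the simplified hook sequence (MockModify and energy_full_after are invented names for illustration, not LAMMPS code):

// Mock sketch of the energy_full() hook sequence after this commit.
#include <cstdio>

struct MockModify {
  int n_post_force_any = 1;   // at least one fix (e.g. FixIntel) registered post_force
  void post_force(int vflag) { std::printf("post_force fixes run (vflag=%d)\n", vflag); }
};

double energy_full_after(MockModify *modify, int vflag) {
  // ... pair / bond / kspace compute() calls would precede this ...
  if (modify->n_post_force_any) modify->post_force(vflag);  // the only hook shown in the hunks above
  return 0.0;  // placeholder for the tallied potential energy
}

int main() {
  MockModify modify;
  energy_full_after(&modify, 0);
  return 0;
}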