/* Determine which shift vector we need */
ivec vis = { 0, 0, 0 };
vis[dd->dim[d]] = 1;
- const int is = IVEC2IS(vis);
+ const int is = gmx::ivecToShiftIndex(vis);
/* Loop over the pulses */
const gmx_domdec_comm_dim_t& cd = comm.cd[d];
{
clear_ivec(vis);
vis[dim] = (dir == 0 ? 1 : -1);
- int is = IVEC2IS(vis);
+ int is = gmx::ivecToShiftIndex(vis);
if (!bScrew)
{
/* Sum and add to shift forces */
dr[clust_id].aver_3[ndr] += drt;
dr[clust_id].aver_6[ndr] += disresdata->Rt_6[label];
- snew(fshift, SHIFTS);
+ snew(fshift, gmx::c_numShiftVectors);
ta_disres(n, &forceatoms[i], forceparams.data(), x, f, fshift, pbc, lam, &dvdl, {}, nullptr, disresdata, nullptr, nullptr);
sfree(fshift);
viol = disresdata->sumviol;
else
{
rvec_sub(xi, xj, dx);
- return CENTRAL;
+ return c_centralShiftIndex;
}
}
if (computeVirial(flavor))
{
fshift[shiftIndex][m] += fij;
- fshift[CENTRAL][m] -= fij;
+ fshift[c_centralShiftIndex][m] -= fij;
}
}
}
if (computeVirial(flavor))
{
fshift[ki][m] += fij;
- fshift[CENTRAL][m] -= fij;
+ fshift[c_centralShiftIndex][m] -= fij;
}
}
}
if (computeVirial(flavor))
{
fshift[t][m] += fff;
- fshift[CENTRAL][m] -= fff;
+ fshift[c_centralShiftIndex][m] -= fff;
}
} /* 15 */
if (computeVirial(flavor))
{
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k);
}
} /* 161 TOTAL */
if (computeVirial(flavor))
{
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k);
}
} /* 57 TOTAL */
if (computeVirial(flavor))
{
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k);
}
} /* 161 TOTAL */
if (computeVirial(flavor))
{
fshift[ki][m] += fik;
- fshift[CENTRAL][m] -= fik;
+ fshift[c_centralShiftIndex][m] -= fik;
}
}
}
if (computeVirial(flavor))
{
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k);
}
} /* 153 TOTAL */
}
else
{
- t3 = CENTRAL;
+ t3 = c_centralShiftIndex;
}
rvec_inc(fshift[t1], f_i);
- rvec_dec(fshift[CENTRAL], f_j);
+ rvec_dec(fshift[c_centralShiftIndex], f_j);
rvec_dec(fshift[t2], f_k);
rvec_inc(fshift[t3], f_l);
}
if (computeVirial(flavor))
{
rvec_inc(fshift[t1], f_i);
- rvec_dec(fshift[CENTRAL], f_i);
+ rvec_dec(fshift[c_centralShiftIndex], f_i);
if (!bZAxis)
{
rvec_inc(fshift[t2], f_k);
- rvec_dec(fshift[CENTRAL], f_k);
+ rvec_dec(fshift[c_centralShiftIndex], f_k);
}
}
}
if (computeVirial(flavor))
{
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k);
}
}
}
else
{
- t3 = CENTRAL;
+ t3 = c_centralShiftIndex;
}
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k);
rvec_inc(fshift[t3], f_l);
}
}
else
{
- t3 = CENTRAL;
+ t3 = c_centralShiftIndex;
}
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k);
rvec_inc(fshift[t3], f_l);
}
}
else
{
- t31 = CENTRAL;
- t32 = CENTRAL;
+ t31 = c_centralShiftIndex;
+ t32 = c_centralShiftIndex;
}
rvec_inc(fshift[t11], f1_i);
- rvec_inc(fshift[CENTRAL], f1_j);
+ rvec_inc(fshift[c_centralShiftIndex], f1_j);
rvec_inc(fshift[t21], f1_k);
rvec_inc(fshift[t31], f1_l);
rvec_inc(fshift[t12], f2_i);
- rvec_inc(fshift[CENTRAL], f2_j);
+ rvec_inc(fshift[c_centralShiftIndex], f2_j);
rvec_inc(fshift[t22], f2_k);
rvec_inc(fshift[t32], f2_l);
}
if (computeVirial(flavor))
{
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k); /* 9 */
}
/* 163 TOTAL */
if (computeVirial(flavor))
{
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k); /* 9 */
}
/* 163 TOTAL */
if (computeVirial(flavor))
{
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k); /* 9 */
}
/* 163 TOTAL */
if (computeVirial(flavor))
{
rvec_inc(fshift[t1], f_i);
- rvec_inc(fshift[CENTRAL], f_j);
+ rvec_inc(fshift[c_centralShiftIndex], f_j);
rvec_inc(fshift[t2], f_k);
}
} /* 169 TOTAL */
int pair = (faOffset + fa) / 3;
int ai = forceatoms[fa + 1];
int aj = forceatoms[fa + 2];
- int ki = CENTRAL;
+ int ki = gmx::c_centralShiftIndex;
if (pbc)
{
ki = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
if (fshift)
{
fshift[ki][m] += fij;
- fshift[CENTRAL][m] -= fij;
+ fshift[gmx::c_centralShiftIndex][m] -= fij;
}
}
}
GMX_RELEASE_ASSERT(deviceStream.isValid(),
"Can't run GPU version of bonded forces in stream that is not valid.");
- static_assert(c_threadsPerBlock >= SHIFTS,
- "Threads per block in GPU bonded must be >= SHIFTS for the virial kernel "
- "(calcVir=true)");
+ static_assert(
+ c_threadsPerBlock >= c_numShiftVectors,
+ "Threads per block in GPU bonded must be >= c_numShiftVectors for the virial kernel "
+ "(calcVir=true)");
wcycle_ = wcycle;
kernelLaunchConfig_.gridSize[1] = 1;
kernelLaunchConfig_.gridSize[2] = 1;
kernelLaunchConfig_.sharedMemorySize =
- SHIFTS * sizeof(float3) + (c_threadsPerBlock / warp_size) * 3 * sizeof(float);
+ c_numShiftVectors * sizeof(float3) + (c_threadsPerBlock / warp_size) * 3 * sizeof(float);
}
GpuBonded::Impl::~Impl()
float3 fij = fbond * dx;
atomicAdd(&gm_f[ai], fij);
atomicAdd(&gm_f[aj], -fij);
- if (calcVir && ki != CENTRAL)
+ if (calcVir && ki != gmx::c_centralShiftIndex)
{
atomicAdd(&sm_fShiftLoc[ki], fij);
- atomicAdd(&sm_fShiftLoc[CENTRAL], -fij);
+ atomicAdd(&sm_fShiftLoc[gmx::c_centralShiftIndex], -fij);
}
}
}
if (calcVir)
{
atomicAdd(&sm_fShiftLoc[t1], f_i);
- atomicAdd(&sm_fShiftLoc[CENTRAL], f_j);
+ atomicAdd(&sm_fShiftLoc[gmx::c_centralShiftIndex], f_j);
atomicAdd(&sm_fShiftLoc[t2], f_k);
}
}
if (calcVir)
{
atomicAdd(&sm_fShiftLoc[t1], f_i);
- atomicAdd(&sm_fShiftLoc[CENTRAL], f_j);
+ atomicAdd(&sm_fShiftLoc[gmx::c_centralShiftIndex], f_j);
atomicAdd(&sm_fShiftLoc[t2], f_k);
}
}
atomicAdd(&gm_f[ai], fik);
atomicAdd(&gm_f[ak], -fik);
- if (calcVir && ki != CENTRAL)
+ if (calcVir && ki != gmx::c_centralShiftIndex)
{
atomicAdd(&sm_fShiftLoc[ki], fik);
- atomicAdd(&sm_fShiftLoc[CENTRAL], -fik);
+ atomicAdd(&sm_fShiftLoc[gmx::c_centralShiftIndex], -fik);
}
}
}
int t3 = pbcDxAiuc<calcVir>(pbcAiuc, gm_xq[l], gm_xq[j], dx_jl);
atomicAdd(&sm_fShiftLoc[t1], f_i);
- atomicAdd(&sm_fShiftLoc[CENTRAL], -f_j);
+ atomicAdd(&sm_fShiftLoc[gmx::c_centralShiftIndex], -f_j);
atomicAdd(&sm_fShiftLoc[t2], -f_k);
atomicAdd(&sm_fShiftLoc[t3], f_l);
}
/* Add the forces */
atomicAdd(&gm_f[ai], f);
atomicAdd(&gm_f[aj], -f);
- if (calcVir && fshift_index != CENTRAL)
+ if (calcVir && fshift_index != gmx::c_centralShiftIndex)
{
atomicAdd(&sm_fShiftLoc[fshift_index], f);
- atomicAdd(&sm_fShiftLoc[CENTRAL], -f);
+ atomicAdd(&sm_fShiftLoc[gmx::c_centralShiftIndex], -f);
}
if (calcEner)
extern __shared__ char sm_dynamicShmem[];
char* sm_nextSlotPtr = sm_dynamicShmem;
float3* sm_fShiftLoc = (float3*)sm_nextSlotPtr;
- sm_nextSlotPtr += SHIFTS * sizeof(float3);
+ sm_nextSlotPtr += c_numShiftVectors * sizeof(float3);
if (calcVir)
{
- if (threadIdx.x < SHIFTS)
+ if (threadIdx.x < c_numShiftVectors)
{
sm_fShiftLoc[threadIdx.x] = make_float3(0.0f, 0.0f, 0.0f);
}
atomicAdd(vtotElec, sm_vTotElec[warpId]);
}
}
- /* Accumulate shift vectors from shared memory to global memory on the first SHIFTS threads of the block. */
+ /* Accumulate shift vectors from shared memory to global memory on the first c_numShiftVectors threads of the block. */
if (calcVir)
{
__syncthreads();
- if (threadIdx.x < SHIFTS)
+ if (threadIdx.x < c_numShiftVectors)
{
atomicAdd(kernelParams.d_fShift[threadIdx.x], sm_fShiftLoc[threadIdx.x]);
}
if (idef_->ilsort == ilsortFE_SORTED)
{
forceBufferLambda_.resize(numAtomsForce * sizeof(rvec4) / sizeof(real));
- shiftForceBufferLambda_.resize(SHIFTS);
+ shiftForceBufferLambda_.resize(gmx::c_numShiftVectors);
}
}
}
}
- for (int i = 0; i < SHIFTS; i++)
+ for (int i = 0; i < gmx::c_numShiftVectors; i++)
{
clear_rvec(f_t->fshift[i]);
}
if (stepWork.computeVirial)
{
- for (int i = 0; i < SHIFTS; i++)
+ for (int i = 0; i < gmx::c_numShiftVectors; i++)
{
for (int t = 1; t < bt->nthreads; t++)
{
//! Index to touched blocks
std::vector<int> block_index;
- //! Shift force array, size SHIFTS
+ //! Shift force array, size c_numShiftVectors
std::vector<gmx::RVec> fshift;
//! Energy array
real ener[F_NRE];
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
* Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
- * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
}
}
-f_thread_t::f_thread_t(int numEnergyGroups) : fshift(SHIFTS), grpp(numEnergyGroups) {}
+f_thread_t::f_thread_t(int numEnergyGroups) : fshift(gmx::c_numShiftVectors), grpp(numEnergyGroups)
+{
+}
bonded_threading_t::bonded_threading_t(const int numThreads, const int numEnergyGroups, FILE* fplog) :
nthreads(numThreads),
t_oriresdata* oriresdata,
int gmx_unused* global_atom_index)
{
- int ex, power, ki = CENTRAL;
+ int ex, power, ki = gmx::c_centralShiftIndex;
real r2, invr, invr2, fc, smooth_fc, dev, devins, pfac;
rvec r, Sr, fij;
real vtot;
if (fshift)
{
fshift[ki][i] += fij[i];
- fshift[CENTRAL][i] -= fij[i];
+ fshift[gmx::c_centralShiftIndex][i] -= fij[i];
}
}
}
}
else
{
- fshift_index = CENTRAL;
+ fshift_index = c_centralShiftIndex;
rvec_sub(x[ai], x[aj], dx);
}
r2 = norm2(dx);
if (computeVirial(flavor))
{
- if (fshift_index != CENTRAL)
+ if (fshift_index != c_centralShiftIndex)
{
rvec_inc(fshift[fshift_index], dx);
- rvec_dec(fshift[CENTRAL], dx);
+ rvec_dec(fshift[c_centralShiftIndex], dx);
}
}
}
//! Derivative with respect to lambda
real dvdlambda = 0;
//! Shift vectors
- rvec fshift[N_IVEC] = { { 0 } };
+ rvec fshift[c_numShiftVectors] = { { 0 } };
//! Forces
alignas(GMX_REAL_MAX_SIMD_WIDTH * sizeof(real)) rvec4 f[c_numAtoms] = { { 0 } };
};
if (computeVirial(flavor))
{
shiftForcesChecker.setDefaultTolerance(shiftForcesTolerance_);
- shiftForcesChecker.checkVector(output.fshift[CENTRAL], "Central");
+ shiftForcesChecker.checkVector(output.fshift[c_centralShiftIndex], "Central");
}
else
{
//! Derivative with respect to lambda
std::vector<real> dvdLambda;
//! Shift vectors
- rvec fShift[N_IVEC] = { { 0 } };
+ rvec fShift[gmx::detail::c_numIvecs] = { { 0 } };
//! Forces
alignas(GMX_REAL_MAX_SIMD_WIDTH * sizeof(real)) rvec4 f[c_numAtoms] = { { 0 } };
};
if (computeVirial(flavor))
{
- shiftForcesChecker.checkVector(output.fShift[CENTRAL], "Central");
+ shiftForcesChecker.checkVector(output.fShift[gmx::c_centralShiftIndex], "Central");
}
else
{
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
* Copyright (c) 2013,2014,2015,2016,2018 by the GROMACS development team.
- * Copyright (c) 2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
if (bScrewPBC)
{
- int isx = IS2X(i);
+ int isx = gmx::shiftIndexToXDim(i);
/* We should correct all odd x-shifts, but the range of isx is -2 to 2 */
if (isx == 1 || isx == -1)
{
ForceHelperBuffers::ForceHelperBuffers(bool haveDirectVirialContributions) :
haveDirectVirialContributions_(haveDirectVirialContributions)
{
- shiftForces_.resize(SHIFTS);
+ shiftForces_.resize(gmx::c_numShiftVectors);
}
void ForceHelperBuffers::resize(int numAtoms)
if (forcerec->shift_vec.empty())
{
- forcerec->shift_vec.resize(SHIFTS);
+ forcerec->shift_vec.resize(gmx::c_numShiftVectors);
}
if (forcerec->nbfp.empty())
/* The short-range virial from surrounding boxes */
const rvec* fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
const rvec* shiftVecPointer = as_rvec_array(fr->shift_vec.data());
- calc_vir(SHIFTS, shiftVecPointer, fshift, vir_part, pbcType == PbcType::Screw, box);
- inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
+ calc_vir(gmx::c_numShiftVectors, shiftVecPointer, fshift, vir_part, pbcType == PbcType::Screw, box);
+ inc_nrnb(nrnb, eNR_VIRIAL, gmx::c_numShiftVectors);
/* Calculate partial virial, for local atoms only, based on short range.
* Total virial is computed in global_stat, called from do_md
//! The interaction lists, only vsite entries are used
std::array<InteractionList, F_NRE> ilist;
//! Local fshift accumulation buffer
- std::array<RVec, SHIFTS> fshift;
+ std::array<RVec, c_numShiftVectors> fshift;
//! Local virial dx*df accumulation buffer
matrix dxdf;
//! Tells if interdependent task idTask should be used (in addition to the rest of this task), this bool has the same value on all threads
else
{
rvec_sub(xi, xj, dx);
- return CENTRAL;
+ return c_centralShiftIndex;
}
}
/* Keep the vsite in the same periodic image as before */
rvec dx;
int ishift = pbc_dx_aiuc(pbc_null, x[avsite], xv, dx);
- if (ishift != CENTRAL)
+ if (ishift != c_centralShiftIndex)
{
rvec_add(xv, dx, x[avsite]);
}
}
else
{
- siv = CENTRAL;
- sij = CENTRAL;
+ siv = c_centralShiftIndex;
+ sij = c_centralShiftIndex;
}
- if (siv != CENTRAL || sij != CENTRAL)
+ if (siv != c_centralShiftIndex || sij != c_centralShiftIndex)
{
rvec_inc(fshift[siv], f[av]);
- rvec_dec(fshift[CENTRAL], fi);
+ rvec_dec(fshift[c_centralShiftIndex], fi);
rvec_dec(fshift[sij], fj);
}
}
}
else
{
- svi = CENTRAL;
+ svi = c_centralShiftIndex;
}
- if (svi != CENTRAL || sji != CENTRAL)
+ if (svi != c_centralShiftIndex || sji != c_centralShiftIndex)
{
rvec_dec(fshift[svi], fv);
- fshift[CENTRAL][XX] += fv[XX] - fj[XX];
- fshift[CENTRAL][YY] += fv[YY] - fj[YY];
- fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ];
+ fshift[c_centralShiftIndex][XX] += fv[XX] - fj[XX];
+ fshift[c_centralShiftIndex][YY] += fv[YY] - fj[YY];
+ fshift[c_centralShiftIndex][ZZ] += fv[ZZ] - fj[ZZ];
fshift[sji][XX] += fj[XX];
fshift[sji][YY] += fj[YY];
fshift[sji][ZZ] += fj[ZZ];
}
else
{
- siv = CENTRAL;
- sij = CENTRAL;
- sik = CENTRAL;
+ siv = c_centralShiftIndex;
+ sij = c_centralShiftIndex;
+ sik = c_centralShiftIndex;
}
- if (siv != CENTRAL || sij != CENTRAL || sik != CENTRAL)
+ if (siv != c_centralShiftIndex || sij != c_centralShiftIndex || sik != c_centralShiftIndex)
{
rvec_inc(fshift[siv], f[av]);
- rvec_dec(fshift[CENTRAL], fi);
+ rvec_dec(fshift[c_centralShiftIndex], fi);
rvec_dec(fshift[sij], fj);
rvec_dec(fshift[sik], fk);
}
}
else
{
- svi = CENTRAL;
+ svi = c_centralShiftIndex;
}
- if (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL)
+ if (svi != c_centralShiftIndex || sji != c_centralShiftIndex || skj != c_centralShiftIndex)
{
rvec_dec(fshift[svi], fv);
- fshift[CENTRAL][XX] += fv[XX] - (1 + a) * temp[XX];
- fshift[CENTRAL][YY] += fv[YY] - (1 + a) * temp[YY];
- fshift[CENTRAL][ZZ] += fv[ZZ] - (1 + a) * temp[ZZ];
+ fshift[c_centralShiftIndex][XX] += fv[XX] - (1 + a) * temp[XX];
+ fshift[c_centralShiftIndex][YY] += fv[YY] - (1 + a) * temp[YY];
+ fshift[c_centralShiftIndex][ZZ] += fv[ZZ] - (1 + a) * temp[ZZ];
fshift[sji][XX] += temp[XX];
fshift[sji][YY] += temp[YY];
fshift[sji][ZZ] += temp[ZZ];
}
else
{
- svi = CENTRAL;
+ svi = c_centralShiftIndex;
}
- if (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL)
+ if (svi != c_centralShiftIndex || sji != c_centralShiftIndex || skj != c_centralShiftIndex)
{
rvec_dec(fshift[svi], fv);
- fshift[CENTRAL][XX] += fv[XX] - f1[XX] - (1 - c1) * f2[XX] + f3[XX];
- fshift[CENTRAL][YY] += fv[YY] - f1[YY] - (1 - c1) * f2[YY] + f3[YY];
- fshift[CENTRAL][ZZ] += fv[ZZ] - f1[ZZ] - (1 - c1) * f2[ZZ] + f3[ZZ];
+ fshift[c_centralShiftIndex][XX] += fv[XX] - f1[XX] - (1 - c1) * f2[XX] + f3[XX];
+ fshift[c_centralShiftIndex][YY] += fv[YY] - f1[YY] - (1 - c1) * f2[YY] + f3[YY];
+ fshift[c_centralShiftIndex][ZZ] += fv[ZZ] - f1[ZZ] - (1 - c1) * f2[ZZ] + f3[ZZ];
fshift[sji][XX] += f1[XX] - c1 * f2[XX] - f3[XX];
fshift[sji][YY] += f1[YY] - c1 * f2[YY] - f3[YY];
fshift[sji][ZZ] += f1[ZZ] - c1 * f2[ZZ] - f3[ZZ];
}
else
{
- svi = CENTRAL;
+ svi = c_centralShiftIndex;
}
- if (svi != CENTRAL || sji != CENTRAL || ski != CENTRAL)
+ if (svi != c_centralShiftIndex || sji != c_centralShiftIndex || ski != c_centralShiftIndex)
{
rvec_dec(fshift[svi], fv);
- fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX];
- fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY];
- fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
+ fshift[c_centralShiftIndex][XX] += fv[XX] - fj[XX] - fk[XX];
+ fshift[c_centralShiftIndex][YY] += fv[YY] - fj[YY] - fk[YY];
+ fshift[c_centralShiftIndex][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
rvec_inc(fshift[sji], fj);
rvec_inc(fshift[ski], fk);
}
}
else
{
- svi = CENTRAL;
+ svi = c_centralShiftIndex;
}
- if (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL || slj != CENTRAL)
+ if (svi != c_centralShiftIndex || sji != c_centralShiftIndex || skj != c_centralShiftIndex
+ || slj != c_centralShiftIndex)
{
rvec_dec(fshift[svi], fv);
for (m = 0; m < DIM; m++)
{
- fshift[CENTRAL][m] += fv[m] - (1 + a + b) * temp[m];
+ fshift[c_centralShiftIndex][m] += fv[m] - (1 + a + b) * temp[m];
fshift[sji][m] += temp[m];
fshift[skj][m] += a * temp[m];
fshift[slj][m] += b * temp[m];
}
else
{
- svi = CENTRAL;
+ svi = c_centralShiftIndex;
}
- if (svi != CENTRAL || sij != CENTRAL || sik != CENTRAL || sil != CENTRAL)
+ if (svi != c_centralShiftIndex || sij != c_centralShiftIndex || sik != c_centralShiftIndex
+ || sil != c_centralShiftIndex)
{
rvec_dec(fshift[svi], fv);
- fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
- fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
- fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
+ fshift[c_centralShiftIndex][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
+ fshift[c_centralShiftIndex][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
+ fshift[c_centralShiftIndex][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
rvec_inc(fshift[sij], fj);
rvec_inc(fshift[sik], fk);
rvec_inc(fshift[sil], fl);
}
else
{
- siv = CENTRAL;
+ siv = c_centralShiftIndex;
}
a = ip[ia[i]].vsiten.a;
svmul(a, f[av], fi);
rvec_inc(f[ai], fi);
- if (virialHandling == VirialHandling::Pbc && siv != CENTRAL)
+ if (virialHandling == VirialHandling::Pbc && siv != c_centralShiftIndex)
{
rvec_inc(fshift[siv], fi);
- rvec_dec(fshift[CENTRAL], fi);
+ rvec_dec(fshift[c_centralShiftIndex], fi);
}
/* 6 Flops */
}
{
fshift_t = tData.fshift;
- for (int i = 0; i < SHIFTS; i++)
+ for (int i = 0; i < c_numShiftVectors; i++)
{
clear_rvec(fshift_t[i]);
}
{
for (int th = 1; th < numThreads; th++)
{
- for (int i = 0; i < SHIFTS; i++)
+ for (int i = 0; i < c_numShiftVectors; i++)
{
rvec_inc(fshift[i], threadingInfo_.threadData(th).fshift[i]);
}
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
*
* \param[in] force A force buffer that will be used for storing forces
* \param[in] computeVirial True when algorithms are required to provide their virial contribution (for the current force evaluation)
- * \param[in] shiftForces A shift forces buffer of size SHIFTS, only used with \p computeVirial = true
+ * \param[in] shiftForces A shift forces buffer of size c_numShiftVectors, only used with \p computeVirial = true
*/
ForceWithShiftForces(const gmx::ArrayRefWithPadding<gmx::RVec>& force,
const bool computeVirial,
gmx::ArrayRefWithPadding<gmx::RVec> force_;
//! True when virial computation is requested
bool computeVirial_;
- //! A buffer for storing the shift forces, size SHIFTS
+ //! A buffer for storing the shift forces, size c_numShiftVectors
gmx::ArrayRef<gmx::RVec> shiftForces_;
//! Tells whether we have spread the vsite forces
bool haveSpreadVsiteForces_ = false;
return forceBufferForDirectVirialContributions_;
}
- //! Returns the buffer for shift forces, size SHIFTS
+ //! Returns the buffer for shift forces, size c_numShiftVectors
gmx::ArrayRef<gmx::RVec> shiftForces() { return shiftForces_; }
//! Resizes the direct virial contribution buffer, when present
bool haveDirectVirialContributions_ = false;
//! Force buffer for force computation with direct virial contributions
std::vector<gmx::RVec> forceBufferForDirectVirialContributions_;
- //! Shift force array for computing the virial, size SHIFTS
+ //! Shift force array for computing the virial, size c_numShiftVectors
std::vector<gmx::RVec> shiftForces_;
};
// NOLINTNEXTLINE (clang-analyzer-optin.performance.Padding)
Vvdw({}, { pinningPolicy }),
Vc({}, { pinningPolicy })
{
- fshift.resize(SHIFTS * DIM);
+ fshift.resize(gmx::c_numShiftVectors * DIM);
Vvdw.resize(numEnergyGroups * numEnergyGroups);
Vc.resize(numEnergyGroups * numEnergyGroups);
nbat->FFormat = nbatXYZ;
}
- nbat->shift_vec.resize(SHIFTS);
+ nbat->shift_vec.resize(gmx::c_numShiftVectors);
nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
{
gmx::ArrayRef<const nbnxn_atomdata_output_t> outputBuffers = nbat.out;
- for (int s = 0; s < SHIFTS; s++)
+ for (int s = 0; s < gmx::c_numShiftVectors; s++)
{
rvec sum;
clear_rvec(sum);
//! f, size natoms*fstride
gmx::HostVector<real> f;
- //! Shift force array, size SHIFTS*DIM
+ //! Shift force array, size c_numShiftVectors*DIM
gmx::HostVector<real> fshift;
//! Temporary Van der Waals group energy storage
gmx::HostVector<real> Vvdw;
forceRec.ntype = numAtomTypes;
forceRec.nbfp = nonbondedParameters;
- forceRec.shift_vec.resize(SHIFTS);
+ forceRec.shift_vec.resize(gmx::c_numShiftVectors);
calc_shifts(box, forceRec.shift_vec);
if (!outputFile.empty())
{
E_el = 0.0f;
# ifdef EXCLUSION_FORCES /* Ewald or RF */
- if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
+ if (nb_sci.shift == gmx::c_centralShiftIndex
+ && pl_cj4[cij4_start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
{
/* we have the diagonal: add the charge and LJ self interaction energy term */
for (i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
# endif /* CALC_ENERGIES */
# ifdef EXCLUSION_FORCES
- const int nonSelfInteraction = !(nb_sci.shift == CENTRAL & tidxj <= tidxi);
+ const int nonSelfInteraction = !(nb_sci.shift == gmx::c_centralShiftIndex & tidxj <= tidxi);
# endif
/* loop over the j clusters = seen by any of the atoms in the current super-cluster;
}
/* skip central shifts when summing shift forces */
- if (nb_sci.shift == CENTRAL)
+ if (nb_sci.shift == gmx::c_centralShiftIndex)
{
bCalcFshift = false;
}
if (reduceFshift)
{
- for (int i = 0; i < SHIFTS; i++)
+ for (int i = 0; i < gmx::c_numShiftVectors; i++)
{
rvec_inc(fshift[i], nbst.fShift[i]);
}
void clear_fshift(real* fshift)
{
- for (int i = 0; i < SHIFTS * DIM; i++)
+ for (int i = 0; i < gmx::c_numShiftVectors * DIM; i++)
{
fshift[i] = 0;
}
real vctot = 0;
real Vvdwtot = 0;
- if (nbln.shift == CENTRAL && nbl->cj4[cj4_ind0].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
+ if (nbln.shift == gmx::c_centralShiftIndex
+ && nbl->cj4[cj4_ind0].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
{
/* we have the diagonal:
* add the charge self interaction energy term
{
const int ja = cj * c_clSize + jc;
- if (nbln.shift == CENTRAL && ci == cj && ja <= ia)
+ if (nbln.shift == gmx::c_centralShiftIndex && ci == cj && ja <= ia)
{
continue;
}
const int cjind1 = ciEntry.cj_ind_end;
/* Currently only works super-cells equal to sub-cells */
const int ci = ciEntry.ci;
- const int ci_sh = (ish == CENTRAL ? ci : -1);
+ const int ci_sh = (ish == gmx::c_centralShiftIndex ? ci : -1);
/* We have 5 LJ/C combinations, but use only three inner loops,
* as the other combinations are unlikely and/or not much faster:
const int cjind0 = ciEntry.cj_ind_start;
const int cjind1 = ciEntry.cj_ind_end;
const int ci = ciEntry.ci;
- const int ci_sh = (ish == CENTRAL ? ci : -1);
+ const int ci_sh = (ish == gmx::c_centralShiftIndex ? ci : -1);
shX_S = SimdReal(shiftvec[ish3]);
shY_S = SimdReal(shiftvec[ish3 + 1]);
const int cjind0 = ciEntry.cj_ind_start;
const int cjind1 = ciEntry.cj_ind_end;
const int ci = ciEntry.ci;
- const int ci_sh = (ish == CENTRAL ? ci : -1);
+ const int ci_sh = (ish == gmx::c_centralShiftIndex ? ci : -1);
shX_S = SimdReal(shiftvec[ish3]);
shY_S = SimdReal(shiftvec[ish3 + 1]);
const DeviceStream& localStream)
{
atomdata->numTypes = numTypes;
- allocateDeviceBuffer(&atomdata->shiftVec, SHIFTS, deviceContext);
+ allocateDeviceBuffer(&atomdata->shiftVec, gmx::c_numShiftVectors, deviceContext);
atomdata->shiftVecUploaded = false;
- allocateDeviceBuffer(&atomdata->fShift, SHIFTS, deviceContext);
+ allocateDeviceBuffer(&atomdata->fShift, gmx::c_numShiftVectors, deviceContext);
allocateDeviceBuffer(&atomdata->eLJ, 1, deviceContext);
allocateDeviceBuffer(&atomdata->eElec, 1, deviceContext);
- clearDeviceBufferAsync(&atomdata->fShift, 0, SHIFTS, localStream);
+ clearDeviceBufferAsync(&atomdata->fShift, 0, gmx::c_numShiftVectors, localStream);
clearDeviceBufferAsync(&atomdata->eElec, 0, 1, localStream);
clearDeviceBufferAsync(&atomdata->eLJ, 0, 1, localStream);
/* init nbst */
pmalloc(reinterpret_cast<void**>(&nb->nbst.eLJ), sizeof(*nb->nbst.eLJ));
pmalloc(reinterpret_cast<void**>(&nb->nbst.eElec), sizeof(*nb->nbst.eElec));
- pmalloc(reinterpret_cast<void**>(&nb->nbst.fShift), SHIFTS * sizeof(*nb->nbst.fShift));
+ pmalloc(reinterpret_cast<void**>(&nb->nbst.fShift), gmx::c_numShiftVectors * sizeof(*nb->nbst.fShift));
init_plist(nb->plist[InteractionLocality::Local]);
copyToDeviceBuffer(&adat->shiftVec,
gmx::asGenericFloat3Pointer(nbatom->shift_vec),
0,
- SHIFTS,
+ gmx::c_numShiftVectors,
localStream,
GpuApiCallBehavior::Async,
nullptr);
// Clear shift force array and energies if the outputs were used in the current step
if (computeVirial)
{
- clearDeviceBufferAsync(&adat->fShift, 0, SHIFTS, localStream);
+ clearDeviceBufferAsync(&adat->fShift, 0, gmx::c_numShiftVectors, localStream);
clearDeviceBufferAsync(&adat->eLJ, 0, 1, localStream);
clearDeviceBufferAsync(&adat->eElec, 0, 1, localStream);
}
copyFromDeviceBuffer(nb->nbst.fShift,
&adat->fShift,
0,
- SHIFTS,
+ gmx::c_numShiftVectors,
deviceStream,
GpuApiCallBehavior::Async,
bDoTime ? timers->xf[atomLocality].nb_d2h.fetchNextEvent() : nullptr);
# This file is part of the GROMACS molecular simulation package.
#
# Copyright (c) 2012,2013,2014,2015,2018 by the GROMACS development team.
-# Copyright (c) 2019,2020, by the GROMACS development team, led by
+# Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
# Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
# and including many others, as listed in the AUTHORS file in the
# top-level source directory and at http://www.gromacs.org.
-DNBNXM_MIN_DISTANCE_SQUARED_VALUE_FLOAT=3.82e-07
-Dc_nbnxnGpuNumClusterPerSupercluster=8
-Dc_nbnxnGpuJgroupSize=4
+ -Dc_centralShiftIndex=22
-DIATYPE_SHMEM
-c -I ${CMAKE_SOURCE_DIR}/src -std=cl1.2
-Weverything -Wno-conversion -Wno-missing-variable-declarations -Wno-used-but-marked-unused
#include "gromacs/nbnxm/nbnxm_gpu.h"
#include "gromacs/nbnxm/nbnxm_gpu_data_mgmt.h"
#include "gromacs/nbnxm/pairlistsets.h"
-#include "gromacs/pbcutil/ishift.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/fatalerror.h"
* This file is part of the GROMACS molecular simulation package.
*
* Copyright (c) 2014,2015,2016,2017,2018 by the GROMACS development team.
- * Copyright (c) 2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
" -DNBNXM_MIN_DISTANCE_SQUARED_VALUE_FLOAT=%g"
" -Dc_nbnxnGpuNumClusterPerSupercluster=%d"
" -Dc_nbnxnGpuJgroupSize=%d"
+ " -Dc_centralShiftIndex=%d"
"%s",
c_nbnxnGpuClusterSize,
c_nbnxnMinDistanceSquared,
c_nbnxnGpuNumClusterPerSupercluster,
c_nbnxnGpuJgroupSize,
+ gmx::c_centralShiftIndex,
(nb->bPrefetchLjParam) ? " -DIATYPE_SHMEM" : "");
try
{
float E_el = 0.0F;
# if defined EXCLUSION_FORCES /* Ewald or RF */
- if (nb_sci.shift == CENTRAL && pl_cj4[cij4_start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
+ if (nb_sci.shift == c_centralShiftIndex
+ && pl_cj4[cij4_start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
{
/* we have the diagonal: add the charge and LJ self interaction energy term */
for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
#endif /* CALC_ENERGIES */
#ifdef EXCLUSION_FORCES
- const int nonSelfInteraction = !(nb_sci.shift == CENTRAL & tidxj <= tidxi);
+ const int nonSelfInteraction = !(nb_sci.shift == c_centralShiftIndex & tidxj <= tidxi);
#endif
/* loop over the j clusters = seen by any of the atoms in the current super-cluster */
}
/* skip central shifts when summing shift forces */
- if (nb_sci.shift == CENTRAL)
+ if (nb_sci.shift == c_centralShiftIndex)
{
bCalcFshift = 0;
}
#include "gromacs/gpu_utils/device_utils.clh"
#include "gromacs/gpu_utils/vectype_ops.clh"
-#include "gromacs/pbcutil/ishift.h"
#include "nbnxm_ocl_consts.h"
"nbl average j cell list length %.1f\n",
0.25 * nbl.ncjInUse / std::max(static_cast<double>(nbl.ci.size()), 1.0));
- int cs[SHIFTS] = { 0 };
- int npexcl = 0;
+ int cs[gmx::c_numShiftVectors] = { 0 };
+ int npexcl = 0;
for (const nbnxn_ci_t& ciEntry : nbl.ci)
{
cs[ciEntry.shift & NBNXN_CI_SHIFT] += ciEntry.cj_ind_end - ciEntry.cj_ind_start;
nbl.cj.size(),
npexcl,
100 * npexcl / std::max(static_cast<double>(nbl.cj.size()), 1.0));
- for (int s = 0; s < SHIFTS; s++)
+ for (int s = 0; s < gmx::c_numShiftVectors; s++)
{
if (cs[s] > 0)
{
for (int tx = -shp[XX]; tx <= shp[XX]; tx++)
{
- const int shift = XYZ2IS(tx, ty, tz);
+ const int shift = xyzToShiftIndex(tx, ty, tz);
- const bool excludeSubDiagonal = (isIntraGridList && shift == CENTRAL);
+ const bool excludeSubDiagonal = (isIntraGridList && shift == gmx::c_centralShiftIndex);
- if (c_pbcShiftBackward && isIntraGridList && shift > CENTRAL)
+ if (c_pbcShiftBackward && isIntraGridList && shift > gmx::c_centralShiftIndex)
{
continue;
}
/* When true, leave the pairs with i > j.
* Skip half of y when i and j have the same x.
*/
- const bool skipHalfY =
- (isIntraGridList && cx == 0
- && (!c_pbcShiftBackward || shift == CENTRAL) && cyf < ci_y);
- const int cyf_x = skipHalfY ? ci_y : cyf;
+ const bool skipHalfY = (isIntraGridList && cx == 0
+ && (!c_pbcShiftBackward || shift == gmx::c_centralShiftIndex)
+ && cyf < ci_y);
+ const int cyf_x = skipHalfY ? ci_y : cyf;
for (int cy = cyf_x; cy <= cyl; cy++)
{
/* We want each atom/cell pair only once,
* only use cj >= ci.
*/
- if (!c_pbcShiftBackward || shift == CENTRAL)
+ if (!c_pbcShiftBackward || shift == gmx::c_centralShiftIndex)
{
firstCell = std::max(firstCell, ci);
}
}
if constexpr (doCalcEnergies && doExclusionForces)
{
- if (nbSci.shift == CENTRAL && a_plistCJ4[cij4Start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
+ if (nbSci.shift == gmx::c_centralShiftIndex
+ && a_plistCJ4[cij4Start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
{
// we have the diagonal: add the charge and LJ self interaction energy term
for (int i = 0; i < c_nbnxnGpuNumClusterPerSupercluster; i++)
energyElec /= epsFac * c_clSize;
energyElec *= -ewaldBeta * c_OneOverSqrtPi; /* last factor 1/sqrt(pi) */
}
- } // (nbSci.shift == CENTRAL && a_plistCJ4[cij4Start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
+ } // (nbSci.shift == gmx::c_centralShiftIndex && a_plistCJ4[cij4Start].cj[0] == sci * c_nbnxnGpuNumClusterPerSupercluster)
} // (doCalcEnergies && doExclusionForces)
// Only needed if (doExclusionForces)
- const bool nonSelfInteraction = !(nbSci.shift == CENTRAL & tidxj <= tidxi);
+ const bool nonSelfInteraction = !(nbSci.shift == gmx::c_centralShiftIndex & tidxj <= tidxi);
// loop over the j clusters = seen by any of the atoms in the current super-cluster
for (int j4 = cij4Start + tidxz; j4 < cij4End; j4 += 1)
} // for (int j4 = cij4Start; j4 < cij4End; j4 += 1)
/* skip central shifts when summing shift forces */
- const bool doCalcShift = (calcShift && !(nbSci.shift == CENTRAL));
+ const bool doCalcShift = (calcShift && !(nbSci.shift == gmx::c_centralShiftIndex));
reduceForceIAndFShift(
sm_reductionBuffer, fCiBuf, doCalcShift, itemIdx, tidxi, tidxj, sci, nbSci.shift, a_f, a_fShift);
*
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team.
- * Copyright (c) 2010,2014,2019, by the GROMACS development team, led by
+ * Copyright (c) 2010,2014,2019,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#ifndef GMX_PBCUTIL_ISHIFT_H
#define GMX_PBCUTIL_ISHIFT_H
-#define D_BOX_Z 1
-#define D_BOX_Y 1
-#define D_BOX_X 2
-#define N_BOX_Z (2 * D_BOX_Z + 1)
-#define N_BOX_Y (2 * D_BOX_Y + 1)
-#define N_BOX_X (2 * D_BOX_X + 1)
-#define N_IVEC (N_BOX_Z * N_BOX_Y * N_BOX_X)
-#define CENTRAL (N_IVEC / 2)
-#define SHIFTS N_IVEC
+namespace gmx
+{
+//! Maximum dimensions of grid expressing shifts across PBC
+//! \{
+constexpr int c_dBoxZ = 1;
+constexpr int c_dBoxY = 1;
+constexpr int c_dBoxX = 2;
+//! \}
+namespace detail
+{
+constexpr int c_nBoxZ = 2 * gmx::c_dBoxZ + 1;
+constexpr int c_nBoxY = 2 * gmx::c_dBoxY + 1;
+constexpr int c_nBoxX = 2 * gmx::c_dBoxX + 1;
+constexpr int c_numIvecs = detail::c_nBoxZ * detail::c_nBoxY * detail::c_nBoxX;
+} // namespace detail
-#define XYZ2IS(x, y, z) (N_BOX_X * (N_BOX_Y * ((z) + D_BOX_Z) + (y) + D_BOX_Y) + (x) + D_BOX_X)
-#define IVEC2IS(iv) (XYZ2IS((iv)[XX], (iv)[YY], (iv)[ZZ]))
-#define IS2X(iv) (((iv) % N_BOX_X) - D_BOX_X)
-#define IS2Y(iv) ((((iv) / N_BOX_X) % N_BOX_Y) - D_BOX_Y)
-#define IS2Z(iv) ((iv) / (N_BOX_X * N_BOX_Y) - D_BOX_Z)
+constexpr int c_centralShiftIndex = detail::c_numIvecs / 2;
+constexpr int c_numShiftVectors = detail::c_numIvecs;
+//! Convert grid coordinates to shift index
+static inline int xyzToShiftIndex(int x, int y, int z)
+{
+ return (detail::c_nBoxX * (detail::c_nBoxY * ((z) + gmx::c_dBoxZ) + (y) + gmx::c_dBoxY) + (x)
+ + gmx::c_dBoxX);
+}
+
+//! Convert grid coordinates held in an ivec to a shift index
+static inline int ivecToShiftIndex(ivec iv)
+{
+ return (xyzToShiftIndex((iv)[XX], (iv)[YY], (iv)[ZZ]));
+}
+
+//! Return the shift in the X dimension of grid space corresponding to \c iv
+static inline int shiftIndexToXDim(int iv)
+{
+ return (((iv) % detail::c_nBoxX) - gmx::c_dBoxX);
+}
+} // namespace gmx
#endif
"Internal error in pbc_dx_aiuc, set_pbc_dd or set_pbc has not been called");
}
- is = IVEC2IS(ishift);
+ is = gmx::ivecToShiftIndex(ishift);
if (debug)
{
- range_check_mesg(is, 0, SHIFTS, "PBC shift vector index range check.");
+ range_check_mesg(is, 0, gmx::c_numShiftVectors, "PBC shift vector index range check.");
}
return is;
void calc_shifts(const matrix box, gmx::ArrayRef<gmx::RVec> shift_vec)
{
- for (int n = 0, m = -D_BOX_Z; m <= D_BOX_Z; m++)
+ for (int n = 0, m = -gmx::c_dBoxZ; m <= gmx::c_dBoxZ; m++)
{
- for (int l = -D_BOX_Y; l <= D_BOX_Y; l++)
+ for (int l = -gmx::c_dBoxY; l <= gmx::c_dBoxY; l++)
{
- for (int k = -D_BOX_X; k <= D_BOX_X; k++, n++)
+ for (int k = -gmx::c_dBoxX; k <= gmx::c_dBoxX; k++, n++)
{
for (int d = 0; d < DIM; d++)
{
* \param[in] x2 Coordinates for particle 2
* \param[out] dx Distance vector
* \return the ishift required to shift x1 at closest distance to x2;
- * i.e. if 0<=ishift<SHIFTS then x1 - x2 + shift_vec[ishift] = dx
+ * i.e. if 0<=ishift<c_numShiftVectors then x1 - x2 + shift_vec[ishift] = dx
* (see calc_shifts below on how to obtain shift_vec)
*/
int pbc_dx_aiuc(const t_pbc* pbc, const rvec x1, const rvec x2, rvec dx);
/*
* This file is part of the GROMACS molecular simulation package.
*
- * Copyright (c) 2019,2020, by the GROMACS development team, led by
+ * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
* Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
* and including many others, as listed in the AUTHORS file in the
* top-level source directory and at http://www.gromacs.org.
#define GMX_PBCUTIL_PBC_AIUC_CUDA_CUH
#include "gromacs/gpu_utils/vectype_ops.cuh"
+#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc_aiuc.h"
+//! Device-side equivalent of gmx::xyzToShiftIndex: convert shift-grid coordinates to a linear shift-vector index
+static inline __device__ int xyzToShiftIndex(int x, int y, int z)
+{
+    return (gmx::detail::c_nBoxX * (gmx::detail::c_nBoxY * ((z) + gmx::c_dBoxZ) + (y) + gmx::c_dBoxY)
+            + (x) + gmx::c_dBoxX);
+}
+
+//! Device-side equivalent of gmx::ivecToShiftIndex, taking the CUDA int3 vector type
+static inline __device__ int int3ToShiftIndex(int3 iv)
+{
+    return (xyzToShiftIndex(iv.x, iv.y, iv.z));
+}
+
/*! \brief Computes the vector between two points taking PBC into account.
*
* Computes the vector dr between points r2 and r1, taking into account the
if (returnShift)
{
- ivec ishift;
+ int3 ishift;
- ishift[XX] = -__float2int_rn(shx);
- ishift[YY] = -__float2int_rn(shy);
- ishift[ZZ] = -__float2int_rn(shz);
+ ishift.x = -__float2int_rn(shx);
+ ishift.y = -__float2int_rn(shy);
+ ishift.z = -__float2int_rn(shz);
- return IVEC2IS(ishift);
+ return int3ToShiftIndex(ishift);
}
else
{
// shift vector values when the largest box shift in any dimension
// is two.
const matrix box = { { 0.01, 1, -100 }, { 300, -0.03, 3 }, { -6, -600, 0.06 } };
- std::vector<gmx::RVec> shiftVectors(SHIFTS);
+ std::vector<gmx::RVec> shiftVectors(c_numShiftVectors);
calc_shifts(box, shiftVectors);