/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012-2018, The GROMACS development team.
 * Copyright (c) 2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/forcerec.h" // only for GET_CGINFO_*
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/simd/simd.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/stringutil.h"

#include "nbnxm_geometry.h"
#include "nbnxm_gpu.h"

using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
const char* enumValueToString(LJCombinationRule enumValue)
    static constexpr gmx::EnumerationArray<LJCombinationRule, const char*> s_ljCombinationRuleNames = {
        "Geometric", "Lorentz-Berthelot", "None"
    return s_ljCombinationRuleNames[enumValue];
void nbnxn_atomdata_t::resizeCoordinateBuffer(int numAtoms)
    x_.resize(numAtoms * xstride);

void nbnxn_atomdata_t::resizeForceBuffers()
    /* Force buffers need padding up to a multiple of the buffer flag size */
    const int paddedSize =
            (numAtoms() + NBNXN_BUFFERFLAG_SIZE - 1) / NBNXN_BUFFERFLAG_SIZE * NBNXN_BUFFERFLAG_SIZE;
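    /* Worked example (illustrative; assuming NBNXN_BUFFERFLAG_SIZE were 16):
     * numAtoms() = 100 gives paddedSize = (100 + 15)/16 * 16 = 112, i.e. the
     * force buffers are rounded up to cover seven complete flag blocks.
     */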
    /* Should we let each thread allocate its own data instead? */
    for (nbnxn_atomdata_output_t& outBuffer : out)
        outBuffer.f.resize(paddedSize * fstride);
/* Initializes an nbnxn_atomdata_output_t data structure */
nbnxn_atomdata_output_t::nbnxn_atomdata_output_t(Nbnxm::KernelType  kernelType,
                                                 int                numEnergyGroups,
                                                 int                simdEnergyBufferStride,
                                                 gmx::PinningPolicy pinningPolicy) :
    f({}, { pinningPolicy }),
    fshift({}, { pinningPolicy }),
    Vvdw({}, { pinningPolicy }),
    Vc({}, { pinningPolicy })
    fshift.resize(gmx::c_numShiftVectors * DIM);
    Vvdw.resize(numEnergyGroups * numEnergyGroups);
    Vc.resize(numEnergyGroups * numEnergyGroups);

    if (Nbnxm::kernelTypeIsSimd(kernelType))
        int cj_size = Nbnxm::JClusterSizePerKernelType[kernelType];
        const int numElements =
                numEnergyGroups * numEnergyGroups * simdEnergyBufferStride * (cj_size / 2) * cj_size;
        VSvdw.resize(numElements);
        VSc.resize(numElements);
static void copy_int_to_nbat_int(const int* a, int na, int na_round, const int* in, int fill, int* innb)
    int j = 0;
    for (int i = 0; i < na; i++)
        innb[j++] = in[a[i]];
    /* Complete the partially filled last cell with fill */
    for (int i = na; i < na_round; i++)
        innb[j++] = fill;
void copy_rvec_to_nbat_real(const int* a, int na, int na_round, const rvec* x, int nbatFormat, real* xnb, int a0)
    /* We complete partially filled cells, which can only be the last one in
     * each column, with coordinates farAway. The actual coordinate value does
     * not influence the results, since these filler particles do not interact.
     * Clusters with normal atoms + fillers have a bounding box based only
     * on the coordinates of the atoms. Clusters with only fillers have as
     * their bounding box the coordinates of the first filler. Such clusters
     * are not considered as i-entries, but they are considered as j-entries.
     * So for performance it is better to have their bounding boxes far away,
     * such that filler-only clusters don't end up in the pair list.
     */
    const real farAway = -1000000;
    if (nbatFormat == nbatXYZ)
        int j = a0 * STRIDE_XYZ;
            xnb[j++] = x[a[i]][XX];
            xnb[j++] = x[a[i]][YY];
            xnb[j++] = x[a[i]][ZZ];
        /* Complete the partially filled last cell with farAway elements */
        for (; i < na_round; i++)
    else if (nbatFormat == nbatXYZQ)
        int j = a0 * STRIDE_XYZQ;
            xnb[j++] = x[a[i]][XX];
            xnb[j++] = x[a[i]][YY];
            xnb[j++] = x[a[i]][ZZ];
        /* Complete the partially filled last cell with farAway coordinates */
        for (; i < na_round; i++)
    else if (nbatFormat == nbatX4)
        int j = atom_to_x_index<c_packX4>(a0);
        int c = a0 & (c_packX4 - 1);
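        /* Sketch of the packed layout this indexing produces, assuming
         * c_packX4 == 4: coordinates are stored as
         *   x0 x1 x2 x3  y0 y1 y2 y3  z0 z1 z2 z3
         * per group of four atoms, so for atom a we get
         *   j = (a/4)*12 + (a%4)
         * with x, y and z landing at j, j + 4 and j + 8, respectively.
         */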
            xnb[j + XX * c_packX4] = x[a[i]][XX];
            xnb[j + YY * c_packX4] = x[a[i]][YY];
            xnb[j + ZZ * c_packX4] = x[a[i]][ZZ];
                j += (DIM - 1) * c_packX4;
        /* Complete the partially filled last cell with farAway elements */
        for (; i < na_round; i++)
            xnb[j + XX * c_packX4] = farAway;
            xnb[j + YY * c_packX4] = farAway;
            xnb[j + ZZ * c_packX4] = farAway;
                j += (DIM - 1) * c_packX4;
    else if (nbatFormat == nbatX8)
        int j = atom_to_x_index<c_packX8>(a0);
        int c = a0 & (c_packX8 - 1);
            xnb[j + XX * c_packX8] = x[a[i]][XX];
            xnb[j + YY * c_packX8] = x[a[i]][YY];
            xnb[j + ZZ * c_packX8] = x[a[i]][ZZ];
                j += (DIM - 1) * c_packX8;
        /* Complete the partially filled last cell with farAway elements */
        for (; i < na_round; i++)
            xnb[j + XX * c_packX8] = farAway;
            xnb[j + YY * c_packX8] = farAway;
            xnb[j + ZZ * c_packX8] = farAway;
                j += (DIM - 1) * c_packX8;
    else
        gmx_incons("Unsupported nbnxn_atomdata_t format");
/* Stores the LJ parameter data in a format convenient for different kernels */
static void set_lj_parameter_data(nbnxn_atomdata_t::Params* params, gmx_bool bSIMD)
    int nt = params->numTypes;

    if (bSIMD)
        /* nbfp_aligned stores two parameters using the stride most suitable
         * for the present SIMD architecture, as specified by the constant
         * c_simdBestPairAlignment from the SIMD header.
         * There's a slight inefficiency in allocating and initializing
         * nbfp_aligned when it might not be used, but introducing the
         * conditional code is not really worth it.
         */
        params->nbfp_aligned.resize(nt * nt * c_simdBestPairAlignment);
        for (int i = 0; i < nt; i++)
            for (int j = 0; j < nt; j++)
                params->nbfp_aligned[(i * nt + j) * c_simdBestPairAlignment + 0] =
                        params->nbfp[(i * nt + j) * 2 + 0];
                params->nbfp_aligned[(i * nt + j) * c_simdBestPairAlignment + 1] =
                        params->nbfp[(i * nt + j) * 2 + 1];
                if (c_simdBestPairAlignment > 2)
                    params->nbfp_aligned[(i * nt + j) * c_simdBestPairAlignment + 2] = 0;
                    params->nbfp_aligned[(i * nt + j) * c_simdBestPairAlignment + 3] = 0;
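                /* Illustration, assuming c_simdBestPairAlignment == 4: each
                 * type pair (i,j) then occupies one aligned four-real slot
                 * laid out as
                 *   { 6*C6(i,j), 12*C12(i,j), 0, 0 },
                 * so a single aligned SIMD load fetches both parameters.
                 */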
    /* We use combination rule data for SIMD combination rule kernels
     * and with LJ-PME kernels. We then only need parameters per atom type,
     * not per pair of atom types.
     */
    params->nbfp_comb.resize(nt * 2);
    switch (params->ljCombinationRule)
        case LJCombinationRule::Geometric:
            for (int i = 0; i < nt; i++)
                /* Store the sqrt of the diagonal from the nbfp matrix */
                params->nbfp_comb[i * 2]     = std::sqrt(params->nbfp[(i * nt + i) * 2]);
                params->nbfp_comb[i * 2 + 1] = std::sqrt(params->nbfp[(i * nt + i) * 2 + 1]);
        case LJCombinationRule::LorentzBerthelot:
            for (int i = 0; i < nt; i++)
                /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
                const real c6  = params->nbfp[(i * nt + i) * 2];
                const real c12 = params->nbfp[(i * nt + i) * 2 + 1];
                if (c6 > 0 && c12 > 0)
                    /* We store 0.5*2^1/6*sigma and sqrt(4*3*eps),
                     * so we get 6*C6 and 12*C12 after combining.
                     */
                    params->nbfp_comb[i * 2]     = 0.5 * gmx::sixthroot(c12 / c6);
                    params->nbfp_comb[i * 2 + 1] = std::sqrt(c6 * c6 / c12);
                else
                    params->nbfp_comb[i * 2]     = 0;
                    params->nbfp_comb[i * 2 + 1] = 0;
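                /* Check of the non-zero case above (illustrative): with
                 * c6 = 24*eps*sigma^6 and c12 = 48*eps*sigma^12, the stored
                 * values are 0.5*2^(1/6)*sigma and sqrt(12*eps). Adding the
                 * former for types i and j gives 2^(1/6)*sigma_LB, and
                 * multiplying the latter gives 12*sqrt(eps_i*eps_j), which
                 * recombine to exactly 6*C6 and 12*C12 for the pair.
                 */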
        case LJCombinationRule::None:
            /* We always store the full matrix (see code above) */
            break;
        default: gmx_incons("Unknown combination rule");
nbnxn_atomdata_t::SimdMasks::SimdMasks()
    constexpr int simd_width = GMX_SIMD_REAL_WIDTH;
    /* Set the diagonal cluster pair exclusion mask setup data.
     * In the kernel we check 0 < j - i to generate the masks.
     * Here we store j - i for generating the mask for the first i,
     * we subtract 0.5 to avoid rounding issues.
     * In the kernel we can subtract 1 to generate the subsequent mask.
     */
    const int simd_4xn_diag_size = std::max(c_nbnxnCpuIClusterSize, simd_width);
    diagonal_4xn_j_minus_i.resize(simd_4xn_diag_size);
    for (int j = 0; j < simd_4xn_diag_size; j++)
        diagonal_4xn_j_minus_i[j] = j - 0.5;

    diagonal_2xnn_j_minus_i.resize(simd_width);
    for (int j = 0; j < simd_width / 2; j++)
        /* The j-cluster size is half the SIMD width */
        diagonal_2xnn_j_minus_i[j] = j - 0.5;
        /* The next half of the SIMD width is for i + 1 */
        diagonal_2xnn_j_minus_i[simd_width / 2 + j] = j - 1 - 0.5;
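    /* Worked example (illustrative): with a SIMD width of 4, the 4xN setup
     * stores { -0.5, 0.5, 1.5, 2.5 }, so the kernel test 0 < j - i gives the
     * mask { F, T, T, T } for i = 0, excluding the diagonal pair j == i;
     * subtracting 1 then yields the masks for i = 1, 2 and 3.
     */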
    /* We use up to 32 bits for exclusion masking.
     * The same masks are used for the 4xN and 2x(N+N) kernels.
     * The masks are read either into integer SIMD registers or into
     * real SIMD registers (together with a cast).
     * In single precision this means the real and integer SIMD registers
     * are of equal size.
     */
    const int simd_excl_size = c_nbnxnCpuIClusterSize * simd_width;
# if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
    exclusion_filter64.resize(simd_excl_size);
# else
    exclusion_filter.resize(simd_excl_size);
# endif

    for (int j = 0; j < simd_excl_size; j++)
        /* Set the consecutive bits for masking pair exclusions */
# if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
        exclusion_filter64[j] = (1U << j);
# else
        exclusion_filter[j] = (1U << j);
# endif
    if (!GMX_SIMD_HAVE_LOGICAL && !GMX_SIMD_HAVE_INT32_LOGICAL) // NOLINT(misc-redundant-expression)
        // If the SIMD implementation has no bitwise logical operation support
        // whatsoever we cannot use the normal masking. Instead,
        // we generate a vector of all 2^4 possible ways an i atom
        // interacts with its 4 j atoms. Each array entry contains
        // GMX_SIMD_REAL_WIDTH values that are read with a single aligned SIMD load.
        // Since there is no logical value representation in this case, we use
        // any nonzero value to indicate 'true', while zero means 'false'.
        // This can then be converted to a SIMD boolean internally in the SIMD
        // module by comparing to zero.
        // Each array entry encodes how this i atom will interact with the 4 j atoms.
        // Matching code exists in set_ci_top_excls() to generate indices into this array.
        // Those indices are used in the kernels.
        const int  simd_excl_size = c_nbnxnCpuIClusterSize * c_nbnxnCpuIClusterSize;
        const real simdFalse      = 0.0;
        const real simdTrue       = 1.0;

        interaction_array.resize(simd_excl_size * GMX_SIMD_REAL_WIDTH);
        for (int j = 0; j < simd_excl_size; j++)
            const int index = j * GMX_SIMD_REAL_WIDTH;
            for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
                interaction_array[index + i] = (j & (1 << i)) ? simdTrue : simdFalse;
nbnxn_atomdata_t::Params::Params(gmx::PinningPolicy pinningPolicy) :
    nbfp({}, { pinningPolicy }),
    nbfp_comb({}, { pinningPolicy }),
    type({}, { pinningPolicy }),
    lj_comb({}, { pinningPolicy }),
    q({}, { pinningPolicy }),
    energrp({}, { pinningPolicy })
/* Initializes an nbnxn_atomdata_t::Params data structure */
static void nbnxn_atomdata_params_init(const gmx::MDLogger&      mdlog,
                                       nbnxn_atomdata_t::Params* params,
                                       const Nbnxm::KernelType   kernelType,
                                       int                       enbnxninitcombrule,
                                       int                       ntype,
                                       ArrayRef<const real>      nbfp,
                                       int                       n_energygroups)
    if (debug)
        fprintf(debug, "There are %d atom types in the system, adding one for nbnxn_atomdata_t\n", ntype);

    params->numTypes = ntype + 1;
    params->nbfp.resize(params->numTypes * params->numTypes * 2);
    params->nbfp_comb.resize(params->numTypes * 2);

    /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
     * force-field floating point parameters.
     */
    real        tol               = 1e-5;
    const char* tolOverrideString = getenv("GMX_LJCOMB_TOL");
    if (tolOverrideString != nullptr)
        double tolOverride = std::strtod(tolOverrideString, nullptr);
        tol                = tolOverride;
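        /* Usage example (illustrative): the detection tolerance can be
         * overridden for a single run from the shell, e.g.
         *   GMX_LJCOMB_TOL=1e-4 gmx mdrun -deffnm prod
         * where the mdrun arguments are placeholders.
         */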
    bool bCombGeom = true;
    bool bCombLB   = true;

    /* Temporarily fill params->nbfp_comb with sigma and epsilon
     * to check for the LB rule.
     */
    for (int i = 0; i < ntype; i++)
        const real c6  = nbfp[(i * ntype + i) * 2] / 6.0;
        const real c12 = nbfp[(i * ntype + i) * 2 + 1] / 12.0;
        if (c6 > 0 && c12 > 0)
            params->nbfp_comb[i * 2]     = gmx::sixthroot(c12 / c6);
            params->nbfp_comb[i * 2 + 1] = 0.25 * c6 * c6 / c12;
        else if (c6 == 0 && c12 == 0)
            params->nbfp_comb[i * 2]     = 0;
            params->nbfp_comb[i * 2 + 1] = 0;
        else
            /* Cannot use the LB rule with only dispersion or repulsion */
            bCombLB = false;
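    /* Derivation of the sigma/epsilon conversion above (for reference): nbfp
     * stores 6*C6 and 12*C12, so after dividing out the prefactors we have
     * C6 = 4*eps*sigma^6 and C12 = 4*eps*sigma^12. Hence
     * sigma = (C12/C6)^(1/6) and eps = C6^2/(4*C12), which is exactly what
     * gmx::sixthroot(c12 / c6) and 0.25 * c6 * c6 / c12 compute.
     */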
    for (int i = 0; i < params->numTypes; i++)
        for (int j = 0; j < params->numTypes; j++)
            if (i < ntype && j < ntype)
                /* fr->nbfp has been updated, so that array too now stores c6/c12 including
                 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
                 */
                real c6  = nbfp[(i * ntype + j) * 2];
                real c12 = nbfp[(i * ntype + j) * 2 + 1];

                params->nbfp[(i * params->numTypes + j) * 2]     = c6;
                params->nbfp[(i * params->numTypes + j) * 2 + 1] = c12;

                /* Compare 6*C6 and 12*C12 for the geometric combination rule */
                bCombGeom =
                        bCombGeom
                        && gmx_within_tol(
                                c6 * c6, nbfp[(i * ntype + i) * 2] * nbfp[(j * ntype + j) * 2], tol)
                        && gmx_within_tol(c12 * c12,
                                          nbfp[(i * ntype + i) * 2 + 1] * nbfp[(j * ntype + j) * 2 + 1],
                                          tol);

                /* Compare C6 and C12 for the Lorentz-Berthelot combination rule */
                c6 /= 6.0;
                c12 /= 12.0;
                bCombLB = bCombLB
                          && ((c6 == 0 && c12 == 0
                               && (params->nbfp_comb[i * 2 + 1] == 0 || params->nbfp_comb[j * 2 + 1] == 0))
                              || (c6 > 0 && c12 > 0
                                  && gmx_within_tol(
                                             gmx::sixthroot(c12 / c6),
                                             0.5 * (params->nbfp_comb[i * 2] + params->nbfp_comb[j * 2]),
                                             tol)
                                  && gmx_within_tol(0.25 * c6 * c6 / c12,
                                                    std::sqrt(params->nbfp_comb[i * 2 + 1]
                                                              * params->nbfp_comb[j * 2 + 1]),
                                                    tol)));
            else
                /* Add zero parameters for the additional dummy atom type */
                params->nbfp[(i * params->numTypes + j) * 2]     = 0;
                params->nbfp[(i * params->numTypes + j) * 2 + 1] = 0;

    if (debug)
        fprintf(debug,
                "Combination rules: geometric %s Lorentz-Berthelot %s\n",
                gmx::boolToString(bCombGeom),
                gmx::boolToString(bCombLB));
    const bool simple = Nbnxm::kernelTypeUsesSimplePairlist(kernelType);

    switch (enbnxninitcombrule)
        case enbnxninitcombruleDETECT:
            /* We prefer the geometric combination rule,
             * as that gives a slightly faster kernel than the LB rule.
             */
            if (bCombGeom)
                params->ljCombinationRule = LJCombinationRule::Geometric;
            else if (bCombLB)
                params->ljCombinationRule = LJCombinationRule::LorentzBerthelot;
            else
                params->ljCombinationRule = LJCombinationRule::None;
                params->nbfp_comb.clear();

            std::string mesg;
            if (params->ljCombinationRule == LJCombinationRule::None)
                mesg = "Using full Lennard-Jones parameter combination matrix";
            else
                mesg = gmx::formatString("Using %s Lennard-Jones combination rule",
                                         enumValueToString(params->ljCombinationRule));
            GMX_LOG(mdlog.info).asParagraph().appendText(mesg);
            break;
        case enbnxninitcombruleGEOM:
            params->ljCombinationRule = LJCombinationRule::Geometric;
            break;
        case enbnxninitcombruleLB:
            params->ljCombinationRule = LJCombinationRule::LorentzBerthelot;
            break;
        case enbnxninitcombruleNONE:
            params->ljCombinationRule = LJCombinationRule::None;
            params->nbfp_comb.clear();
            break;
        default: gmx_incons("Unknown enbnxninitcombrule");
    const bool bSIMD = Nbnxm::kernelTypeIsSimd(kernelType);

    set_lj_parameter_data(params, bSIMD);

    params->nenergrp = n_energygroups;
    if (!simple)
        // We already check for energy groups when starting mdrun
        GMX_RELEASE_ASSERT(n_energygroups == 1, "GPU kernels do not support energy groups");
    /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
    if (params->nenergrp > 64)
        gmx_fatal(FARGS, "With NxN kernels not more than 64 energy groups are supported\n");

    params->neg_2log = 1;
    while (params->nenergrp > (1 << params->neg_2log))
        params->neg_2log++;
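    /* Worked example (illustrative): nenergrp = 5 gives neg_2log = 3, since
     * 1 << 3 = 8 is the first power of two that can hold group indices 0..4;
     * copy_egp_to_nbat_egps() below then packs each index into 3 bits.
     */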
/* Initializes an nbnxn_atomdata_t data structure */
nbnxn_atomdata_t::nbnxn_atomdata_t(gmx::PinningPolicy      pinningPolicy,
                                   const gmx::MDLogger&    mdlog,
                                   const Nbnxm::KernelType kernelType,
                                   int                     enbnxninitcombrule,
                                   int                     ntype,
                                   ArrayRef<const real>    nbfp,
                                   int                     n_energygroups,
                                   int                     nout) :
    params_(pinningPolicy),
    shift_vec({}, { pinningPolicy }),
    x_({}, { pinningPolicy }),
    bUseBufferFlags(FALSE)
    nbnxn_atomdata_params_init(
            mdlog, &paramsDeprecated(), kernelType, enbnxninitcombrule, ntype, nbfp, n_energygroups);

    const bool simple = Nbnxm::kernelTypeUsesSimplePairlist(kernelType);
    const bool bSIMD  = Nbnxm::kernelTypeIsSimd(kernelType);

    if (simple)
        int pack_x = std::max(c_nbnxnCpuIClusterSize, Nbnxm::JClusterSizePerKernelType[kernelType]);
        switch (pack_x)
            case 4: XFormat = nbatX4; break;
            case 8: XFormat = nbatX8; break;
            default: gmx_incons("Unsupported packing width");

    shift_vec.resize(gmx::c_numShiftVectors);

    xstride = (XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    fstride = (FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    /* Initialize the output data structures */
    for (int i = 0; i < nout; i++)
        const auto& pinningPolicy = params().type.get_allocator().pinningPolicy();
        out.emplace_back(kernelType, params().nenergrp, 1 << params().neg_2log, pinningPolicy);

    buffer_flags.clear();
template<int packSize>
static void copy_lj_to_nbat_lj_comb(gmx::ArrayRef<const real> ljparam_type, const int* type, int na, real* ljparam_at)
    /* The LJ params follow the combination rule:
     * copy the params from the type array to the atom array.
     */
    for (int is = 0; is < na; is += packSize)
        for (int k = 0; k < packSize; k++)
            int i = is + k;
            ljparam_at[is * 2 + k]            = ljparam_type[type[i] * 2];
            ljparam_at[is * 2 + packSize + k] = ljparam_type[type[i] * 2 + 1];
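/* Illustration of the layout written above, assuming packSize == 4: each pack
 * of four atoms stores its parameters as
 *   { p0(a0), p0(a1), p0(a2), p0(a3), p1(a0), p1(a1), p1(a2), p1(a3) },
 * where p0 and p1 are the two per-type combination-rule parameters, so the
 * SIMD kernels can fetch four of either parameter with one aligned load.
 */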
/* Sets the atom type in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t::Params* params,
                                         const Nbnxm::GridSet&     gridSet,
                                         ArrayRef<const int>       atomTypes)
    params->type.resize(gridSet.numGridAtomsTotal());

    for (const Nbnxm::Grid& grid : gridSet.grids())
        /* Loop over all columns and copy and fill */
        for (int i = 0; i < grid.numColumns(); i++)
            const int numAtoms   = grid.paddedNumAtomsInColumn(i);
            const int atomOffset = grid.firstAtomInColumn(i);

            copy_int_to_nbat_int(gridSet.atomIndices().data() + atomOffset,
                                 grid.numAtomsInColumn(i),
                                 numAtoms,
                                 atomTypes.data(),
                                 params->numTypes - 1,
                                 params->type.data() + atomOffset);
/* Sets the LJ combination rule parameters in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_ljcombparams(nbnxn_atomdata_t::Params* params,
                                            const int                 XFormat,
                                            const Nbnxm::GridSet&     gridSet)
    params->lj_comb.resize(gridSet.numGridAtomsTotal() * 2);

    if (params->ljCombinationRule != LJCombinationRule::None)
        for (const Nbnxm::Grid& grid : gridSet.grids())
            /* Loop over all columns and copy and fill */
            for (int i = 0; i < grid.numColumns(); i++)
                const int numAtoms   = grid.paddedNumAtomsInColumn(i);
                const int atomOffset = grid.firstAtomInColumn(i);

                if (XFormat == nbatX4)
                    copy_lj_to_nbat_lj_comb<c_packX4>(params->nbfp_comb,
                                                      params->type.data() + atomOffset,
                                                      numAtoms,
                                                      params->lj_comb.data() + atomOffset * 2);
                else if (XFormat == nbatX8)
                    copy_lj_to_nbat_lj_comb<c_packX8>(params->nbfp_comb,
                                                      params->type.data() + atomOffset,
                                                      numAtoms,
                                                      params->lj_comb.data() + atomOffset * 2);
                else if (XFormat == nbatXYZQ)
                    copy_lj_to_nbat_lj_comb<1>(params->nbfp_comb,
                                               params->type.data() + atomOffset,
                                               numAtoms,
                                               params->lj_comb.data() + atomOffset * 2);
/* Sets the charges in nbnxn_atomdata_t *nbat */
static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t*     nbat,
                                       const Nbnxm::GridSet& gridSet,
                                       ArrayRef<const real>  charges)
    if (nbat->XFormat != nbatXYZQ)
        nbat->paramsDeprecated().q.resize(nbat->numAtoms());

    for (const Nbnxm::Grid& grid : gridSet.grids())
        /* Loop over all columns and copy and fill */
        for (int cxy = 0; cxy < grid.numColumns(); cxy++)
            const int atomOffset     = grid.firstAtomInColumn(cxy);
            const int numAtoms       = grid.numAtomsInColumn(cxy);
            const int paddedNumAtoms = grid.paddedNumAtomsInColumn(cxy);

            if (nbat->XFormat == nbatXYZQ)
                real* q = nbat->x().data() + atomOffset * STRIDE_XYZQ + ZZ + 1;
                for (int i = 0; i < numAtoms; i++)
                    *q = charges[gridSet.atomIndices()[atomOffset + i]];
                    q += STRIDE_XYZQ;
                /* Complete the partially filled last cell with zeros */
                for (int i = numAtoms; i < paddedNumAtoms; i++)
                    *q = 0;
                    q += STRIDE_XYZQ;
            else
                real* q = nbat->paramsDeprecated().q.data() + atomOffset;
                for (int i = 0; i < numAtoms; i++)
                    *q = charges[gridSet.atomIndices()[atomOffset + i]];
                    q++;
                /* Complete the partially filled last cell with zeros */
                for (int i = numAtoms; i < paddedNumAtoms; i++)
                    *q = 0;
                    q++;
/* Set the charges of perturbed atoms in nbnxn_atomdata_t to 0.
 * This is to automatically remove the RF/PME self term in the nbnxn kernels.
 * Some of the zero interactions are still calculated in the normal kernels.
 * All perturbed interactions are calculated in the free energy kernel,
 * using the original charge and LJ data, not nbnxn_atomdata_t.
 */
static void nbnxn_atomdata_mask_fep(nbnxn_atomdata_t* nbat, const Nbnxm::GridSet& gridSet)
    nbnxn_atomdata_t::Params& params = nbat->paramsDeprecated();

    const bool formatIsXYZQ = (nbat->XFormat == nbatXYZQ);

    real* q        = formatIsXYZQ ? (nbat->x().data() + ZZ + 1) : params.q.data();
    int   stride_q = formatIsXYZQ ? STRIDE_XYZQ : 1;
    for (const Nbnxm::Grid& grid : gridSet.grids())
        const int nsubc = (grid.geometry().isSimple) ? 1 : c_gpuNumClusterPerCell;

        const int c_offset = grid.firstAtomInColumn(0);

        /* Loop over all columns and copy and fill */
        for (int c = 0; c < grid.numCells() * nsubc; c++)
            /* Does this cluster contain perturbed particles? */
            if (grid.clusterIsPerturbed(c))
                const int numAtomsPerCluster = grid.geometry().numAtomsICluster;
                for (int i = 0; i < numAtomsPerCluster; i++)
                    /* Is this a perturbed particle? */
                    if (grid.atomIsPerturbed(c, i))
                        int ind = c_offset + c * numAtomsPerCluster + i;
                        /* Set atom type and charge to non-interacting */
                        params.type[ind]  = params.numTypes - 1;
                        q[ind * stride_q] = 0;
/* Copies the energy group indices to a reordered and packed array */
static void
copy_egp_to_nbat_egps(const int* a, int na, int na_round, int na_c, int bit_shift, const int* in, int* innb)
    int i = 0, j = 0;
    for (; i < na; i += na_c)
        /* Store na_c energy group numbers into one int */
        int comb = 0;
        for (int sa = 0; sa < na_c; sa++)
            int at = a[i + sa];
            if (at >= 0)
                comb |= (GET_CGINFO_GID(in[at]) << (sa * bit_shift));
        innb[j++] = comb;
    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i += na_c)
        innb[j++] = 0;
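/* Worked example (illustrative): with na_c = 4 and bit_shift = 2, the group
 * indices {1, 0, 3, 2} of one i-cluster pack into
 *   comb = 1 | (0 << 2) | (3 << 4) | (2 << 6) = 177,
 * i.e. one int per i-cluster, unpacked again with neg_2log-bit shifts and
 * masks in the energy-group kernels.
 */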
/* Set the energy group indices for atoms in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t::Params* params,
                                            const Nbnxm::GridSet&     gridSet,
                                            ArrayRef<const int>       atomInfo)
    if (params->nenergrp == 1)
        return;

    params->energrp.resize(gridSet.numGridAtomsTotal());

    for (const Nbnxm::Grid& grid : gridSet.grids())
        /* Loop over all columns and copy and fill */
        for (int i = 0; i < grid.numColumns(); i++)
            const int numAtoms   = grid.paddedNumAtomsInColumn(i);
            const int atomOffset = grid.firstAtomInColumn(i);

            copy_egp_to_nbat_egps(gridSet.atomIndices().data() + atomOffset,
                                  grid.numAtomsInColumn(i),
                                  numAtoms,
                                  c_nbnxnCpuIClusterSize,
                                  params->neg_2log,
                                  atomInfo.data(),
                                  params->energrp.data() + grid.atomToCluster(atomOffset));
/* Sets all required atom parameter data in nbnxn_atomdata_t */
void nbnxn_atomdata_set(nbnxn_atomdata_t*     nbat,
                        const Nbnxm::GridSet& gridSet,
                        ArrayRef<const int>   atomTypes,
                        ArrayRef<const real>  atomCharges,
                        ArrayRef<const int>   atomInfo)
    nbnxn_atomdata_t::Params& params = nbat->paramsDeprecated();

    nbnxn_atomdata_set_atomtypes(&params, gridSet, atomTypes);

    nbnxn_atomdata_set_charges(nbat, gridSet, atomCharges);

    if (gridSet.haveFep())
        nbnxn_atomdata_mask_fep(nbat, gridSet);

    /* This must be done after masking types for FEP */
    nbnxn_atomdata_set_ljcombparams(&params, nbat->XFormat, gridSet);

    nbnxn_atomdata_set_energygroups(&params, gridSet, atomInfo);
/* Copies the shift vector array to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox, gmx::ArrayRef<gmx::RVec> shift_vec, nbnxn_atomdata_t* nbat)
    nbat->bDynamicBox = bDynamicBox;
    std::copy(shift_vec.begin(), shift_vec.end(), nbat->shift_vec.begin());
// This is slightly different from nbnxn_get_atom_range(...) at the end of the file
// TODO: Combine if possible
static void getAtomRanges(const Nbnxm::GridSet&   gridSet,
                          const gmx::AtomLocality locality,
                          int*                    gridBegin,
                          int*                    gridEnd)
    switch (locality)
        case gmx::AtomLocality::All:
            *gridBegin = 0;
            *gridEnd   = gridSet.grids().size();
            break;
        case gmx::AtomLocality::Local:
            *gridBegin = 0;
            *gridEnd   = 1;
            break;
        case gmx::AtomLocality::NonLocal:
            *gridBegin = 1;
            *gridEnd   = gridSet.grids().size();
            break;
        case gmx::AtomLocality::Count:
            GMX_ASSERT(false, "Count is invalid locality specifier");
            break;
/* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_x_to_nbat_x(const Nbnxm::GridSet&   gridSet,
                                     const gmx::AtomLocality locality,
                                     const rvec*             coordinates,
                                     nbnxn_atomdata_t*       nbat)
    int gridBegin = 0;
    int gridEnd   = 0;
    getAtomRanges(gridSet, locality, &gridBegin, &gridEnd);

    const int nth = gmx_omp_nthreads_get(ModuleMultiThread::Pairsearch);
#pragma omp parallel for num_threads(nth) schedule(static)
    for (int th = 0; th < nth; th++)
        for (int g = gridBegin; g < gridEnd; g++)
            const Nbnxm::Grid& grid       = gridSet.grids()[g];
            const int          numCellsXY = grid.numColumns();

            const int cxy0 = (numCellsXY * th + nth - 1) / nth;
            const int cxy1 = (numCellsXY * (th + 1) + nth - 1) / nth;
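            /* Example of this split (illustrative): numCellsXY = 10 with
             * nth = 4 threads gives the column ranges [0,3), [3,5), [5,8)
             * and [8,10), i.e. a ceiling division that assigns every column
             * to exactly one thread.
             */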
            for (int cxy = cxy0; cxy < cxy1; cxy++)
                const int na  = grid.numAtomsInColumn(cxy);
                const int ash = grid.firstAtomInColumn(cxy);

                copy_rvec_to_nbat_real(gridSet.atomIndices().data() + ash,
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
/* Copies (and reorders) the coordinates to nbnxn_atomdata_t on the GPU */
void nbnxn_atomdata_x_to_nbat_x_gpu(const Nbnxm::GridSet&   gridSet,
                                    const gmx::AtomLocality locality,
                                    DeviceBuffer<RVec>      d_x,
                                    GpuEventSynchronizer*   xReadyOnDevice)
    GMX_ASSERT(xReadyOnDevice != nullptr, "Need a valid GpuEventSynchronizer object");

    int gridBegin = 0;
    int gridEnd   = 0;
    getAtomRanges(gridSet, locality, &gridBegin, &gridEnd);

    for (int g = gridBegin; g < gridEnd; g++)
        nbnxn_gpu_x_to_nbat_x(gridSet.grids()[g],
                              (g == gridBegin) ? xReadyOnDevice : nullptr, // Sync on first iteration only
                              gridSet.numColumnsMax(),
                              (g == gridEnd - 1));
static void nbnxn_atomdata_clear_reals(gmx::ArrayRef<real> dest, int i0, int i1)
    for (int i = i0; i < i1; i++)
        dest[i] = 0;

gmx_unused static void nbnxn_atomdata_reduce_reals(real* gmx_restrict dest,
                                                   gmx_bool           bDestSet,
                                                   const real** gmx_restrict src,
                                                   int                       nsrc,
                                                   int                       i0,
                                                   int                       i1)
    if (bDestSet)
        /* The destination buffer contains data, add to it */
        for (int i = i0; i < i1; i++)
            for (int s = 0; s < nsrc; s++)
                dest[i] += src[s][i];
    else
        /* The destination buffer is uninitialized, set it first */
        for (int i = i0; i < i1; i++)
            dest[i] = src[0][i];
            for (int s = 1; s < nsrc; s++)
                dest[i] += src[s][i];
gmx_unused static void nbnxn_atomdata_reduce_reals_simd(real gmx_unused* gmx_restrict dest,
                                                        gmx_bool gmx_unused bDestSet,
                                                        const gmx_unused real** gmx_restrict src,
                                                        int gmx_unused nsrc,
                                                        int gmx_unused i0,
                                                        int gmx_unused i1)
#if GMX_SIMD
    /* The SIMD width here is actually independent of that in the kernels,
     * but we use the same width for simplicity (usually optimal anyhow).
     */
    SimdReal dest_SSE, src_SSE;

    if (bDestSet)
        for (int i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
            dest_SSE = load<SimdReal>(dest + i);
            for (int s = 0; s < nsrc; s++)
                src_SSE  = load<SimdReal>(src[s] + i);
                dest_SSE = dest_SSE + src_SSE;
            store(dest + i, dest_SSE);
    else
        for (int i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
            dest_SSE = load<SimdReal>(src[0] + i);
            for (int s = 1; s < nsrc; s++)
                src_SSE  = load<SimdReal>(src[s] + i);
                dest_SSE = dest_SSE + src_SSE;
            store(dest + i, dest_SSE);
#endif
/* Add part of the force array(s) from nbnxn_atomdata_t to f
 *
 * Note: Adding restrict to f makes this function 50% slower with gcc 7.3
 */
static void nbnxn_atomdata_add_nbat_f_to_f_part(const Nbnxm::GridSet&          gridSet,
                                                const nbnxn_atomdata_t&        nbat,
                                                const nbnxn_atomdata_output_t& out,
                                                const int                      a0,
                                                const int                      a1,
                                                rvec*                          f)
    gmx::ArrayRef<const int> cell = gridSet.cells();
    // Note: Using ArrayRef instead makes this code 25% slower with gcc 7.3
    const real* fnb = out.f.data();

    /* Loop over all columns and copy and fill */
    switch (nbat.FFormat)
        case nbatXYZ:
        case nbatXYZQ:
            for (int a = a0; a < a1; a++)
                int i = cell[a] * nbat.fstride;

                f[a][XX] += fnb[i];
                f[a][YY] += fnb[i + 1];
                f[a][ZZ] += fnb[i + 2];
            break;
        case nbatX4:
            for (int a = a0; a < a1; a++)
                int i = atom_to_x_index<c_packX4>(cell[a]);

                f[a][XX] += fnb[i + XX * c_packX4];
                f[a][YY] += fnb[i + YY * c_packX4];
                f[a][ZZ] += fnb[i + ZZ * c_packX4];
            break;
        case nbatX8:
            for (int a = a0; a < a1; a++)
                int i = atom_to_x_index<c_packX8>(cell[a]);

                f[a][XX] += fnb[i + XX * c_packX8];
                f[a][YY] += fnb[i + YY * c_packX8];
                f[a][ZZ] += fnb[i + ZZ * c_packX8];
            break;
        default: gmx_incons("Unsupported nbnxn_atomdata_t format");
static void nbnxn_atomdata_add_nbat_f_to_f_reduce(nbnxn_atomdata_t* nbat, int nth)
#pragma omp parallel for num_threads(nth) schedule(static)
    for (int th = 0; th < nth; th++)
        const real* fptr[NBNXN_BUFFERFLAG_MAX_THREADS];

        gmx::ArrayRef<const gmx_bitmask_t> flags = nbat->buffer_flags;

        /* Calculate the cell-block range for our thread */
        const int b0 = (flags.size() * th) / nth;
        const int b1 = (flags.size() * (th + 1)) / nth;
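        /* Example (illustrative): flags.size() = 10 cell blocks and nth = 4
         * threads give the ranges [0,2), [2,5), [5,7) and [7,10), so each
         * cell block is reduced by exactly one thread.
         */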
        for (int b = b0; b < b1; b++)
            const int i0 = b * NBNXN_BUFFERFLAG_SIZE * nbat->fstride;
            const int i1 = (b + 1) * NBNXN_BUFFERFLAG_SIZE * nbat->fstride;

            int nfptr = 0;
            for (gmx::index out = 1; out < gmx::ssize(nbat->out); out++)
                if (bitmask_is_set(flags[b], out))
                    fptr[nfptr++] = nbat->out[out].f.data();
            if (nfptr > 0)
#if GMX_SIMD
                nbnxn_atomdata_reduce_reals_simd
#else
                nbnxn_atomdata_reduce_reals
#endif
                        (nbat->out[0].f.data(), bitmask_is_set(flags[b], 0), fptr, nfptr, i0, i1);
            else if (!bitmask_is_set(flags[b], 0))
                nbnxn_atomdata_clear_reals(nbat->out[0].f, i0, i1);
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
/* Add the force array(s) from nbnxn_atomdata_t to f */
void reduceForces(nbnxn_atomdata_t* nbat, const gmx::AtomLocality locality, const Nbnxm::GridSet& gridSet, rvec* f)
    int a0 = 0;
    int na = 0;
    nbnxn_get_atom_range(locality, gridSet, &a0, &na);

    if (na == 0)
        /* There are no atoms for this reduction, avoid some overhead */
        return;

    int nth = gmx_omp_nthreads_get(ModuleMultiThread::Nonbonded);

    if (nbat->out.size() > 1)
        if (locality != gmx::AtomLocality::All)
            gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");

        /* Reduce the force thread output buffers into buffer 0, before adding
         * them to the, differently ordered, "real" force buffer.
         */
        nbnxn_atomdata_add_nbat_f_to_f_reduce(nbat, nth);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (int th = 0; th < nth; th++)
        nbnxn_atomdata_add_nbat_f_to_f_part(
                gridSet, *nbat, nbat->out[0], a0 + ((th + 0) * na) / nth, a0 + ((th + 1) * na) / nth, f);
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t& nbat, gmx::ArrayRef<gmx::RVec> fshift)
    gmx::ArrayRef<const nbnxn_atomdata_output_t> outputBuffers = nbat.out;

    for (int s = 0; s < gmx::c_numShiftVectors; s++)
        rvec sum;
        clear_rvec(sum);
        for (const nbnxn_atomdata_output_t& out : outputBuffers)
            sum[XX] += out.fshift[s * DIM + XX];
            sum[YY] += out.fshift[s * DIM + YY];
            sum[ZZ] += out.fshift[s * DIM + ZZ];
        fshift[s] += sum;
void nbnxn_get_atom_range(const gmx::AtomLocality atomLocality,
                          const Nbnxm::GridSet&   gridSet,
                          int*                    atomStart,
                          int*                    nAtoms)
    switch (atomLocality)
        case gmx::AtomLocality::All:
            *atomStart = 0;
            *nAtoms    = gridSet.numRealAtomsTotal();
            break;
        case gmx::AtomLocality::Local:
            *atomStart = 0;
            *nAtoms    = gridSet.numRealAtomsLocal();
            break;
        case gmx::AtomLocality::NonLocal:
            *atomStart = gridSet.numRealAtomsLocal();
            *nAtoms    = gridSet.numRealAtomsTotal() - gridSet.numRealAtomsLocal();
            break;
        case gmx::AtomLocality::Count:
            GMX_ASSERT(false, "Count is invalid locality specifier");
            break;