2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
47 #include "thread_mpi/atomic.h"
49 #include "gromacs/math/functions.h"
50 #include "gromacs/math/utilities.h"
51 #include "gromacs/math/vec.h"
52 #include "gromacs/mdlib/gmx_omp_nthreads.h"
53 #include "gromacs/mdtypes/forcerec.h" // only for GET_CGINFO_*
54 #include "gromacs/mdtypes/mdatom.h"
55 #include "gromacs/nbnxm/nbnxm.h"
56 #include "gromacs/nbnxm/nbnxm_geometry.h"
57 #include "gromacs/nbnxm/pairlist.h"
58 #include "gromacs/pbcutil/ishift.h"
59 #include "gromacs/simd/simd.h"
60 #include "gromacs/utility/exceptions.h"
61 #include "gromacs/utility/fatalerror.h"
62 #include "gromacs/utility/gmxomp.h"
63 #include "gromacs/utility/logger.h"
64 #include "gromacs/utility/smalloc.h"
65 #include "gromacs/utility/strconvert.h"
66 #include "gromacs/utility/stringutil.h"
71 using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
73 void nbnxn_atomdata_t::resizeCoordinateBuffer(int numAtoms)
77 x_.resize(numAtoms*xstride);
80 void nbnxn_atomdata_t::resizeForceBuffers()
82 /* Force buffers need padding up to a multiple of the buffer flag size */
83 const int paddedSize = (numAtoms() + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE*NBNXN_BUFFERFLAG_SIZE;
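/* Worked example (illustrative values; NBNXN_BUFFERFLAG_SIZE is defined elsewhere):
 * with a flag size of 16 and 70 atoms, paddedSize = (70 + 16 - 1)/16*16 = 80, using integer division. */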
85 /* Should we let each thread allocate its own data instead? */
86 for (nbnxn_atomdata_output_t &outBuffer : out)
88 outBuffer.f.resize(paddedSize*fstride);
92 /* Initializes an nbnxn_atomdata_output_t data structure */
93 nbnxn_atomdata_output_t::nbnxn_atomdata_output_t(Nbnxm::KernelType kernelType,
95 int simdEnergyBufferStride,
96 gmx::PinningPolicy pinningPolicy) :
97 f({}, {pinningPolicy}),
98 fshift({}, {pinningPolicy}),
99 Vvdw({}, {pinningPolicy}),
100 Vc({}, {pinningPolicy})
102 fshift.resize(SHIFTS*DIM);
103 Vvdw.resize(numEnergyGroups*numEnergyGroups);
104 Vc.resize(numEnergyGroups*numEnergyGroups);
106 if (Nbnxm::kernelTypeIsSimd(kernelType))
108 int cj_size = Nbnxm::JClusterSizePerKernelType[kernelType];
109 int numElements = numEnergyGroups*numEnergyGroups*simdEnergyBufferStride*(cj_size/2)*cj_size;
110 VSvdw.resize(numElements);
111 VSc.resize(numElements);
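/* Example (illustrative): with cj_size = 4 and a single energy group,
 * numElements = 1*1*simdEnergyBufferStride*2*4, i.e. 8*simdEnergyBufferStride reals per buffer. */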
115 static void copy_int_to_nbat_int(const int *a, int na, int na_round,
116 const int *in, int fill, int *innb)
121 for (i = 0; i < na; i++)
123 innb[j++] = in[a[i]];
125 /* Complete the partially filled last cell with fill */
126 for (; i < na_round; i++)
132 void copy_rvec_to_nbat_real(const int *a, int na, int na_round,
133 const rvec *x, int nbatFormat,
136 /* We complete partially filled cells (which can only be the last one in each
137 * column) with coordinates farAway. The actual coordinate value does
138 * not influence the results, since these filler particles do not interact.
139 * Clusters with normal atoms + fillers have a bounding box based only
140 * on the coordinates of the atoms. Clusters with only fillers have
141 * the coordinates of the first filler as their bounding box. Such clusters
142 * are not considered as i-entries, but they are considered as j-entries.
143 * So for performance it is better to have their bounding boxes far away,
144 * such that filler-only clusters don't end up in the pair list.
146 const real farAway = -1000000;
154 for (i = 0; i < na; i++)
156 xnb[j++] = x[a[i]][XX];
157 xnb[j++] = x[a[i]][YY];
158 xnb[j++] = x[a[i]][ZZ];
160 /* Complete the partially filled last cell with farAway elements */
161 for (; i < na_round; i++)
170 for (i = 0; i < na; i++)
172 xnb[j++] = x[a[i]][XX];
173 xnb[j++] = x[a[i]][YY];
174 xnb[j++] = x[a[i]][ZZ];
177 /* Complete the partially filled last cell with zeros */
178 for (; i < na_round; i++)
187 j = atom_to_x_index<c_packX4>(a0);
188 c = a0 & (c_packX4-1);
189 for (i = 0; i < na; i++)
191 xnb[j+XX*c_packX4] = x[a[i]][XX];
192 xnb[j+YY*c_packX4] = x[a[i]][YY];
193 xnb[j+ZZ*c_packX4] = x[a[i]][ZZ];
198 j += (DIM-1)*c_packX4;
202 /* Complete the partially filled last cell with zeros */
203 for (; i < na_round; i++)
205 xnb[j+XX*c_packX4] = farAway;
206 xnb[j+YY*c_packX4] = farAway;
207 xnb[j+ZZ*c_packX4] = farAway;
212 j += (DIM-1)*c_packX4;
218 j = atom_to_x_index<c_packX8>(a0);
219 c = a0 & (c_packX8 - 1);
220 for (i = 0; i < na; i++)
222 xnb[j+XX*c_packX8] = x[a[i]][XX];
223 xnb[j+YY*c_packX8] = x[a[i]][YY];
224 xnb[j+ZZ*c_packX8] = x[a[i]][ZZ];
229 j += (DIM-1)*c_packX8;
233 /* Complete the partially filled last cell with zeros */
234 for (; i < na_round; i++)
236 xnb[j+XX*c_packX8] = farAway;
237 xnb[j+YY*c_packX8] = farAway;
238 xnb[j+ZZ*c_packX8] = farAway;
243 j += (DIM-1)*c_packX8;
249 gmx_incons("Unsupported nbnxn_atomdata_t format");
253 /* Stores the LJ parameter data in a format convenient for different kernels */
254 static void set_lj_parameter_data(nbnxn_atomdata_t::Params *params, gmx_bool bSIMD)
256 int nt = params->numTypes;
261 /* nbfp_aligned stores two parameters using the stride most suitable
262 * for the present SIMD architecture, as specified by the constant
263 * c_simdBestPairAlignment from the SIMD header.
264 * There's a slight inefficiency in allocating and initializing nbfp_aligned
265 * when it might not be used, but introducing the conditional code is not really worth it.
268 params->nbfp_aligned.resize(nt*nt*c_simdBestPairAlignment);
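/* Resulting layout: one block of c_simdBestPairAlignment reals per type pair (i,j);
 * slots 0 and 1 hold c6 and c12, and any further slots in the block are set to zero below. */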
270 for (int i = 0; i < nt; i++)
272 for (int j = 0; j < nt; j++)
274 params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+0] = params->nbfp[(i*nt+j)*2+0];
275 params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+1] = params->nbfp[(i*nt+j)*2+1];
276 if (c_simdBestPairAlignment > 2)
278 params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+2] = 0;
279 params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+3] = 0;
286 /* We use combination rule data for SIMD combination rule kernels
287 * and for LJ-PME kernels. We then only need parameters per atom type,
288 * not per pair of atom types.
290 params->nbfp_comb.resize(nt*2);
291 switch (params->comb_rule)
294 params->comb_rule = ljcrGEOM;
296 for (int i = 0; i < nt; i++)
298 /* Store the sqrt of the diagonal from the nbfp matrix */
299 params->nbfp_comb[i*2 ] = std::sqrt(params->nbfp[(i*nt+i)*2 ]);
300 params->nbfp_comb[i*2+1] = std::sqrt(params->nbfp[(i*nt+i)*2+1]);
304 for (int i = 0; i < nt; i++)
306 /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
307 const real c6 = params->nbfp[(i*nt+i)*2 ];
308 const real c12 = params->nbfp[(i*nt+i)*2+1];
309 if (c6 > 0 && c12 > 0)
311 /* We store 0.5*2^(1/6)*sigma and sqrt(4*3*eps),
312 * so we get 6*C6 and 12*C12 after combining.
314 params->nbfp_comb[i*2 ] = 0.5*gmx::sixthroot(c12/c6);
315 params->nbfp_comb[i*2+1] = std::sqrt(c6*c6/c12);
319 params->nbfp_comb[i*2 ] = 0;
320 params->nbfp_comb[i*2+1] = 0;
325 /* We always store the full matrix (see code above) */
328 gmx_incons("Unknown combination rule");
332 nbnxn_atomdata_t::SimdMasks::SimdMasks()
335 constexpr int simd_width = GMX_SIMD_REAL_WIDTH;
336 /* Set the diagonal cluster pair exclusion mask setup data.
337 * In the kernel we check 0 < j - i to generate the masks.
338 * Here we store j - i for generating the mask for the first i;
339 * we subtract 0.5 to avoid rounding issues.
340 * In the kernel we can subtract 1 to generate the subsequent mask.
342 const int simd_4xn_diag_size = std::max(c_nbnxnCpuIClusterSize, simd_width);
343 diagonal_4xn_j_minus_i.resize(simd_4xn_diag_size);
344 for (int j = 0; j < simd_4xn_diag_size; j++)
346 diagonal_4xn_j_minus_i[j] = j - 0.5;
349 diagonal_2xnn_j_minus_i.resize(simd_width);
350 for (int j = 0; j < simd_width/2; j++)
352 /* The j-cluster size is half the SIMD width */
353 diagonal_2xnn_j_minus_i[j] = j - 0.5;
354 /* The next half of the SIMD width is for i + 1 */
355 diagonal_2xnn_j_minus_i[simd_width/2 + j] = j - 1 - 0.5;
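/* Example (illustrative widths): simd_width = 4 gives a 4xN array of { -0.5, 0.5, 1.5, 2.5 };
 * simd_width = 8 gives a 2x(N+N) array of { -0.5, 0.5, 1.5, 2.5, -1.5, -0.5, 0.5, 1.5 }. */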
358 /* We use up to 32 bits for exclusion masking.
359 * The same masks are used for the 4xN and 2x(N+N) kernels.
360 * The masks are read either into integer SIMD registers or into
361 * real SIMD registers (together with a cast).
362 * In single precision this means the real and integer SIMD registers are of equal size.
365 const int simd_excl_size = c_nbnxnCpuIClusterSize*simd_width;
366 #if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
367 exclusion_filter64.resize(simd_excl_size);
369 exclusion_filter.resize(simd_excl_size);
372 for (int j = 0; j < simd_excl_size; j++)
374 /* Set the consecutive bits for masking pair exclusions */
375 #if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
376 exclusion_filter64[j] = (1U << j);
378 exclusion_filter[j] = (1U << j);
382 if (!GMX_SIMD_HAVE_LOGICAL && !GMX_SIMD_HAVE_INT32_LOGICAL) // NOLINT(misc-redundant-expression)
384 // If the SIMD implementation has no bitwise logical operation support
385 // whatsoever we cannot use the normal masking. Instead,
386 // we generate a vector of all 2^4 possible ways an i atom
387 // interacts with its 4 j atoms. Each array entry contains
388 // GMX_SIMD_REAL_WIDTH values that are read with a single aligned SIMD load.
389 // Since there is no logical value representation in this case, we use
390 // any nonzero value to indicate 'true', while zero means 'false'.
391 // This can then be converted to a SIMD boolean internally in the SIMD
392 // module by comparing to zero.
393 // Each array entry encodes how this i atom will interact with the 4 j atoms.
394 // Matching code exists in set_ci_top_excls() to generate indices into this array.
395 // Those indices are used in the kernels.
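// For example (illustrative), with GMX_SIMD_REAL_WIDTH = 4 the entry for j = 5 (binary 0101)
// reads { 1.0, 0.0, 1.0, 0.0 }: the i atom interacts with j atoms 0 and 2 only.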
397 const int simd_excl_size = c_nbnxnCpuIClusterSize*c_nbnxnCpuIClusterSize;
398 const real simdFalse = 0.0;
399 const real simdTrue = 1.0;
401 interaction_array.resize(simd_excl_size * GMX_SIMD_REAL_WIDTH);
402 for (int j = 0; j < simd_excl_size; j++)
404 const int index = j * GMX_SIMD_REAL_WIDTH;
405 for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
407 interaction_array[index + i] = (j & (1 << i)) ? simdTrue : simdFalse;
414 nbnxn_atomdata_t::Params::Params(gmx::PinningPolicy pinningPolicy) :
416 nbfp({}, {pinningPolicy}),
417 nbfp_comb({}, {pinningPolicy}),
418 type({}, {pinningPolicy}),
419 lj_comb({}, {pinningPolicy}),
420 q({}, {pinningPolicy}),
423 energrp({}, {pinningPolicy})
427 nbnxn_atomdata_t::nbnxn_atomdata_t(gmx::PinningPolicy pinningPolicy) :
428 params_(pinningPolicy),
431 shift_vec({}, {pinningPolicy}),
432 x_({}, {pinningPolicy}),
434 bUseBufferFlags(FALSE),
435 bUseTreeReduce(FALSE)
439 /* Initializes an nbnxn_atomdata_t::Params data structure */
440 static void nbnxn_atomdata_params_init(const gmx::MDLogger &mdlog,
441 nbnxn_atomdata_t::Params *params,
442 const Nbnxm::KernelType kernelType,
443 int enbnxninitcombrule,
444 int ntype, const real *nbfp,
449 gmx_bool simple, bCombGeom, bCombLB, bSIMD;
453 fprintf(debug, "There are %d atom types in the system, adding one for nbnxn_atomdata_t\n", ntype);
455 params->numTypes = ntype + 1;
456 params->nbfp.resize(params->numTypes*params->numTypes*2);
457 params->nbfp_comb.resize(params->numTypes*2);
459 /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
460 * force-field floating point parameters.
463 ptr = getenv("GMX_LJCOMB_TOL");
468 sscanf(ptr, "%lf", &dbl);
474 /* Temporarily fill params->nbfp_comb with sigma and epsilon
475 * to check for the LB rule.
477 for (int i = 0; i < ntype; i++)
479 c6 = nbfp[(i*ntype+i)*2 ]/6.0;
480 c12 = nbfp[(i*ntype+i)*2 + 1]/12.0;
481 if (c6 > 0 && c12 > 0)
483 params->nbfp_comb[i*2 ] = gmx::sixthroot(c12/c6);
484 params->nbfp_comb[i*2 + 1] = 0.25*c6*c6/c12;
486 else if (c6 == 0 && c12 == 0)
488 params->nbfp_comb[i*2 ] = 0;
489 params->nbfp_comb[i*2 + 1] = 0;
493 /* Cannot use the LB rule with only dispersion or repulsion */
498 for (int i = 0; i < params->numTypes; i++)
500 for (int j = 0; j < params->numTypes; j++)
502 if (i < ntype && j < ntype)
504 /* fr->nbfp has been updated, so that array too now stores c6/c12 including
505 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
507 c6 = nbfp[(i*ntype+j)*2 ];
508 c12 = nbfp[(i*ntype+j)*2 + 1];
509 params->nbfp[(i*params->numTypes+j)*2 ] = c6;
510 params->nbfp[(i*params->numTypes+j)*2 + 1] = c12;
512 /* Compare 6*C6 and 12*C12 for the geometric combination rule */
513 bCombGeom = bCombGeom &&
514 gmx_within_tol(c6*c6, nbfp[(i*ntype+i)*2 ]*nbfp[(j*ntype+j)*2 ], tol) &&
515 gmx_within_tol(c12*c12, nbfp[(i*ntype+i)*2 + 1]*nbfp[(j*ntype+j)*2 + 1], tol);
517 /* Compare C6 and C12 for Lorentz-Berthelot combination rule */
521 ((c6 == 0 && c12 == 0 &&
522 (params->nbfp_comb[i*2 + 1] == 0 || params->nbfp_comb[j*2 + 1] == 0)) ||
523 (c6 > 0 && c12 > 0 &&
524 gmx_within_tol(gmx::sixthroot(c12/c6),
525 0.5*(params->nbfp_comb[i*2]+params->nbfp_comb[j*2]), tol) &&
526 gmx_within_tol(0.25*c6*c6/c12, std::sqrt(params->nbfp_comb[i*2 + 1]*params->nbfp_comb[j*2 + 1]), tol)));
530 /* Add zero parameters for the additional dummy atom type */
531 params->nbfp[(i*params->numTypes + j)*2 ] = 0;
532 params->nbfp[(i*params->numTypes + j)*2+1] = 0;
538 fprintf(debug, "Combination rules: geometric %s Lorentz-Berthelot %s\n",
539 gmx::boolToString(bCombGeom), gmx::boolToString(bCombLB));
542 simple = Nbnxm::kernelTypeUsesSimplePairlist(kernelType);
544 switch (enbnxninitcombrule)
546 case enbnxninitcombruleDETECT:
547 /* We prefer the geometric combination rule,
548 * as that gives a slightly faster kernel than the LB rule.
552 params->comb_rule = ljcrGEOM;
556 params->comb_rule = ljcrLB;
560 params->comb_rule = ljcrNONE;
562 params->nbfp_comb.clear();
567 if (params->comb_rule == ljcrNONE)
569 mesg = "Using full Lennard-Jones parameter combination matrix";
573 mesg = gmx::formatString("Using %s Lennard-Jones combination rule",
574 params->comb_rule == ljcrGEOM ? "geometric" : "Lorentz-Berthelot");
576 GMX_LOG(mdlog.info).asParagraph().appendText(mesg);
579 case enbnxninitcombruleGEOM:
580 params->comb_rule = ljcrGEOM;
582 case enbnxninitcombruleLB:
583 params->comb_rule = ljcrLB;
585 case enbnxninitcombruleNONE:
586 params->comb_rule = ljcrNONE;
588 params->nbfp_comb.clear();
591 gmx_incons("Unknown enbnxninitcombrule");
594 bSIMD = Nbnxm::kernelTypeIsSimd(kernelType);
596 set_lj_parameter_data(params, bSIMD);
598 params->nenergrp = n_energygroups;
601 // We now check for energy groups already when starting mdrun
602 GMX_RELEASE_ASSERT(n_energygroups == 1, "GPU kernels do not support energy groups");
604 /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
605 if (params->nenergrp > 64)
607 gmx_fatal(FARGS, "With NxN kernels, no more than 64 energy groups are supported\n");
609 params->neg_2log = 1;
610 while (params->nenergrp > (1<<params->neg_2log))
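/* After this loop neg_2log is the number of bits needed to store one energy group index,
 * e.g. 5 energy groups give neg_2log = 3. */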
616 /* Initializes an nbnxn_atomdata_t data structure */
617 void nbnxn_atomdata_init(const gmx::MDLogger &mdlog,
618 nbnxn_atomdata_t *nbat,
619 const Nbnxm::KernelType kernelType,
620 int enbnxninitcombrule,
621 int ntype, const real *nbfp,
625 nbnxn_atomdata_params_init(mdlog, &nbat->paramsDeprecated(), kernelType,
626 enbnxninitcombrule, ntype, nbfp, n_energygroups);
628 const bool simple = Nbnxm::kernelTypeUsesSimplePairlist(kernelType);
629 const bool bSIMD = Nbnxm::kernelTypeIsSimd(kernelType);
637 pack_x = std::max(c_nbnxnCpuIClusterSize,
638 Nbnxm::JClusterSizePerKernelType[kernelType]);
642 nbat->XFormat = nbatX4;
645 nbat->XFormat = nbatX8;
648 gmx_incons("Unsupported packing width");
653 nbat->XFormat = nbatXYZ;
656 nbat->FFormat = nbat->XFormat;
660 nbat->XFormat = nbatXYZQ;
661 nbat->FFormat = nbatXYZ;
664 nbat->shift_vec.resize(SHIFTS);
666 nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
667 nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
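/* With nbatXYZQ the coordinates and charge are interleaved as x,y,z,q per atom, so coordinates
 * use stride STRIDE_XYZQ (usually 4) while forces keep stride DIM (3); the other formats use DIM for both. */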
669 /* Initialize the output data structures */
670 for (int i = 0; i < nout; i++)
672 const auto &pinningPolicy = nbat->params().type.get_allocator().pinningPolicy();
673 nbat->out.emplace_back(kernelType, nbat->params().nenergrp, 1 << nbat->params().neg_2log,
677 nbat->buffer_flags.flag = nullptr;
678 nbat->buffer_flags.flag_nalloc = 0;
680 const int nth = gmx_omp_nthreads_get(emntNonbonded);
682 const char *ptr = getenv("GMX_USE_TREEREDUCE");
685 nbat->bUseTreeReduce = (strtol(ptr, nullptr, 10) != 0);
688 else if (nth > 8) /* on the CPU we currently don't benefit even at 32 threads */
690 nbat->bUseTreeReduce = 1;
695 nbat->bUseTreeReduce = false;
697 if (nbat->bUseTreeReduce)
699 GMX_LOG(mdlog.info).asParagraph().appendText("Using tree force reduction");
701 nbat->syncStep = new tMPI_Atomic[nth];
705 template<int packSize>
706 static void copy_lj_to_nbat_lj_comb(gmx::ArrayRef<const real> ljparam_type,
707 const int *type, int na,
710 /* The LJ params follow the combination rule:
711 * copy the params from the type array to the atom array.
713 for (int is = 0; is < na; is += packSize)
715 for (int k = 0; k < packSize; k++)
718 ljparam_at[is*2 + k] = ljparam_type[type[i]*2 ];
719 ljparam_at[is*2 + packSize + k] = ljparam_type[type[i]*2 + 1];
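/* Packed layout sketch (illustrative, packSize = 4): c6 of atoms 0-3, then c12 of atoms 0-3,
 * then the same pattern for atoms 4-7, and so on. */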
724 /* Sets the atom type in nbnxn_atomdata_t */
725 static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t::Params *params,
726 const Nbnxm::GridSet &gridSet,
729 params->type.resize(gridSet.numGridAtomsTotal());
731 for (const Nbnxm::Grid &grid : gridSet.grids())
733 /* Loop over all columns and copy and fill */
734 for (int i = 0; i < grid.numColumns(); i++)
736 const int numAtoms = grid.paddedNumAtomsInColumn(i);
737 const int atomOffset = grid.firstAtomInColumn(i);
739 copy_int_to_nbat_int(gridSet.atomIndices().data() + atomOffset,
740 grid.numAtomsInColumn(i), numAtoms,
741 type, params->numTypes - 1, params->type.data() + atomOffset);
746 /* Sets the LJ combination rule parameters in nbnxn_atomdata_t */
747 static void nbnxn_atomdata_set_ljcombparams(nbnxn_atomdata_t::Params *params,
749 const Nbnxm::GridSet &gridSet)
751 params->lj_comb.resize(gridSet.numGridAtomsTotal()*2);
753 if (params->comb_rule != ljcrNONE)
755 for (const Nbnxm::Grid &grid : gridSet.grids())
757 /* Loop over all columns and copy and fill */
758 for (int i = 0; i < grid.numColumns(); i++)
760 const int numAtoms = grid.paddedNumAtomsInColumn(i);
761 const int atomOffset = grid.firstAtomInColumn(i);
763 if (XFormat == nbatX4)
765 copy_lj_to_nbat_lj_comb<c_packX4>(params->nbfp_comb,
766 params->type.data() + atomOffset,
768 params->lj_comb.data() + atomOffset*2);
770 else if (XFormat == nbatX8)
772 copy_lj_to_nbat_lj_comb<c_packX8>(params->nbfp_comb,
773 params->type.data() + atomOffset,
775 params->lj_comb.data() + atomOffset*2);
777 else if (XFormat == nbatXYZQ)
779 copy_lj_to_nbat_lj_comb<1>(params->nbfp_comb,
780 params->type.data() + atomOffset,
782 params->lj_comb.data() + atomOffset*2);
789 /* Sets the charges in nbnxn_atomdata_t *nbat */
790 static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t *nbat,
791 const Nbnxm::GridSet &gridSet,
794 if (nbat->XFormat != nbatXYZQ)
796 nbat->paramsDeprecated().q.resize(nbat->numAtoms());
799 for (const Nbnxm::Grid &grid : gridSet.grids())
801 /* Loop over all columns and copy and fill */
802 for (int cxy = 0; cxy < grid.numColumns(); cxy++)
804 const int atomOffset = grid.firstAtomInColumn(cxy);
805 const int numAtoms = grid.numAtomsInColumn(cxy);
806 const int paddedNumAtoms = grid.paddedNumAtomsInColumn(cxy);
808 if (nbat->XFormat == nbatXYZQ)
810 real *q = nbat->x().data() + atomOffset*STRIDE_XYZQ + ZZ + 1;
812 for (i = 0; i < numAtoms; i++)
814 *q = charge[gridSet.atomIndices()[atomOffset + i]];
817 /* Complete the partially filled last cell with zeros */
818 for (; i < paddedNumAtoms; i++)
826 real *q = nbat->paramsDeprecated().q.data() + atomOffset;
828 for (i = 0; i < numAtoms; i++)
830 *q = charge[gridSet.atomIndices()[atomOffset + i]];
833 /* Complete the partially filled last cell with zeros */
834 for (; i < paddedNumAtoms; i++)
844 /* Set the charges of perturbed atoms in nbnxn_atomdata_t to 0.
845 * This is to automatically remove the RF/PME self term in the nbnxn kernels.
846 * Some of the zero interactions are still calculated in the normal kernels.
847 * All perturbed interactions are calculated in the free energy kernel,
848 * using the original charge and LJ data, not nbnxn_atomdata_t.
850 static void nbnxn_atomdata_mask_fep(nbnxn_atomdata_t *nbat,
851 const Nbnxm::GridSet &gridSet)
853 nbnxn_atomdata_t::Params ¶ms = nbat->paramsDeprecated();
857 if (nbat->XFormat == nbatXYZQ)
859 q = nbat->x().data() + ZZ + 1;
860 stride_q = STRIDE_XYZQ;
868 for (const Nbnxm::Grid &grid : gridSet.grids())
871 if (grid.geometry().isSimple)
877 nsubc = c_gpuNumClusterPerCell;
880 int c_offset = grid.firstAtomInColumn(0);
882 /* Loop over all columns and copy and fill */
883 for (int c = 0; c < grid.numCells()*nsubc; c++)
885 /* Does this cluster contain perturbed particles? */
886 if (grid.clusterIsPerturbed(c))
888 const int numAtomsPerCluster = grid.geometry().numAtomsICluster;
889 for (int i = 0; i < numAtomsPerCluster; i++)
891 /* Is this a perturbed particle? */
892 if (grid.atomIsPerturbed(c, i))
894 int ind = c_offset + c*numAtomsPerCluster + i;
895 /* Set atom type and charge to non-interacting */
896 params.type[ind] = params.numTypes - 1;
905 /* Copies the energy group indices to a reordered and packed array */
906 static void copy_egp_to_nbat_egps(const int *a, int na, int na_round,
907 int na_c, int bit_shift,
908 const int *in, int *innb)
914 for (i = 0; i < na; i += na_c)
916 /* Store na_c energy group numbers into one int */
918 for (int sa = 0; sa < na_c; sa++)
923 comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
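/* e.g. with na_c = 4 and bit_shift = 2, group indices {1,0,3,2} pack into 1 | (0<<2) | (3<<4) | (2<<6) = 177 */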
928 /* Complete the partially filled last cell with fill */
929 for (; i < na_round; i += na_c)
935 /* Set the energy group indices for atoms in nbnxn_atomdata_t */
936 static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t::Params *params,
937 const Nbnxm::GridSet &gridSet,
940 if (params->nenergrp == 1)
945 params->energrp.resize(gridSet.numGridAtomsTotal());
947 for (const Nbnxm::Grid &grid : gridSet.grids())
949 /* Loop over all columns and copy and fill */
950 for (int i = 0; i < grid.numColumns(); i++)
952 const int numAtoms = grid.paddedNumAtomsInColumn(i);
953 const int atomOffset = grid.firstAtomInColumn(i);
955 copy_egp_to_nbat_egps(gridSet.atomIndices().data() + atomOffset,
956 grid.numAtomsInColumn(i), numAtoms,
957 c_nbnxnCpuIClusterSize, params->neg_2log,
959 params->energrp.data() + grid.atomToCluster(atomOffset));
964 /* Sets all required atom parameter data in nbnxn_atomdata_t */
965 void nbnxn_atomdata_set(nbnxn_atomdata_t *nbat,
966 const Nbnxm::GridSet &gridSet,
967 const t_mdatoms *mdatoms,
970 nbnxn_atomdata_t::Params ¶ms = nbat->paramsDeprecated();
972 nbnxn_atomdata_set_atomtypes(¶ms, gridSet, mdatoms->typeA);
974 nbnxn_atomdata_set_charges(nbat, gridSet, mdatoms->chargeA);
976 if (gridSet.haveFep())
978 nbnxn_atomdata_mask_fep(nbat, gridSet);
981 /* This must be done after masking types for FEP */
982 nbnxn_atomdata_set_ljcombparams(¶ms, nbat->XFormat, gridSet);
984 nbnxn_atomdata_set_energygroups(¶ms, gridSet, atinfo);
987 /* Copies the shift vector array to nbnxn_atomdata_t */
988 void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox,
990 nbnxn_atomdata_t *nbat)
994 nbat->bDynamicBox = bDynamicBox;
995 for (i = 0; i < SHIFTS; i++)
997 copy_rvec(shift_vec[i], nbat->shift_vec[i]);
1001 /* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
1002 void nbnxn_atomdata_copy_x_to_nbat_x(const Nbnxm::GridSet &gridSet,
1003 const Nbnxm::AtomLocality locality,
1006 nbnxn_atomdata_t *nbat)
1012 case Nbnxm::AtomLocality::All:
1013 case Nbnxm::AtomLocality::Count:
1015 gridEnd = gridSet.grids().size();
1017 case Nbnxm::AtomLocality::Local:
1021 case Nbnxm::AtomLocality::NonLocal:
1023 gridEnd = gridSet.grids().size();
1029 nbat->natoms_local = gridSet.grids()[0].atomIndexEnd();
1032 const int nth = gmx_omp_nthreads_get(emntPairsearch);
1034 #pragma omp parallel for num_threads(nth) schedule(static)
1035 for (int th = 0; th < nth; th++)
1039 for (int g = gridBegin; g < gridEnd; g++)
1041 const Nbnxm::Grid &grid = gridSet.grids()[g];
1042 const int numCellsXY = grid.numColumns();
1044 const int cxy0 = (numCellsXY* th + nth - 1)/nth;
1045 const int cxy1 = (numCellsXY*(th + 1) + nth - 1)/nth;
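/* e.g. with numCellsXY = 10 and nth = 4, threads handle columns 0-2, 3-4, 5-7 and 8-9 */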
1047 for (int cxy = cxy0; cxy < cxy1; cxy++)
1049 const int na = grid.numAtomsInColumn(cxy);
1050 const int ash = grid.firstAtomInColumn(cxy);
1053 if (g == 0 && FillLocal)
1055 na_fill = grid.paddedNumAtomsInColumn(cxy);
1059 /* We fill only the real particle locations.
1060 * We assume the filling entries at the end have been
1061 * properly set earlier, during pair-list generation.
1065 copy_rvec_to_nbat_real(gridSet.atomIndices().data() + ash,
1067 nbat->XFormat, nbat->x().data(), ash);
1071 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1076 nbnxn_atomdata_clear_reals(gmx::ArrayRef<real> dest,
1079 for (int i = i0; i < i1; i++)
1085 gmx_unused static void
1086 nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
1088 const real ** gmx_restrict src,
1094 /* The destination buffer already contains data; add to it */
1095 for (int i = i0; i < i1; i++)
1097 for (int s = 0; s < nsrc; s++)
1099 dest[i] += src[s][i];
1105 /* The destination buffer is uninitialized; set it first */
1106 for (int i = i0; i < i1; i++)
1108 dest[i] = src[0][i];
1109 for (int s = 1; s < nsrc; s++)
1111 dest[i] += src[s][i];
1117 gmx_unused static void
1118 nbnxn_atomdata_reduce_reals_simd(real gmx_unused * gmx_restrict dest,
1119 gmx_bool gmx_unused bDestSet,
1120 const gmx_unused real ** gmx_restrict src,
1121 int gmx_unused nsrc,
1122 int gmx_unused i0, int gmx_unused i1)
1125 /* The SIMD width here is actually independent of that in the kernels,
1126 * but we use the same width for simplicity (usually optimal anyhow).
1128 SimdReal dest_SSE, src_SSE;
1132 for (int i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
1134 dest_SSE = load<SimdReal>(dest+i);
1135 for (int s = 0; s < nsrc; s++)
1137 src_SSE = load<SimdReal>(src[s]+i);
1138 dest_SSE = dest_SSE + src_SSE;
1140 store(dest+i, dest_SSE);
1145 for (int i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
1147 dest_SSE = load<SimdReal>(src[0]+i);
1148 for (int s = 1; s < nsrc; s++)
1150 src_SSE = load<SimdReal>(src[s]+i);
1151 dest_SSE = dest_SSE + src_SSE;
1153 store(dest+i, dest_SSE);
1159 /* Add part of the force array(s) from nbnxn_atomdata_t to f */
1161 nbnxn_atomdata_add_nbat_f_to_f_part(const Nbnxm::GridSet &gridSet,
1162 const nbnxn_atomdata_t *nbat,
1163 gmx::ArrayRef<const nbnxn_atomdata_output_t> out,
1168 gmx::ArrayRef<const int> cell = gridSet.cells();
1170 /* Loop over all columns and copy and fill */
1171 switch (nbat->FFormat)
1177 const real *fnb = out[0].f.data();
1179 for (int a = a0; a < a1; a++)
1181 int i = cell[a]*nbat->fstride;
1184 f[a][YY] += fnb[i+1];
1185 f[a][ZZ] += fnb[i+2];
1190 for (int a = a0; a < a1; a++)
1192 int i = cell[a]*nbat->fstride;
1194 for (int fa = 0; fa < nfa; fa++)
1196 f[a][XX] += out[fa].f[i];
1197 f[a][YY] += out[fa].f[i+1];
1198 f[a][ZZ] += out[fa].f[i+2];
1206 const real *fnb = out[0].f.data();
1208 for (int a = a0; a < a1; a++)
1210 int i = atom_to_x_index<c_packX4>(cell[a]);
1212 f[a][XX] += fnb[i+XX*c_packX4];
1213 f[a][YY] += fnb[i+YY*c_packX4];
1214 f[a][ZZ] += fnb[i+ZZ*c_packX4];
1219 for (int a = a0; a < a1; a++)
1221 int i = atom_to_x_index<c_packX4>(cell[a]);
1223 for (int fa = 0; fa < nfa; fa++)
1225 f[a][XX] += out[fa].f[i+XX*c_packX4];
1226 f[a][YY] += out[fa].f[i+YY*c_packX4];
1227 f[a][ZZ] += out[fa].f[i+ZZ*c_packX4];
1235 const real *fnb = out[0].f.data();
1237 for (int a = a0; a < a1; a++)
1239 int i = atom_to_x_index<c_packX8>(cell[a]);
1241 f[a][XX] += fnb[i+XX*c_packX8];
1242 f[a][YY] += fnb[i+YY*c_packX8];
1243 f[a][ZZ] += fnb[i+ZZ*c_packX8];
1248 for (int a = a0; a < a1; a++)
1250 int i = atom_to_x_index<c_packX8>(cell[a]);
1252 for (int fa = 0; fa < nfa; fa++)
1254 f[a][XX] += out[fa].f[i+XX*c_packX8];
1255 f[a][YY] += out[fa].f[i+YY*c_packX8];
1256 f[a][ZZ] += out[fa].f[i+ZZ*c_packX8];
1262 gmx_incons("Unsupported nbnxn_atomdata_t format");
1266 static inline unsigned char reverse_bits(unsigned char b)
1268 /* http://graphics.stanford.edu/~seander/bithacks.html#ReverseByteWith64BitsDiv */
1269 return (b * 0x0202020202ULL & 0x010884422010ULL) % 1023;
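/* e.g. reverse_bits(0b00000001) == 0b10000000 and reverse_bits(0b00001101) == 0b10110000 */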
1272 static void nbnxn_atomdata_add_nbat_f_to_f_treereduce(nbnxn_atomdata_t *nbat,
1275 const nbnxn_buffer_flags_t *flags = &nbat->buffer_flags;
1277 int next_pow2 = 1<<(gmx::log2I(nth-1)+1);
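/* For nth > 1, next_pow2 is the smallest power of two that is >= nth, e.g. nth = 6 gives next_pow2 = 8 */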
1279 const int numOutputBuffers = nbat->out.size();
1280 GMX_ASSERT(numOutputBuffers == nth,
1281 "tree-reduce currently only works for numOutputBuffers==nth");
1283 memset(nbat->syncStep, 0, sizeof(*(nbat->syncStep))*nth);
1285 #pragma omp parallel num_threads(nth)
1293 th = gmx_omp_get_thread_num();
1295 for (group_size = 2; group_size < 2*next_pow2; group_size *= 2)
1297 int index[2], group_pos, partner_pos, wu;
1298 int partner_th = th ^ (group_size/2);
1303 /* wait on partner thread - replaces full barrier */
1304 int sync_th, sync_group_size;
1306 tMPI_Atomic_memory_barrier(); /* guarantee data is saved before marking work as done */
1307 tMPI_Atomic_set(&(nbat->syncStep[th]), group_size/2); /* mark previous step as completed */
1309 /* find thread to sync with. Equal to partner_th unless nth is not a power of two. */
1310 for (sync_th = partner_th, sync_group_size = group_size; sync_th >= nth && sync_group_size > 2; sync_group_size /= 2)
1312 sync_th &= ~(sync_group_size/4);
1314 if (sync_th < nth) /* otherwise there is nothing to sync; index[1] will be >= the number of output buffers */
1316 /* wait on the thread which computed the input data in the previous step */
1317 while (tMPI_Atomic_get(static_cast<volatile tMPI_Atomic_t*>(&(nbat->syncStep[sync_th]))) < group_size/2)
1321 /* guarantee that no later load happens before the wait loop is finished */
1322 tMPI_Atomic_memory_barrier();
1324 #else /* TMPI_ATOMICS */
1329 /* Calculate buffers to sum (result goes into first buffer) */
1330 group_pos = th % group_size;
1331 index[0] = th - group_pos;
1332 index[1] = index[0] + group_size/2;
1334 /* If no second buffer, nothing to do */
1335 if (index[1] >= numOutputBuffers && group_size > 2)
1340 #if NBNXN_BUFFERFLAG_MAX_THREADS > 256
1341 #error reverse_bits assumes max 256 threads
1343 /* Position is permuted so that one of the 2 vectors being added was computed on the same thread in the previous step.
1344 This improves locality and makes it possible to sync with just a single thread between steps (= the levels in the binary tree).
1345 The permutation which allows this corresponds to reversing the bits of the group position.
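For example, with group_size = 8 a thread at group position 3 (binary 011) maps to position 6 (binary 110).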
1347 group_pos = reverse_bits(group_pos)/(256/group_size);
1349 partner_pos = group_pos ^ 1;
1351 /* loop over two work-units (own and partner) */
1352 for (wu = 0; wu < 2; wu++)
1356 if (partner_th < nth)
1358 break; /* the partner exists, so we don't have to do its work */
1362 group_pos = partner_pos;
1366 /* Calculate the cell-block range for our thread */
1367 b0 = (flags->nflag* group_pos )/group_size;
1368 b1 = (flags->nflag*(group_pos+1))/group_size;
1370 for (b = b0; b < b1; b++)
1372 i0 = b *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1373 i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1375 if (bitmask_is_set(flags->flag[b], index[1]) || group_size > 2)
1377 const real *fIndex1 = nbat->out[index[1]].f.data();
1379 nbnxn_atomdata_reduce_reals_simd
1381 nbnxn_atomdata_reduce_reals
1383 (nbat->out[index[0]].f.data(),
1384 bitmask_is_set(flags->flag[b], index[0]) || group_size > 2,
1385 &fIndex1, 1, i0, i1);
1388 else if (!bitmask_is_set(flags->flag[b], index[0]))
1390 nbnxn_atomdata_clear_reals(nbat->out[index[0]].f,
1397 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1402 static void nbnxn_atomdata_add_nbat_f_to_f_stdreduce(nbnxn_atomdata_t *nbat,
1405 #pragma omp parallel for num_threads(nth) schedule(static)
1406 for (int th = 0; th < nth; th++)
1410 const nbnxn_buffer_flags_t *flags;
1412 const real *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];
1414 flags = &nbat->buffer_flags;
1416 /* Calculate the cell-block range for our thread */
1417 int b0 = (flags->nflag* th )/nth;
1418 int b1 = (flags->nflag*(th+1))/nth;
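/* e.g. with nflag = 10 and nth = 4, thread 1 reduces cell-blocks 2, 3 and 4 */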
1420 for (int b = b0; b < b1; b++)
1422 int i0 = b *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1423 int i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1426 for (int out = 1; out < gmx::ssize(nbat->out); out++)
1428 if (bitmask_is_set(flags->flag[b], out))
1430 fptr[nfptr++] = nbat->out[out].f.data();
1436 nbnxn_atomdata_reduce_reals_simd
1438 nbnxn_atomdata_reduce_reals
1440 (nbat->out[0].f.data(),
1441 bitmask_is_set(flags->flag[b], 0),
1445 else if (!bitmask_is_set(flags->flag[b], 0))
1447 nbnxn_atomdata_clear_reals(nbat->out[0].f,
1452 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1456 /* Add the force array(s) from nbnxn_atomdata_t to f */
1457 void reduceForces(nbnxn_atomdata_t *nbat,
1458 const Nbnxm::AtomLocality locality,
1459 const Nbnxm::GridSet &gridSet,
1466 case Nbnxm::AtomLocality::All:
1467 case Nbnxm::AtomLocality::Count:
1469 na = gridSet.numRealAtomsTotal();
1471 case Nbnxm::AtomLocality::Local:
1473 na = gridSet.numRealAtomsLocal();
1475 case Nbnxm::AtomLocality::NonLocal:
1476 a0 = gridSet.numRealAtomsLocal();
1477 na = gridSet.numRealAtomsTotal() - gridSet.numRealAtomsLocal();
1481 int nth = gmx_omp_nthreads_get(emntNonbonded);
1483 if (nbat->out.size() > 1)
1485 if (locality != Nbnxm::AtomLocality::All)
1487 gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");
1490 /* Reduce the force thread output buffers into buffer 0, before adding
1491 * them to the differently ordered "real" force buffer.
1493 if (nbat->bUseTreeReduce)
1495 nbnxn_atomdata_add_nbat_f_to_f_treereduce(nbat, nth);
1499 nbnxn_atomdata_add_nbat_f_to_f_stdreduce(nbat, nth);
1502 #pragma omp parallel for num_threads(nth) schedule(static)
1503 for (int th = 0; th < nth; th++)
1507 nbnxn_atomdata_add_nbat_f_to_f_part(gridSet, nbat,
1514 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1518 void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
1521 gmx::ArrayRef<const nbnxn_atomdata_output_t> outputBuffers = nbat->out;
1523 if (outputBuffers.size() == 1)
1525 /* When there is a single output object, with CPU or GPU, shift forces
1526 * have been written directly to the main buffer instead of to the
1527 * (single) thread local output object. There is nothing to reduce.
1532 for (int s = 0; s < SHIFTS; s++)
1536 for (const nbnxn_atomdata_output_t &out : outputBuffers)
1538 sum[XX] += out.fshift[s*DIM+XX];
1539 sum[YY] += out.fshift[s*DIM+YY];
1540 sum[ZZ] += out.fshift[s*DIM+ZZ];
1542 rvec_inc(fshift[s], sum);