2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
47 #include "thread_mpi/atomic.h"
49 #include "gromacs/math/functions.h"
50 #include "gromacs/math/utilities.h"
51 #include "gromacs/math/vec.h"
52 #include "gromacs/mdlib/gmx_omp_nthreads.h"
53 #include "gromacs/mdtypes/forcerec.h" // only for GET_CGINFO_*
54 #include "gromacs/mdtypes/mdatom.h"
55 #include "gromacs/nbnxm/nbnxm.h"
56 #include "gromacs/nbnxm/nbnxm_geometry.h"
57 #include "gromacs/nbnxm/pairlist.h"
58 #include "gromacs/pbcutil/ishift.h"
59 #include "gromacs/simd/simd.h"
60 #include "gromacs/timing/wallcycle.h"
61 #include "gromacs/utility/exceptions.h"
62 #include "gromacs/utility/fatalerror.h"
63 #include "gromacs/utility/gmxomp.h"
64 #include "gromacs/utility/logger.h"
65 #include "gromacs/utility/smalloc.h"
66 #include "gromacs/utility/strconvert.h"
67 #include "gromacs/utility/stringutil.h"
72 using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
74 void nbnxn_atomdata_t::resizeCoordinateBuffer(int numAtoms)
78 x_.resize(numAtoms*xstride);
81 void nbnxn_atomdata_t::resizeForceBuffers()
83 /* Force buffers need padding up to a multiple of the buffer flag size */
84 const int paddedSize = (numAtoms() + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE*NBNXN_BUFFERFLAG_SIZE;
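/* Illustrative example (assuming NBNXN_BUFFERFLAG_SIZE == 16): with
 * numAtoms() == 1000, paddedSize becomes (1000 + 15)/16*16 == 1008, i.e.
 * the atom count rounded up to the next buffer-flag block boundary.
 */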
86 /* Should we let each thread allocate its own data instead? */
87 for (nbnxn_atomdata_output_t &outBuffer : out)
89 outBuffer.f.resize(paddedSize*fstride);
93 /* Initializes an nbnxn_atomdata_output_t data structure */
94 nbnxn_atomdata_output_t::nbnxn_atomdata_output_t(Nbnxm::KernelType kernelType,
96 int simdEnergyBufferStride,
97 gmx::PinningPolicy pinningPolicy) :
98 f({}, {pinningPolicy}),
99 fshift({}, {pinningPolicy}),
100 Vvdw({}, {pinningPolicy}),
101 Vc({}, {pinningPolicy})
103 fshift.resize(SHIFTS*DIM);
104 Vvdw.resize(numEnergyGroups*numEnergyGroups);
105 Vc.resize(numEnergyGroups*numEnergyGroups);
107 if (Nbnxm::kernelTypeIsSimd(kernelType))
109 int cj_size = Nbnxm::JClusterSizePerKernelType[kernelType];
110 int numElements = numEnergyGroups*numEnergyGroups*simdEnergyBufferStride*(cj_size/2)*cj_size;
111 VSvdw.resize(numElements);
112 VSc.resize(numElements);
116 static void copy_int_to_nbat_int(const int *a, int na, int na_round,
117 const int *in, int fill, int *innb)
122 for (i = 0; i < na; i++)
124 innb[j++] = in[a[i]];
126 /* Complete the partially filled last cell with fill */
127 for (; i < na_round; i++)
133 void copy_rvec_to_nbat_real(const int *a, int na, int na_round,
134 const rvec *x, int nbatFormat,
137 /* We complete partially filled cells (these can only be the last cell in
138 * each column) with coordinates farAway. The actual coordinate value does
139 * not influence the results, since these filler particles do not interact.
140 * Clusters with normal atoms + fillers have a bounding box based only
141 * on the coordinates of the atoms. Clusters with only fillers have as
142 * the bounding box the coordinates of the first filler. Such clusters
143 * are not considered as i-entries, but they are considered as j-entries.
144 * So for performance it is better to have their bounding boxes far away,
145 * such that filler-only clusters don't end up in the pair list.
147 const real farAway = -1000000;
155 for (i = 0; i < na; i++)
157 xnb[j++] = x[a[i]][XX];
158 xnb[j++] = x[a[i]][YY];
159 xnb[j++] = x[a[i]][ZZ];
161 /* Complete the partially filled last cell with farAway elements */
162 for (; i < na_round; i++)
171 for (i = 0; i < na; i++)
173 xnb[j++] = x[a[i]][XX];
174 xnb[j++] = x[a[i]][YY];
175 xnb[j++] = x[a[i]][ZZ];
178 /* Complete the partially filled last cell with farAway coordinates */
179 for (; i < na_round; i++)
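/* Sketch of the packed formats handled below (assuming c_packX4 == 4 and
 * c_packX8 == 8): nbatX4 stores coordinates per pack of 4 atoms as
 *   x0 x1 x2 x3  y0 y1 y2 y3  z0 z1 z2 z3  x4 x5 ...
 * so atom a is written at atom_to_x_index<c_packX4>(a) with offsets
 * XX/YY/ZZ*c_packX4; nbatX8 follows the same pattern with packs of 8.
 */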
188 j = atom_to_x_index<c_packX4>(a0);
189 c = a0 & (c_packX4-1);
190 for (i = 0; i < na; i++)
192 xnb[j+XX*c_packX4] = x[a[i]][XX];
193 xnb[j+YY*c_packX4] = x[a[i]][YY];
194 xnb[j+ZZ*c_packX4] = x[a[i]][ZZ];
199 j += (DIM-1)*c_packX4;
203 /* Complete the partially filled last cell with farAway elements */
204 for (; i < na_round; i++)
206 xnb[j+XX*c_packX4] = farAway;
207 xnb[j+YY*c_packX4] = farAway;
208 xnb[j+ZZ*c_packX4] = farAway;
213 j += (DIM-1)*c_packX4;
219 j = atom_to_x_index<c_packX8>(a0);
220 c = a0 & (c_packX8 - 1);
221 for (i = 0; i < na; i++)
223 xnb[j+XX*c_packX8] = x[a[i]][XX];
224 xnb[j+YY*c_packX8] = x[a[i]][YY];
225 xnb[j+ZZ*c_packX8] = x[a[i]][ZZ];
230 j += (DIM-1)*c_packX8;
234 /* Complete the partially filled last cell with farAway elements */
235 for (; i < na_round; i++)
237 xnb[j+XX*c_packX8] = farAway;
238 xnb[j+YY*c_packX8] = farAway;
239 xnb[j+ZZ*c_packX8] = farAway;
244 j += (DIM-1)*c_packX8;
250 gmx_incons("Unsupported nbnxn_atomdata_t format");
254 /* Stores the LJ parameter data in a format convenient for different kernels */
255 static void set_lj_parameter_data(nbnxn_atomdata_t::Params *params, gmx_bool bSIMD)
257 int nt = params->numTypes;
262 /* nbfp_aligned stores two parameters using the stride most suitable
263 * for the present SIMD architecture, as specified by the constant
264 * c_simdBestPairAlignment from the SIMD header.
265 * There's a slight inefficiency in allocating and initializing nbfp_aligned
266 * when it might not be used, but introducing the conditional code is not worth it.
269 params->nbfp_aligned.resize(nt*nt*c_simdBestPairAlignment);
271 for (int i = 0; i < nt; i++)
273 for (int j = 0; j < nt; j++)
275 params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+0] = params->nbfp[(i*nt+j)*2+0];
276 params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+1] = params->nbfp[(i*nt+j)*2+1];
277 params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+2] = 0;
278 params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+3] = 0;
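/* Illustrative layout (assuming c_simdBestPairAlignment == 4): each type
 * pair (i,j) then occupies one aligned group { c6, c12, 0, 0 } starting at
 * (i*nt + j)*c_simdBestPairAlignment, so a kernel can fetch both parameters
 * of a pair with a single aligned SIMD load.
 */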
284 /* We use combination rule data for SIMD combination rule kernels
285 * and with LJ-PME kernels. We then only need parameters per atom type,
286 * not per pair of atom types.
288 params->nbfp_comb.resize(nt*2);
289 switch (params->comb_rule)
292 params->comb_rule = ljcrGEOM;
294 for (int i = 0; i < nt; i++)
296 /* Store the sqrt of the diagonal from the nbfp matrix */
297 params->nbfp_comb[i*2 ] = std::sqrt(params->nbfp[(i*nt+i)*2 ]);
298 params->nbfp_comb[i*2+1] = std::sqrt(params->nbfp[(i*nt+i)*2+1]);
302 for (int i = 0; i < nt; i++)
304 /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
305 const real c6 = params->nbfp[(i*nt+i)*2 ];
306 const real c12 = params->nbfp[(i*nt+i)*2+1];
307 if (c6 > 0 && c12 > 0)
309 /* We store 0.5*2^1/6*sigma and sqrt(4*3*eps),
310 * so we get 6*C6 and 12*C12 after combining.
312 params->nbfp_comb[i*2 ] = 0.5*gmx::sixthroot(c12/c6);
313 params->nbfp_comb[i*2+1] = std::sqrt(c6*c6/c12);
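/* Worked check of the comment above, assuming the usual C6 = 4*eps*sigma^6,
 * C12 = 4*eps*sigma^12 convention, so nbfp holds c6 = 24*eps*sigma^6 and
 * c12 = 48*eps*sigma^12: then 0.5*sixthroot(c12/c6) = 0.5*2^(1/6)*sigma and
 * sqrt(c6*c6/c12) = sqrt(12*eps). Combining atoms i and j as
 * (s_i + s_j)^6 * e_i*e_j gives 2*sigma_ij^6 * 12*eps_ij = 6*C6_ij, and the
 * corresponding 12th power gives 12*C12_ij, as stated.
 */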
317 params->nbfp_comb[i*2 ] = 0;
318 params->nbfp_comb[i*2+1] = 0;
323 /* We always store the full matrix (see code above) */
326 gmx_incons("Unknown combination rule");
330 nbnxn_atomdata_t::SimdMasks::SimdMasks()
333 constexpr int simd_width = GMX_SIMD_REAL_WIDTH;
334 /* Set the diagonal cluster pair exclusion mask setup data.
335 * In the kernel we check 0 < j - i to generate the masks.
336 * Here we store j - i for generating the mask for the first i,
337 * we subtract 0.5 to avoid rounding issues.
338 * In the kernel we can subtract 1 to generate the subsequent mask.
340 const int simd_4xn_diag_size = std::max(c_nbnxnCpuIClusterSize, simd_width);
341 diagonal_4xn_j_minus_i.resize(simd_4xn_diag_size);
342 for (int j = 0; j < simd_4xn_diag_size; j++)
344 diagonal_4xn_j_minus_i[j] = j - 0.5;
347 diagonal_2xnn_j_minus_i.resize(simd_width);
348 for (int j = 0; j < simd_width/2; j++)
350 /* The j-cluster size is half the SIMD width */
351 diagonal_2xnn_j_minus_i[j] = j - 0.5;
352 /* The next half of the SIMD width is for i + 1 */
353 diagonal_2xnn_j_minus_i[simd_width/2 + j] = j - 1 - 0.5;
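/* Example of the stored offsets, assuming GMX_SIMD_REAL_WIDTH == 8:
 * diagonal_4xn_j_minus_i  = { -0.5, 0.5, 1.5, ..., 6.5 } and
 * diagonal_2xnn_j_minus_i = { -0.5, 0.5, 1.5, 2.5, -1.5, -0.5, 0.5, 1.5 },
 * where the second half of the 2xnn array belongs to i + 1.
 */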
356 /* We use up to 32 bits for exclusion masking.
357 * The same masks are used for the 4xN and 2x(N+N) kernels.
358 * The masks are read either into integer SIMD registers or into
359 * real SIMD registers (together with a cast).
360 * In single precision this means the real and integer SIMD registers are the same size.
363 const int simd_excl_size = c_nbnxnCpuIClusterSize*simd_width;
364 #if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
365 exclusion_filter64.resize(simd_excl_size);
367 exclusion_filter.resize(simd_excl_size);
370 for (int j = 0; j < simd_excl_size; j++)
372 /* Set the consecutive bits for masking pair exclusions */
373 #if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
374 exclusion_filter64[j] = (1U << j);
376 exclusion_filter[j] = (1U << j);
380 if (!GMX_SIMD_HAVE_LOGICAL && !GMX_SIMD_HAVE_INT32_LOGICAL) // NOLINT(misc-redundant-expression)
382 // If the SIMD implementation has no bitwise logical operation support
383 // whatsoever we cannot use the normal masking. Instead,
384 // we generate a vector of all 2^4 possible ways an i atom
385 // interacts with its 4 j atoms. Each array entry contains
386 // GMX_SIMD_REAL_WIDTH values that are read with a single aligned SIMD load.
387 // Since there is no logical value representation in this case, we use
388 // any nonzero value to indicate 'true', while zero means 'false'.
389 // This can then be converted to a SIMD boolean internally in the SIMD
390 // module by comparing to zero.
391 // Each array entry encodes how this i atom will interact with the 4 j atoms.
392 // Matching code exists in set_ci_top_excls() to generate indices into this array.
393 // Those indices are used in the kernels.
395 const int simd_excl_size = c_nbnxnCpuIClusterSize*c_nbnxnCpuIClusterSize;
396 const real simdFalse = 0.0;
397 const real simdTrue = 1.0;
399 interaction_array.resize(simd_excl_size * GMX_SIMD_REAL_WIDTH);
400 for (int j = 0; j < simd_excl_size; j++)
402 const int index = j * GMX_SIMD_REAL_WIDTH;
403 for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
405 interaction_array[index + i] = (j & (1 << i)) ? simdTrue : simdFalse;
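/* Example entry, assuming c_nbnxnCpuIClusterSize == 4 and
 * GMX_SIMD_REAL_WIDTH == 4: for j == 5 (binary 0101) the stored values are
 * { 1.0, 0.0, 1.0, 0.0 }, i.e. this i-atom interacts only with j-atoms 0
 * and 2; the SIMD module turns these into booleans by comparing to zero.
 */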
412 nbnxn_atomdata_t::Params::Params(gmx::PinningPolicy pinningPolicy) :
414 nbfp({}, {pinningPolicy}),
415 nbfp_comb({}, {pinningPolicy}),
416 type({}, {pinningPolicy}),
417 lj_comb({}, {pinningPolicy}),
418 q({}, {pinningPolicy}),
421 energrp({}, {pinningPolicy})
425 nbnxn_atomdata_t::nbnxn_atomdata_t(gmx::PinningPolicy pinningPolicy) :
426 params_(pinningPolicy),
429 shift_vec({}, {pinningPolicy}),
430 x_({}, {pinningPolicy}),
432 bUseBufferFlags(FALSE),
433 bUseTreeReduce(FALSE)
437 /* Initializes an nbnxn_atomdata_t::Params data structure */
438 static void nbnxn_atomdata_params_init(const gmx::MDLogger &mdlog,
439 nbnxn_atomdata_t::Params *params,
440 const Nbnxm::KernelType kernelType,
441 int enbnxninitcombrule,
442 int ntype, const real *nbfp,
447 gmx_bool simple, bCombGeom, bCombLB, bSIMD;
451 fprintf(debug, "There are %d atom types in the system, adding one for nbnxn_atomdata_t\n", ntype);
453 params->numTypes = ntype + 1;
454 params->nbfp.resize(params->numTypes*params->numTypes*2);
455 params->nbfp_comb.resize(params->numTypes*2);
457 /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
458 * force-field floating point parameters.
461 ptr = getenv("GMX_LJCOMB_TOL");
466 sscanf(ptr, "%lf", &dbl);
472 /* Temporarily fill params->nbfp_comb with sigma and epsilon
473 * to check for the LB rule.
475 for (int i = 0; i < ntype; i++)
477 c6 = nbfp[(i*ntype+i)*2 ]/6.0;
478 c12 = nbfp[(i*ntype+i)*2 + 1]/12.0;
479 if (c6 > 0 && c12 > 0)
481 params->nbfp_comb[i*2 ] = gmx::sixthroot(c12/c6);
482 params->nbfp_comb[i*2 + 1] = 0.25*c6*c6/c12;
484 else if (c6 == 0 && c12 == 0)
486 params->nbfp_comb[i*2 ] = 0;
487 params->nbfp_comb[i*2 + 1] = 0;
491 /* Cannot use the LB rule with only dispersion or repulsion */
496 for (int i = 0; i < params->numTypes; i++)
498 for (int j = 0; j < params->numTypes; j++)
500 if (i < ntype && j < ntype)
502 /* fr->nbfp has been updated, so that array too now stores c6/c12 including
503 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
505 c6 = nbfp[(i*ntype+j)*2 ];
506 c12 = nbfp[(i*ntype+j)*2 + 1];
507 params->nbfp[(i*params->numTypes+j)*2 ] = c6;
508 params->nbfp[(i*params->numTypes+j)*2 + 1] = c12;
510 /* Compare 6*C6 and 12*C12 for the geometric combination rule */
511 bCombGeom = bCombGeom &&
512 gmx_within_tol(c6*c6, nbfp[(i*ntype+i)*2 ]*nbfp[(j*ntype+j)*2 ], tol) &&
513 gmx_within_tol(c12*c12, nbfp[(i*ntype+i)*2 + 1]*nbfp[(j*ntype+j)*2 + 1], tol);
515 /* Compare C6 and C12 for Lorentz-Berthelot combination rule */
519 ((c6 == 0 && c12 == 0 &&
520 (params->nbfp_comb[i*2 + 1] == 0 || params->nbfp_comb[j*2 + 1] == 0)) ||
521 (c6 > 0 && c12 > 0 &&
522 gmx_within_tol(gmx::sixthroot(c12/c6),
523 0.5*(params->nbfp_comb[i*2]+params->nbfp_comb[j*2]), tol) &&
524 gmx_within_tol(0.25*c6*c6/c12, std::sqrt(params->nbfp_comb[i*2 + 1]*params->nbfp_comb[j*2 + 1]), tol)));
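/* In other words: the geometric rule is accepted when c6_ij^2 ~= c6_ii*c6_jj
 * and c12_ij^2 ~= c12_ii*c12_jj (compared squared to avoid square roots),
 * and the LB rule when sigma_ij ~= 0.5*(sigma_i + sigma_j) and
 * eps_ij ~= sqrt(eps_i*eps_j), using the sigma/epsilon values stored
 * temporarily in nbfp_comb above.
 */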
528 /* Add zero parameters for the additional dummy atom type */
529 params->nbfp[(i*params->numTypes + j)*2 ] = 0;
530 params->nbfp[(i*params->numTypes + j)*2+1] = 0;
536 fprintf(debug, "Combination rules: geometric %s Lorentz-Berthelot %s\n",
537 gmx::boolToString(bCombGeom), gmx::boolToString(bCombLB));
540 simple = Nbnxm::kernelTypeUsesSimplePairlist(kernelType);
542 switch (enbnxninitcombrule)
544 case enbnxninitcombruleDETECT:
545 /* We prefer the geometric combination rule,
546 * as that gives a slightly faster kernel than the LB rule.
550 params->comb_rule = ljcrGEOM;
554 params->comb_rule = ljcrLB;
558 params->comb_rule = ljcrNONE;
560 params->nbfp_comb.clear();
565 if (params->comb_rule == ljcrNONE)
567 mesg = "Using full Lennard-Jones parameter combination matrix";
571 mesg = gmx::formatString("Using %s Lennard-Jones combination rule",
572 params->comb_rule == ljcrGEOM ? "geometric" : "Lorentz-Berthelot");
574 GMX_LOG(mdlog.info).asParagraph().appendText(mesg);
577 case enbnxninitcombruleGEOM:
578 params->comb_rule = ljcrGEOM;
580 case enbnxninitcombruleLB:
581 params->comb_rule = ljcrLB;
583 case enbnxninitcombruleNONE:
584 params->comb_rule = ljcrNONE;
586 params->nbfp_comb.clear();
589 gmx_incons("Unknown enbnxninitcombrule");
592 bSIMD = Nbnxm::kernelTypeIsSimd(kernelType);
594 set_lj_parameter_data(params, bSIMD);
596 params->nenergrp = n_energygroups;
599 // We now check for energy groups already when starting mdrun
600 GMX_RELEASE_ASSERT(n_energygroups == 1, "GPU kernels do not support energy groups");
602 /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
603 if (params->nenergrp > 64)
605 gmx_fatal(FARGS, "With NxN kernels not more than 64 energy groups are supported\n");
607 params->neg_2log = 1;
608 while (params->nenergrp > (1<<params->neg_2log))
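/* neg_2log ends up as the number of bits needed per energy-group index,
 * e.g. nenergrp == 5 gives neg_2log == 3, so each group index later
 * occupies 3 bits in the packed energrp integers.
 */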
614 /* Initializes an nbnxn_atomdata_t data structure */
615 void nbnxn_atomdata_init(const gmx::MDLogger &mdlog,
616 nbnxn_atomdata_t *nbat,
617 const Nbnxm::KernelType kernelType,
618 int enbnxninitcombrule,
619 int ntype, const real *nbfp,
623 nbnxn_atomdata_params_init(mdlog, &nbat->paramsDeprecated(), kernelType,
624 enbnxninitcombrule, ntype, nbfp, n_energygroups);
626 const bool simple = Nbnxm::kernelTypeUsesSimplePairlist(kernelType);
627 const bool bSIMD = Nbnxm::kernelTypeIsSimd(kernelType);
635 pack_x = std::max(c_nbnxnCpuIClusterSize,
636 Nbnxm::JClusterSizePerKernelType[kernelType]);
640 nbat->XFormat = nbatX4;
643 nbat->XFormat = nbatX8;
646 gmx_incons("Unsupported packing width");
651 nbat->XFormat = nbatXYZ;
654 nbat->FFormat = nbat->XFormat;
658 nbat->XFormat = nbatXYZQ;
659 nbat->FFormat = nbatXYZ;
662 nbat->shift_vec.resize(SHIFTS);
664 nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
665 nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
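/* For reference (assuming STRIDE_XYZQ == 4): nbatXYZQ interleaves x y z q
 * per atom, so xstride is 4 there, while the plain and packed coordinate
 * formats use a stride of DIM == 3; forces never carry a charge, so in
 * practice fstride is DIM.
 */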
667 /* Initialize the output data structures */
668 for (int i = 0; i < nout; i++)
670 const auto &pinningPolicy = nbat->params().type.get_allocator().pinningPolicy();
671 nbat->out.emplace_back(kernelType, nbat->params().nenergrp, 1 << nbat->params().neg_2log,
675 nbat->buffer_flags.flag = nullptr;
676 nbat->buffer_flags.flag_nalloc = 0;
678 const int nth = gmx_omp_nthreads_get(emntNonbonded);
680 const char *ptr = getenv("GMX_USE_TREEREDUCE");
683 nbat->bUseTreeReduce = (strtol(ptr, nullptr, 10) != 0);
686 else if (nth > 8) /* on the CPU we currently don't benefit even at 32 threads */
688 nbat->bUseTreeReduce = true;
693 nbat->bUseTreeReduce = false;
695 if (nbat->bUseTreeReduce)
697 GMX_LOG(mdlog.info).asParagraph().appendText("Using tree force reduction");
699 nbat->syncStep = new tMPI_Atomic[nth];
703 template<int packSize>
704 static void copy_lj_to_nbat_lj_comb(gmx::ArrayRef<const real> ljparam_type,
705 const int *type, int na,
708 /* The LJ params follow the combination rule:
709 * copy the params from the type array to the atom array.
711 for (int is = 0; is < na; is += packSize)
713 for (int k = 0; k < packSize; k++)
716 ljparam_at[is*2 + k] = ljparam_type[type[i]*2 ];
717 ljparam_at[is*2 + packSize + k] = ljparam_type[type[i]*2 + 1];
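/* Illustrative layout for packSize == 4: each pack of four atoms stores
 * { a0 a1 a2 a3  b0 b1 b2 b3 }, i.e. the first combination-rule parameter
 * of the four atoms followed by the second, so a kernel can load each
 * parameter for a whole pack with one SIMD load.
 */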
722 /* Sets the atom type in nbnxn_atomdata_t */
723 static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t::Params *params,
724 const Nbnxm::GridSet &gridSet,
727 params->type.resize(gridSet.numGridAtomsTotal());
729 for (const Nbnxm::Grid &grid : gridSet.grids())
731 /* Loop over all columns and copy and fill */
732 for (int i = 0; i < grid.numColumns(); i++)
734 const int numAtoms = grid.paddedNumAtomsInColumn(i);
735 const int atomOffset = grid.firstAtomInColumn(i);
737 copy_int_to_nbat_int(gridSet.atomIndices().data() + atomOffset,
738 grid.numAtomsInColumn(i), numAtoms,
739 type, params->numTypes - 1, params->type.data() + atomOffset);
744 /* Sets the LJ combination rule parameters in nbnxn_atomdata_t */
745 static void nbnxn_atomdata_set_ljcombparams(nbnxn_atomdata_t::Params *params,
747 const Nbnxm::GridSet &gridSet)
749 params->lj_comb.resize(gridSet.numGridAtomsTotal()*2);
751 if (params->comb_rule != ljcrNONE)
753 for (const Nbnxm::Grid &grid : gridSet.grids())
755 /* Loop over all columns and copy and fill */
756 for (int i = 0; i < grid.numColumns(); i++)
758 const int numAtoms = grid.paddedNumAtomsInColumn(i);
759 const int atomOffset = grid.firstAtomInColumn(i);
761 if (XFormat == nbatX4)
763 copy_lj_to_nbat_lj_comb<c_packX4>(params->nbfp_comb,
764 params->type.data() + atomOffset,
766 params->lj_comb.data() + atomOffset*2);
768 else if (XFormat == nbatX8)
770 copy_lj_to_nbat_lj_comb<c_packX8>(params->nbfp_comb,
771 params->type.data() + atomOffset,
773 params->lj_comb.data() + atomOffset*2);
775 else if (XFormat == nbatXYZQ)
777 copy_lj_to_nbat_lj_comb<1>(params->nbfp_comb,
778 params->type.data() + atomOffset,
780 params->lj_comb.data() + atomOffset*2);
787 /* Sets the charges in nbnxn_atomdata_t *nbat */
788 static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t *nbat,
789 const Nbnxm::GridSet &gridSet,
792 if (nbat->XFormat != nbatXYZQ)
794 nbat->paramsDeprecated().q.resize(nbat->numAtoms());
797 for (const Nbnxm::Grid &grid : gridSet.grids())
799 /* Loop over all columns and copy and fill */
800 for (int cxy = 0; cxy < grid.numColumns(); cxy++)
802 const int atomOffset = grid.firstAtomInColumn(cxy);
803 const int numAtoms = grid.numAtomsInColumn(cxy);
804 const int paddedNumAtoms = grid.paddedNumAtomsInColumn(cxy);
806 if (nbat->XFormat == nbatXYZQ)
808 real *q = nbat->x().data() + atomOffset*STRIDE_XYZQ + ZZ + 1;
810 for (i = 0; i < numAtoms; i++)
812 *q = charge[gridSet.atomIndices()[atomOffset + i]];
815 /* Complete the partially filled last cell with zeros */
816 for (; i < paddedNumAtoms; i++)
824 real *q = nbat->paramsDeprecated().q.data() + atomOffset;
826 for (i = 0; i < numAtoms; i++)
828 *q = charge[gridSet.atomIndices()[atomOffset + i]];
831 /* Complete the partially filled last cell with zeros */
832 for (; i < paddedNumAtoms; i++)
842 /* Set the charges of perturbed atoms in nbnxn_atomdata_t to 0.
843 * This is to automatically remove the RF/PME self term in the nbnxn kernels.
844 * Some of these zero interactions are still calculated in the normal kernels.
845 * All perturbed interactions are calculated in the free energy kernel,
846 * using the original charge and LJ data, not nbnxn_atomdata_t.
848 static void nbnxn_atomdata_mask_fep(nbnxn_atomdata_t *nbat,
849 const Nbnxm::GridSet &gridSet)
851 nbnxn_atomdata_t::Params ¶ms = nbat->paramsDeprecated();
855 if (nbat->XFormat == nbatXYZQ)
857 q = nbat->x().data() + ZZ + 1;
858 stride_q = STRIDE_XYZQ;
866 for (const Nbnxm::Grid &grid : gridSet.grids())
869 if (grid.geometry().isSimple)
875 nsubc = c_gpuNumClusterPerCell;
878 int c_offset = grid.firstAtomInColumn(0);
880 /* Loop over all columns and copy and fill */
881 for (int c = 0; c < grid.numCells()*nsubc; c++)
883 /* Does this cluster contain perturbed particles? */
884 if (grid.clusterIsPerturbed(c))
886 const int numAtomsPerCluster = grid.geometry().numAtomsICluster;
887 for (int i = 0; i < numAtomsPerCluster; i++)
889 /* Is this a perturbed particle? */
890 if (grid.atomIsPerturbed(c, i))
892 int ind = c_offset + c*numAtomsPerCluster + i;
893 /* Set atom type and charge to non-interacting */
894 params.type[ind] = params.numTypes - 1;
903 /* Copies the energy group indices to a reordered and packed array */
904 static void copy_egp_to_nbat_egps(const int *a, int na, int na_round,
905 int na_c, int bit_shift,
906 const int *in, int *innb)
912 for (i = 0; i < na; i += na_c)
914 /* Store na_c energy group numbers into one int */
916 for (int sa = 0; sa < na_c; sa++)
921 comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));
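/* Example, assuming na_c == 4 and bit_shift == 2 (at most 4 energy groups):
 * the groups {g0, g1, g2, g3} of one cluster pack into a single int as
 * g0 | g1<<2 | g2<<4 | g3<<6, and the kernels unpack them again with shifts
 * and a matching bit mask.
 */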
926 /* Complete the partially filled last cell with fill */
927 for (; i < na_round; i += na_c)
933 /* Set the energy group indices for atoms in nbnxn_atomdata_t */
934 static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t::Params *params,
935 const Nbnxm::GridSet &gridSet,
938 if (params->nenergrp == 1)
943 params->energrp.resize(gridSet.numGridAtomsTotal());
945 for (const Nbnxm::Grid &grid : gridSet.grids())
947 /* Loop over all columns and copy and fill */
948 for (int i = 0; i < grid.numColumns(); i++)
950 const int numAtoms = grid.paddedNumAtomsInColumn(i);
951 const int atomOffset = grid.firstAtomInColumn(i);
953 copy_egp_to_nbat_egps(gridSet.atomIndices().data() + atomOffset,
954 grid.numAtomsInColumn(i), numAtoms,
955 c_nbnxnCpuIClusterSize, params->neg_2log,
957 params->energrp.data() + grid.atomToCluster(atomOffset));
962 /* Sets all required atom parameter data in nbnxn_atomdata_t */
963 void nbnxn_atomdata_set(nbnxn_atomdata_t *nbat,
964 const nbnxn_search *nbs,
965 const t_mdatoms *mdatoms,
968 nbnxn_atomdata_t::Params ¶ms = nbat->paramsDeprecated();
970 const Nbnxm::GridSet &gridSet = nbs->gridSet();
972 nbnxn_atomdata_set_atomtypes(¶ms, gridSet, mdatoms->typeA);
974 nbnxn_atomdata_set_charges(nbat, gridSet, mdatoms->chargeA);
976 if (gridSet.haveFep())
978 nbnxn_atomdata_mask_fep(nbat, gridSet);
981 /* This must be done after masking types for FEP */
982 nbnxn_atomdata_set_ljcombparams(¶ms, nbat->XFormat, gridSet);
984 nbnxn_atomdata_set_energygroups(¶ms, gridSet, atinfo);
987 /* Copies the shift vector array to nbnxn_atomdata_t */
988 void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox,
990 nbnxn_atomdata_t *nbat)
994 nbat->bDynamicBox = bDynamicBox;
995 for (i = 0; i < SHIFTS; i++)
997 copy_rvec(shift_vec[i], nbat->shift_vec[i]);
1001 /* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
1002 void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search *nbs,
1003 const Nbnxm::AtomLocality locality,
1006 nbnxn_atomdata_t *nbat,
1007 gmx_wallcycle *wcycle)
1009 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1010 wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1012 const Nbnxm::GridSet &gridSet = nbs->gridSet();
1018 case Nbnxm::AtomLocality::All:
1019 case Nbnxm::AtomLocality::Count:
1021 gridEnd = gridSet.grids().size();
1023 case Nbnxm::AtomLocality::Local:
1027 case Nbnxm::AtomLocality::NonLocal:
1029 gridEnd = gridSet.grids().size();
1035 nbat->natoms_local = gridSet.grids()[0].atomIndexEnd();
1038 const int nth = gmx_omp_nthreads_get(emntPairsearch);
1040 #pragma omp parallel for num_threads(nth) schedule(static)
1041 for (int th = 0; th < nth; th++)
1045 for (int g = gridBegin; g < gridEnd; g++)
1047 const Nbnxm::Grid &grid = gridSet.grids()[g];
1048 const int numCellsXY = grid.numColumns();
1050 const int cxy0 = (numCellsXY* th + nth - 1)/nth;
1051 const int cxy1 = (numCellsXY*(th + 1) + nth - 1)/nth;
1053 for (int cxy = cxy0; cxy < cxy1; cxy++)
1055 const int na = grid.numAtomsInColumn(cxy);
1056 const int ash = grid.firstAtomInColumn(cxy);
1059 if (g == 0 && FillLocal)
1061 na_fill = grid.paddedNumAtomsInColumn(cxy);
1065 /* We fill only the real particle locations.
1066 * We assume the filler entries at the end have already been
1067 * set properly during pair-list generation.
1071 copy_rvec_to_nbat_real(gridSet.atomIndices().data() + ash,
1073 nbat->XFormat, nbat->x().data(), ash);
1077 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1080 wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1081 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1085 nbnxn_atomdata_clear_reals(gmx::ArrayRef<real> dest,
1088 for (int i = i0; i < i1; i++)
1094 gmx_unused static void
1095 nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
1097 const real ** gmx_restrict src,
1103 /* The destination buffer contains data, add to it */
1104 for (int i = i0; i < i1; i++)
1106 for (int s = 0; s < nsrc; s++)
1108 dest[i] += src[s][i];
1114 /* The destination buffer is uninitialized, set it first */
1115 for (int i = i0; i < i1; i++)
1117 dest[i] = src[0][i];
1118 for (int s = 1; s < nsrc; s++)
1120 dest[i] += src[s][i];
1126 gmx_unused static void
1127 nbnxn_atomdata_reduce_reals_simd(real gmx_unused * gmx_restrict dest,
1128 gmx_bool gmx_unused bDestSet,
1129 const gmx_unused real ** gmx_restrict src,
1130 int gmx_unused nsrc,
1131 int gmx_unused i0, int gmx_unused i1)
1134 /* The SIMD width here is actually independent of that in the kernels,
1135 * but we use the same width for simplicity (usually optimal anyhow).
1137 SimdReal dest_SSE, src_SSE;
1141 for (int i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
1143 dest_SSE = load<SimdReal>(dest+i);
1144 for (int s = 0; s < nsrc; s++)
1146 src_SSE = load<SimdReal>(src[s]+i);
1147 dest_SSE = dest_SSE + src_SSE;
1149 store(dest+i, dest_SSE);
1154 for (int i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
1156 dest_SSE = load<SimdReal>(src[0]+i);
1157 for (int s = 1; s < nsrc; s++)
1159 src_SSE = load<SimdReal>(src[s]+i);
1160 dest_SSE = dest_SSE + src_SSE;
1162 store(dest+i, dest_SSE);
1168 /* Add part of the force array(s) from nbnxn_atomdata_t to f */
1170 nbnxn_atomdata_add_nbat_f_to_f_part(const Nbnxm::GridSet &gridSet,
1171 const nbnxn_atomdata_t *nbat,
1172 gmx::ArrayRef<const nbnxn_atomdata_output_t> out,
1177 gmx::ArrayRef<const int> cell = gridSet.cells();
1179 /* Loop over all columns and copy and fill */
1180 switch (nbat->FFormat)
1186 const real *fnb = out[0].f.data();
1188 for (int a = a0; a < a1; a++)
1190 int i = cell[a]*nbat->fstride;
1193 f[a][YY] += fnb[i+1];
1194 f[a][ZZ] += fnb[i+2];
1199 for (int a = a0; a < a1; a++)
1201 int i = cell[a]*nbat->fstride;
1203 for (int fa = 0; fa < nfa; fa++)
1205 f[a][XX] += out[fa].f[i];
1206 f[a][YY] += out[fa].f[i+1];
1207 f[a][ZZ] += out[fa].f[i+2];
1215 const real *fnb = out[0].f.data();
1217 for (int a = a0; a < a1; a++)
1219 int i = atom_to_x_index<c_packX4>(cell[a]);
1221 f[a][XX] += fnb[i+XX*c_packX4];
1222 f[a][YY] += fnb[i+YY*c_packX4];
1223 f[a][ZZ] += fnb[i+ZZ*c_packX4];
1228 for (int a = a0; a < a1; a++)
1230 int i = atom_to_x_index<c_packX4>(cell[a]);
1232 for (int fa = 0; fa < nfa; fa++)
1234 f[a][XX] += out[fa].f[i+XX*c_packX4];
1235 f[a][YY] += out[fa].f[i+YY*c_packX4];
1236 f[a][ZZ] += out[fa].f[i+ZZ*c_packX4];
1244 const real *fnb = out[0].f.data();
1246 for (int a = a0; a < a1; a++)
1248 int i = atom_to_x_index<c_packX8>(cell[a]);
1250 f[a][XX] += fnb[i+XX*c_packX8];
1251 f[a][YY] += fnb[i+YY*c_packX8];
1252 f[a][ZZ] += fnb[i+ZZ*c_packX8];
1257 for (int a = a0; a < a1; a++)
1259 int i = atom_to_x_index<c_packX8>(cell[a]);
1261 for (int fa = 0; fa < nfa; fa++)
1263 f[a][XX] += out[fa].f[i+XX*c_packX8];
1264 f[a][YY] += out[fa].f[i+YY*c_packX8];
1265 f[a][ZZ] += out[fa].f[i+ZZ*c_packX8];
1271 gmx_incons("Unsupported nbnxn_atomdata_t format");
1275 static inline unsigned char reverse_bits(unsigned char b)
1277 /* http://graphics.stanford.edu/~seander/bithacks.html#ReverseByteWith64BitsDiv */
1278 return (b * 0x0202020202ULL & 0x010884422010ULL) % 1023;
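/* Quick check of the bit hack: reverse_bits(0x01) == 0x80 and
 * reverse_bits(0x16) == 0x68 (00010110 -> 01101000). It only reverses 8-bit
 * values, which is consistent with the NBNXN_BUFFERFLAG_MAX_THREADS <= 256
 * check below.
 */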
1281 static void nbnxn_atomdata_add_nbat_f_to_f_treereduce(nbnxn_atomdata_t *nbat,
1284 const nbnxn_buffer_flags_t *flags = &nbat->buffer_flags;
1286 int next_pow2 = 1<<(gmx::log2I(nth-1)+1);
1288 const int numOutputBuffers = nbat->out.size();
1289 GMX_ASSERT(numOutputBuffers == nth,
1290 "tree-reduce currently only works for numOutputBuffers==nth");
1292 memset(nbat->syncStep, 0, sizeof(*(nbat->syncStep))*nth);
1294 #pragma omp parallel num_threads(nth)
1302 th = gmx_omp_get_thread_num();
1304 for (group_size = 2; group_size < 2*next_pow2; group_size *= 2)
1306 int index[2], group_pos, partner_pos, wu;
1307 int partner_th = th ^ (group_size/2);
1312 /* wait on partner thread - replaces full barrier */
1313 int sync_th, sync_group_size;
1315 tMPI_Atomic_memory_barrier(); /* guarantee data is saved before marking work as done */
1316 tMPI_Atomic_set(&(nbat->syncStep[th]), group_size/2); /* mark previous step as completed */
1318 /* find thread to sync with. Equal to partner_th unless nth is not a power of two. */
1319 for (sync_th = partner_th, sync_group_size = group_size; sync_th >= nth && sync_group_size > 2; sync_group_size /= 2)
1321 sync_th &= ~(sync_group_size/4);
1323 if (sync_th < nth) /* otherwise there is nothing to sync; index[1] will be >= nout */
1325 /* wait on the thread which computed input data in previous step */
1326 while (tMPI_Atomic_get(static_cast<volatile tMPI_Atomic_t*>(&(nbat->syncStep[sync_th]))) < group_size/2)
1330 /* guarantee that no later load happens before the wait loop is finished */
1331 tMPI_Atomic_memory_barrier();
1333 #else /* TMPI_ATOMICS */
1338 /* Calculate buffers to sum (result goes into first buffer) */
1339 group_pos = th % group_size;
1340 index[0] = th - group_pos;
1341 index[1] = index[0] + group_size/2;
1343 /* If no second buffer, nothing to do */
1344 if (index[1] >= numOutputBuffers && group_size > 2)
1349 #if NBNXN_BUFFERFLAG_MAX_THREADS > 256
1350 #error reverse_bits assumes max 256 threads
1352 /* Position is permuted so that one of the 2 vectors being added was computed on the same thread in the previous step.
1353 This improves locality and makes it possible to sync with just a single thread between steps (= the levels in the binary tree).
1354 The permutation which allows this corresponds to reversing the bits of the group position.
1356 group_pos = reverse_bits(group_pos)/(256/group_size);
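/* Example for group_size == 4: reverse_bits(group_pos)/64 maps positions
 * {0, 1, 2, 3} to {0, 2, 1, 3}, so one of the two buffers each thread adds
 * here is the one it produced in the previous level of the tree.
 */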
1358 partner_pos = group_pos ^ 1;
1360 /* loop over two work-units (own and partner) */
1361 for (wu = 0; wu < 2; wu++)
1365 if (partner_th < nth)
1367 break; /* partner exists, we don't have to do its work */
1371 group_pos = partner_pos;
1375 /* Calculate the cell-block range for our thread */
1376 b0 = (flags->nflag* group_pos )/group_size;
1377 b1 = (flags->nflag*(group_pos+1))/group_size;
1379 for (b = b0; b < b1; b++)
1381 i0 = b *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1382 i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1384 if (bitmask_is_set(flags->flag[b], index[1]) || group_size > 2)
1386 const real *fIndex1 = nbat->out[index[1]].f.data();
1388 nbnxn_atomdata_reduce_reals_simd
1390 nbnxn_atomdata_reduce_reals
1392 (nbat->out[index[0]].f.data(),
1393 bitmask_is_set(flags->flag[b], index[0]) || group_size > 2,
1394 &fIndex1, 1, i0, i1);
1397 else if (!bitmask_is_set(flags->flag[b], index[0]))
1399 nbnxn_atomdata_clear_reals(nbat->out[index[0]].f,
1406 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1411 static void nbnxn_atomdata_add_nbat_f_to_f_stdreduce(nbnxn_atomdata_t *nbat,
1414 #pragma omp parallel for num_threads(nth) schedule(static)
1415 for (int th = 0; th < nth; th++)
1419 const nbnxn_buffer_flags_t *flags;
1421 const real *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];
1423 flags = &nbat->buffer_flags;
1425 /* Calculate the cell-block range for our thread */
1426 int b0 = (flags->nflag* th )/nth;
1427 int b1 = (flags->nflag*(th+1))/nth;
1429 for (int b = b0; b < b1; b++)
1431 int i0 = b *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1432 int i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
1435 for (int out = 1; out < gmx::ssize(nbat->out); out++)
1437 if (bitmask_is_set(flags->flag[b], out))
1439 fptr[nfptr++] = nbat->out[out].f.data();
1445 nbnxn_atomdata_reduce_reals_simd
1447 nbnxn_atomdata_reduce_reals
1449 (nbat->out[0].f.data(),
1450 bitmask_is_set(flags->flag[b], 0),
1454 else if (!bitmask_is_set(flags->flag[b], 0))
1456 nbnxn_atomdata_clear_reals(nbat->out[0].f,
1461 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1465 /* Add the force array(s) from nbnxn_atomdata_t to f */
1467 nonbonded_verlet_t::atomdata_add_nbat_f_to_f(const Nbnxm::AtomLocality locality,
1469 gmx_wallcycle *wcycle)
1471 /* Skip the non-local reduction if there was no non-local work to do */
1472 if (locality == Nbnxm::AtomLocality::NonLocal &&
1473 pairlistSets().pairlistSet(Nbnxm::InteractionLocality::NonLocal).nblGpu[0]->sci.empty())
1478 wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1479 wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1481 const Nbnxm::GridSet &gridSet = nbs->gridSet();
1483 nbs_cycle_start(&nbs->cc[enbsCCreducef]);
1489 case Nbnxm::AtomLocality::All:
1490 case Nbnxm::AtomLocality::Count:
1492 na = gridSet.numRealAtomsTotal();
1494 case Nbnxm::AtomLocality::Local:
1496 na = gridSet.numRealAtomsLocal();
1498 case Nbnxm::AtomLocality::NonLocal:
1499 a0 = gridSet.numRealAtomsLocal();
1500 na = gridSet.numRealAtomsTotal() - gridSet.numRealAtomsLocal();
1504 int nth = gmx_omp_nthreads_get(emntNonbonded);
1506 if (nbat->out.size() > 1)
1508 if (locality != Nbnxm::AtomLocality::All)
1510 gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");
1513 /* Reduce the force thread output buffers into buffer 0, before adding
1514 * them to the differently ordered "real" force buffer.
1516 if (nbat->bUseTreeReduce)
1518 nbnxn_atomdata_add_nbat_f_to_f_treereduce(nbat.get(), nth);
1522 nbnxn_atomdata_add_nbat_f_to_f_stdreduce(nbat.get(), nth);
1525 #pragma omp parallel for num_threads(nth) schedule(static)
1526 for (int th = 0; th < nth; th++)
1530 nbnxn_atomdata_add_nbat_f_to_f_part(gridSet, nbat.get(),
1537 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1540 nbs_cycle_stop(&nbs->cc[enbsCCreducef]);
1542 wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1543 wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1546 /* Adds the shift forces from nbnxn_atomdata_t to fshift */
1547 void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
1550 gmx::ArrayRef<const nbnxn_atomdata_output_t> outputBuffers = nbat->out;
1552 if (outputBuffers.size() == 1)
1554 /* When there is a single output object, with CPU or GPU, shift forces
1555 * have been written directly to the main buffer instead of to the
1556 * (single) thread local output object. There is nothing to reduce.
1561 for (int s = 0; s < SHIFTS; s++)
1565 for (const nbnxn_atomdata_output_t &out : outputBuffers)
1567 sum[XX] += out.fshift[s*DIM+XX];
1568 sum[YY] += out.fshift[s*DIM+YY];
1569 sum[ZZ] += out.fshift[s*DIM+ZZ];
1571 rvec_inc(fshift[s], sum);