/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "thread_mpi/atomic.h"

#include "gromacs/math/functions.h"
#include "gromacs/math/utilities.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/forcerec.h" // only for GET_CGINFO_*
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/nbnxm/nbnxm_geometry.h"
#include "gromacs/nbnxm/pairlist.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/simd/simd.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/stringutil.h"

using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
void nbnxn_atomdata_t::resizeCoordinateBuffer(int numAtoms)
    x_.resize(numAtoms*xstride);

void nbnxn_atomdata_t::resizeForceBuffers()
    /* Force buffers need padding up to a multiple of the buffer flag size */
    const int paddedSize = (numAtoms() + NBNXN_BUFFERFLAG_SIZE - 1)/NBNXN_BUFFERFLAG_SIZE*NBNXN_BUFFERFLAG_SIZE;

    /* Should we let each thread allocate its own data instead? */
    for (nbnxn_atomdata_output_t &outBuffer : out)
        outBuffer.f.resize(paddedSize*fstride);
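
/* A minimal sketch (not part of the original file) of the round-up idiom used
 * for paddedSize above: (n + B - 1)/B*B rounds n up to the next multiple of B
 * and leaves exact multiples unchanged. The function name is hypothetical.
 */
gmx_unused static constexpr int roundUpToMultipleSketch(int n, int blockSize)
{
    return (n + blockSize - 1)/blockSize*blockSize;
}
static_assert(roundUpToMultipleSketch(100, 16) == 112, "100 rounds up to 112");
static_assert(roundUpToMultipleSketch(96, 16) == 96, "exact multiples are unchanged");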
/* Initializes an nbnxn_atomdata_output_t data structure */
nbnxn_atomdata_output_t::nbnxn_atomdata_output_t(Nbnxm::KernelType  kernelType,
                                                 int                numEnergyGroups,
                                                 int                simdEnergyBufferStride,
                                                 gmx::PinningPolicy pinningPolicy) :
    f({}, {pinningPolicy}),
    fshift({}, {pinningPolicy}),
    Vvdw({}, {pinningPolicy}),
    Vc({}, {pinningPolicy})
    fshift.resize(SHIFTS*DIM);
    Vvdw.resize(numEnergyGroups*numEnergyGroups);
    Vc.resize(numEnergyGroups*numEnergyGroups);

    if (Nbnxm::kernelTypeIsSimd(kernelType))
        int cj_size     = Nbnxm::JClusterSizePerKernelType[kernelType];
        int numElements = numEnergyGroups*numEnergyGroups*simdEnergyBufferStride*(cj_size/2)*cj_size;
        VSvdw.resize(numElements);
        VSc.resize(numElements);
static void copy_int_to_nbat_int(const int *a, int na, int na_round,
                                 const int *in, int fill, int *innb)
    for (i = 0; i < na; i++)
        innb[j++] = in[a[i]];

    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i++)
        innb[j++] = fill;
void copy_rvec_to_nbat_real(const int *a, int na, int na_round,
                            const rvec *x, int nbatFormat,
                            real *xnb, int a0)
    /* We complete partially filled cells, which can only be the last one in
     * each column, with coordinates farAway. The actual coordinate value does
     * not influence the results, since these filler particles do not interact.
     * Clusters with normal atoms + fillers have a bounding box based only
     * on the coordinates of the atoms. Clusters with only fillers have as
     * the bounding box the coordinates of the first filler. Such clusters
     * are not considered as i-entries, but they are considered as j-entries.
     * So for performance it is better to have their bounding boxes far away,
     * such that filler-only clusters don't end up in the pair list.
     */
    const real farAway = -1000000;
            for (i = 0; i < na; i++)
                xnb[j++] = x[a[i]][XX];
                xnb[j++] = x[a[i]][YY];
                xnb[j++] = x[a[i]][ZZ];

            /* Complete the partially filled last cell with farAway elements */
            for (; i < na_round; i++)

            for (i = 0; i < na; i++)
                xnb[j++] = x[a[i]][XX];
                xnb[j++] = x[a[i]][YY];
                xnb[j++] = x[a[i]][ZZ];

            /* Complete the partially filled last cell with farAway elements */
            for (; i < na_round; i++)
            j = atom_to_x_index<c_packX4>(a0);
            c = a0 & (c_packX4-1);
            for (i = 0; i < na; i++)
                xnb[j+XX*c_packX4] = x[a[i]][XX];
                xnb[j+YY*c_packX4] = x[a[i]][YY];
                xnb[j+ZZ*c_packX4] = x[a[i]][ZZ];
                    j += (DIM-1)*c_packX4;

            /* Complete the partially filled last cell with farAway elements */
            for (; i < na_round; i++)
                xnb[j+XX*c_packX4] = farAway;
                xnb[j+YY*c_packX4] = farAway;
                xnb[j+ZZ*c_packX4] = farAway;
                    j += (DIM-1)*c_packX4;

            j = atom_to_x_index<c_packX8>(a0);
            c = a0 & (c_packX8 - 1);
            for (i = 0; i < na; i++)
                xnb[j+XX*c_packX8] = x[a[i]][XX];
                xnb[j+YY*c_packX8] = x[a[i]][YY];
                xnb[j+ZZ*c_packX8] = x[a[i]][ZZ];
                    j += (DIM-1)*c_packX8;

            /* Complete the partially filled last cell with farAway elements */
            for (; i < na_round; i++)
                xnb[j+XX*c_packX8] = farAway;
                xnb[j+YY*c_packX8] = farAway;
                xnb[j+ZZ*c_packX8] = farAway;
                    j += (DIM-1)*c_packX8;

            gmx_incons("Unsupported nbnxn_atomdata_t format");
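
/* Illustrative sketch (an assumption about atom_to_x_index, consistent with
 * the offsets used above): in the packed formats, coordinates are stored in
 * blocks of packSize atoms per dimension (x0..x3 y0..y3 z0..z3 for c_packX4),
 * so atom a maps to slot (a/packSize)*DIM*packSize + a%packSize, and the Y
 * and Z components sit packSize and 2*packSize reals further on. The
 * function name is hypothetical.
 */
gmx_unused static inline int packedIndexSketch(int a, int packSize)
{
    /* packSize is assumed to be a power of two, as c_packX4 and c_packX8 are */
    return (a/packSize)*DIM*packSize + (a & (packSize - 1));
}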
/* Stores the LJ parameter data in a format convenient for different kernels */
static void set_lj_parameter_data(nbnxn_atomdata_t::Params *params, gmx_bool bSIMD)
    int nt = params->numTypes;

        /* nbfp_aligned stores two parameters using the stride most suitable
         * for the present SIMD architecture, as specified by the constant
         * c_simdBestPairAlignment from the SIMD header.
         * There's a slight inefficiency in allocating and initializing nbfp_aligned
         * when it might not be used, but introducing the conditional code is not
         * worth the complexity.
         */
        params->nbfp_aligned.resize(nt*nt*c_simdBestPairAlignment);

        for (int i = 0; i < nt; i++)
            for (int j = 0; j < nt; j++)
                params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+0] = params->nbfp[(i*nt+j)*2+0];
                params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+1] = params->nbfp[(i*nt+j)*2+1];
                params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+2] = 0;
                params->nbfp_aligned[(i*nt+j)*c_simdBestPairAlignment+3] = 0;
    /* We use combination rule data for SIMD combination rule kernels
     * and with LJ-PME kernels. We then only need parameters per atom type,
     * not per pair of atom types.
     */
    params->nbfp_comb.resize(nt*2);
    switch (params->comb_rule)
            params->comb_rule = ljcrGEOM;

            for (int i = 0; i < nt; i++)
                /* Store the sqrt of the diagonal from the nbfp matrix */
                params->nbfp_comb[i*2  ] = std::sqrt(params->nbfp[(i*nt+i)*2  ]);
                params->nbfp_comb[i*2+1] = std::sqrt(params->nbfp[(i*nt+i)*2+1]);

            for (int i = 0; i < nt; i++)
                /* Get 6*C6 and 12*C12 from the diagonal of the nbfp matrix */
                const real c6  = params->nbfp[(i*nt+i)*2  ];
                const real c12 = params->nbfp[(i*nt+i)*2+1];
                if (c6 > 0 && c12 > 0)
                    /* We store 0.5*2^(1/6)*sigma and sqrt(4*3*eps),
                     * so we get 6*C6 and 12*C12 after combining.
                     */
                    params->nbfp_comb[i*2  ] = 0.5*gmx::sixthroot(c12/c6);
                    params->nbfp_comb[i*2+1] = std::sqrt(c6*c6/c12);

                    params->nbfp_comb[i*2  ] = 0;
                    params->nbfp_comb[i*2+1] = 0;

        /* We always store the full matrix (see code above) */
            gmx_incons("Unknown combination rule");
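
/* Worked check (a sketch, not original code) that the LB encoding above
 * recombines to 6*C6 and 12*C12: with a_i = 0.5*2^(1/6)*sigma_i and
 * b_i = sqrt(12*eps_i), forming s = a_i + a_j and e = b_i*b_j gives
 *   s = 2^(1/6)*sigma_LB,  e = 12*eps_LB,
 * where sigma_LB = (sigma_i + sigma_j)/2 and eps_LB = sqrt(eps_i*eps_j), so
 *   e*s^6  = 12*eps_LB*2*sigma_LB^6  = 24*eps_LB*sigma_LB^6  = 6*C6,
 *   e*s^12 = 12*eps_LB*4*sigma_LB^12 = 48*eps_LB*sigma_LB^12 = 12*C12,
 * using C6 = 4*eps*sigma^6 and C12 = 4*eps*sigma^12.
 */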
nbnxn_atomdata_t::SimdMasks::SimdMasks()
    constexpr int simd_width = GMX_SIMD_REAL_WIDTH;
    /* Set the diagonal cluster pair exclusion mask setup data.
     * In the kernel we check 0 < j - i to generate the masks.
     * Here we store j - i for generating the mask for the first i,
     * we subtract 0.5 to avoid rounding issues.
     * In the kernel we can subtract 1 to generate the subsequent mask.
     */
    const int simd_4xn_diag_size = std::max(c_nbnxnCpuIClusterSize, simd_width);
    diagonal_4xn_j_minus_i.resize(simd_4xn_diag_size);
    for (int j = 0; j < simd_4xn_diag_size; j++)
        diagonal_4xn_j_minus_i[j] = j - 0.5;

    diagonal_2xnn_j_minus_i.resize(simd_width);
    for (int j = 0; j < simd_width/2; j++)
        /* The j-cluster size is half the SIMD width */
        diagonal_2xnn_j_minus_i[j] = j - 0.5;
        /* The next half of the SIMD width is for i + 1 */
        diagonal_2xnn_j_minus_i[simd_width/2 + j] = j - 1 - 0.5;
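
    /* Worked example (illustrative, not original code): with simd_width = 4
     * the 4xN array above holds {-0.5, 0.5, 1.5, 2.5}; testing 0 < value
     * yields the i = 0 mask {0,1,1,1}, and subtracting 1 yields the i = 1
     * mask {0,0,1,1}, so lane (i, j) is enabled exactly when j > i.
     */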
    /* We use up to 32 bits for exclusion masking.
     * The same masks are used for the 4xN and 2x(N+N) kernels.
     * The masks are read either into integer SIMD registers or into
     * real SIMD registers (together with a cast).
     * In single precision this means the real and integer SIMD registers
     * are of equal size.
     */
    const int simd_excl_size = c_nbnxnCpuIClusterSize*simd_width;
#if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
    exclusion_filter64.resize(simd_excl_size);
#else
    exclusion_filter.resize(simd_excl_size);
#endif

    for (int j = 0; j < simd_excl_size; j++)
        /* Set the consecutive bits for masking pair exclusions */
#if GMX_DOUBLE && !GMX_SIMD_HAVE_INT32_LOGICAL
        exclusion_filter64[j] = (1U << j);
#else
        exclusion_filter[j]   = (1U << j);
#endif
    if (!GMX_SIMD_HAVE_LOGICAL && !GMX_SIMD_HAVE_INT32_LOGICAL) // NOLINT(misc-redundant-expression)
        // If the SIMD implementation has no bitwise logical operation support
        // whatsoever we cannot use the normal masking. Instead,
        // we generate a vector of all 2^4 possible ways an i atom
        // interacts with its 4 j atoms. Each array entry contains
        // GMX_SIMD_REAL_WIDTH values that are read with a single aligned SIMD load.
        // Since there is no logical value representation in this case, we use
        // any nonzero value to indicate 'true', while zero means 'false'.
        // This can then be converted to a SIMD boolean internally in the SIMD
        // module by comparing to zero.
        // Each array entry encodes how this i atom will interact with the 4 j atoms.
        // Matching code exists in set_ci_top_excls() to generate indices into this array.
        // Those indices are used in the kernels.

        const int  simd_excl_size = c_nbnxnCpuIClusterSize*c_nbnxnCpuIClusterSize;
        const real simdFalse      = 0.0;
        const real simdTrue       = 1.0;

        interaction_array.resize(simd_excl_size * GMX_SIMD_REAL_WIDTH);
        for (int j = 0; j < simd_excl_size; j++)
            const int index = j * GMX_SIMD_REAL_WIDTH;
            for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
                interaction_array[index + i] = (j & (1 << i)) ? simdTrue : simdFalse;
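
        /* Decoding example (illustrative, not original code): entry
         * j = 0b0101 = 5 has lanes 0 and 2 set to simdTrue, so an i atom with
         * interaction pattern 5 sees only j atoms 0 and 2; comparing the
         * loaded row against zero then recovers the SIMD boolean mask in the
         * kernel.
         */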
nbnxn_atomdata_t::Params::Params(gmx::PinningPolicy pinningPolicy) :
    nbfp({}, {pinningPolicy}),
    nbfp_comb({}, {pinningPolicy}),
    type({}, {pinningPolicy}),
    lj_comb({}, {pinningPolicy}),
    q({}, {pinningPolicy}),
    energrp({}, {pinningPolicy})

nbnxn_atomdata_t::nbnxn_atomdata_t(gmx::PinningPolicy pinningPolicy) :
    params_(pinningPolicy),
    shift_vec({}, {pinningPolicy}),
    x_({}, {pinningPolicy}),
    bUseBufferFlags(FALSE),
    bUseTreeReduce(FALSE)
/* Initializes an nbnxn_atomdata_t::Params data structure */
static void nbnxn_atomdata_params_init(const gmx::MDLogger &mdlog,
                                       nbnxn_atomdata_t::Params *params,
                                       const Nbnxm::KernelType kernelType,
                                       int enbnxninitcombrule,
                                       int ntype, const real *nbfp,
                                       int n_energygroups)
    gmx_bool simple, bCombGeom, bCombLB, bSIMD;

        fprintf(debug, "There are %d atom types in the system, adding one for nbnxn_atomdata_t\n", ntype);

    params->numTypes = ntype + 1;
    params->nbfp.resize(params->numTypes*params->numTypes*2);
    params->nbfp_comb.resize(params->numTypes*2);

    /* A tolerance of 1e-5 seems reasonable for (possibly hand-typed)
     * force-field floating point parameters.
     */
    ptr = getenv("GMX_LJCOMB_TOL");
        sscanf(ptr, "%lf", &dbl);
    /* Temporarily fill params->nbfp_comb with sigma and epsilon
     * to check for the LB rule.
     */
    for (int i = 0; i < ntype; i++)
        c6  = nbfp[(i*ntype+i)*2    ]/6.0;
        c12 = nbfp[(i*ntype+i)*2 + 1]/12.0;
        if (c6 > 0 && c12 > 0)
            params->nbfp_comb[i*2    ] = gmx::sixthroot(c12/c6);
            params->nbfp_comb[i*2 + 1] = 0.25*c6*c6/c12;

        else if (c6 == 0 && c12 == 0)
            params->nbfp_comb[i*2    ] = 0;
            params->nbfp_comb[i*2 + 1] = 0;

            /* Cannot use the LB rule with only dispersion or only repulsion */
    for (int i = 0; i < params->numTypes; i++)
        for (int j = 0; j < params->numTypes; j++)
            if (i < ntype && j < ntype)
                /* fr->nbfp has been updated, so that array now also stores c6/c12 including
                 * the 6.0/12.0 prefactors to save 2 flops in the most common case (force-only).
                 */
                c6  = nbfp[(i*ntype+j)*2    ];
                c12 = nbfp[(i*ntype+j)*2 + 1];
                params->nbfp[(i*params->numTypes+j)*2    ] = c6;
                params->nbfp[(i*params->numTypes+j)*2 + 1] = c12;

                /* Compare 6*C6 and 12*C12 for the geometric combination rule */
                bCombGeom = bCombGeom &&
                    gmx_within_tol(c6*c6, nbfp[(i*ntype+i)*2    ]*nbfp[(j*ntype+j)*2    ], tol) &&
                    gmx_within_tol(c12*c12, nbfp[(i*ntype+i)*2 + 1]*nbfp[(j*ntype+j)*2 + 1], tol);

                /* Compare C6 and C12 for the Lorentz-Berthelot combination rule */
                bCombLB = bCombLB &&
                    ((c6 == 0 && c12 == 0 &&
                      (params->nbfp_comb[i*2 + 1] == 0 || params->nbfp_comb[j*2 + 1] == 0)) ||
                     (c6 > 0 && c12 > 0 &&
                      gmx_within_tol(gmx::sixthroot(c12/c6),
                                     0.5*(params->nbfp_comb[i*2]+params->nbfp_comb[j*2]), tol) &&
                      gmx_within_tol(0.25*c6*c6/c12, std::sqrt(params->nbfp_comb[i*2 + 1]*params->nbfp_comb[j*2 + 1]), tol)));

                /* Add zero parameters for the additional dummy atom type */
                params->nbfp[(i*params->numTypes + j)*2  ] = 0;
                params->nbfp[(i*params->numTypes + j)*2+1] = 0;

        fprintf(debug, "Combination rules: geometric %s Lorentz-Berthelot %s\n",
                gmx::boolToString(bCombGeom), gmx::boolToString(bCombLB));
    simple = Nbnxm::kernelTypeUsesSimplePairlist(kernelType);

    switch (enbnxninitcombrule)
        case enbnxninitcombruleDETECT:
            /* We prefer the geometric combination rule,
             * as that gives a slightly faster kernel than the LB rule.
             */
                params->comb_rule = ljcrGEOM;

                params->comb_rule = ljcrLB;

                params->comb_rule = ljcrNONE;

                params->nbfp_comb.clear();

            if (params->comb_rule == ljcrNONE)
                mesg = "Using full Lennard-Jones parameter combination matrix";
            else
                mesg = gmx::formatString("Using %s Lennard-Jones combination rule",
                                         params->comb_rule == ljcrGEOM ? "geometric" : "Lorentz-Berthelot");
            GMX_LOG(mdlog.info).asParagraph().appendText(mesg);

        case enbnxninitcombruleGEOM:
            params->comb_rule = ljcrGEOM;
        case enbnxninitcombruleLB:
            params->comb_rule = ljcrLB;
        case enbnxninitcombruleNONE:
            params->comb_rule = ljcrNONE;

            params->nbfp_comb.clear();
            gmx_incons("Unknown enbnxninitcombrule");
    bSIMD = Nbnxm::kernelTypeIsSimd(kernelType);

    set_lj_parameter_data(params, bSIMD);

    params->nenergrp = n_energygroups;
        // We now check for energy groups already when starting mdrun
        GMX_RELEASE_ASSERT(n_energygroups == 1, "GPU kernels do not support energy groups");

    /* Temporary storage goes as #grp^3*simd_width^2/2, so limit to 64 */
    if (params->nenergrp > 64)
        gmx_fatal(FARGS, "With NxN kernels not more than 64 energy groups are supported\n");

    params->neg_2log = 1;
    while (params->nenergrp > (1<<params->neg_2log))
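
/* Sketch (not original code, hypothetical name) of what the loop above
 * computes: neg_2log is the number of bits needed to store one energy-group
 * index, i.e. max(1, ceil(log2(n))), used below when packing several group
 * indices into a single int: 1-2 groups -> 1 bit, 3-4 -> 2 bits,
 * 5-8 -> 3 bits, and so on.
 */
gmx_unused static inline int bitsPerEnergyGroupSketch(int numEnergyGroups)
{
    int bits = 1;
    while (numEnergyGroups > (1 << bits))
    {
        bits++;
    }
    return bits;
}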
/* Initializes an nbnxn_atomdata_t data structure */
void nbnxn_atomdata_init(const gmx::MDLogger &mdlog,
                         nbnxn_atomdata_t *nbat,
                         const Nbnxm::KernelType kernelType,
                         int enbnxninitcombrule,
                         int ntype, const real *nbfp,
                         int n_energygroups,
                         int nout)
    nbnxn_atomdata_params_init(mdlog, &nbat->paramsDeprecated(), kernelType,
                               enbnxninitcombrule, ntype, nbfp, n_energygroups);

    const bool simple = Nbnxm::kernelTypeUsesSimplePairlist(kernelType);
    const bool bSIMD  = Nbnxm::kernelTypeIsSimd(kernelType);

            pack_x = std::max(c_nbnxnCpuIClusterSize,
                              Nbnxm::JClusterSizePerKernelType[kernelType]);
                    nbat->XFormat = nbatX4;
                    nbat->XFormat = nbatX8;
                    gmx_incons("Unsupported packing width");

            nbat->XFormat = nbatXYZ;

        nbat->FFormat = nbat->XFormat;

        nbat->XFormat = nbatXYZQ;
        nbat->FFormat = nbatXYZ;

    nbat->shift_vec.resize(SHIFTS);

    nbat->xstride = (nbat->XFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);
    nbat->fstride = (nbat->FFormat == nbatXYZQ ? STRIDE_XYZQ : DIM);

    /* Initialize the output data structures */
    for (int i = 0; i < nout; i++)
        const auto &pinningPolicy = nbat->params().type.get_allocator().pinningPolicy();
        nbat->out.emplace_back(kernelType, nbat->params().nenergrp, 1 << nbat->params().neg_2log,
                               pinningPolicy);

    nbat->buffer_flags.flag        = nullptr;
    nbat->buffer_flags.flag_nalloc = 0;
    const int nth = gmx_omp_nthreads_get(emntNonbonded);

    const char *ptr = getenv("GMX_USE_TREEREDUCE");
    if (ptr != nullptr)
        nbat->bUseTreeReduce = (strtol(ptr, nullptr, 10) != 0);
    else if (nth > 8) /* on the CPU we currently don't benefit even at 32 threads */
        nbat->bUseTreeReduce = true;
    else
        nbat->bUseTreeReduce = false;

    if (nbat->bUseTreeReduce)
        GMX_LOG(mdlog.info).asParagraph().appendText("Using tree force reduction");

        nbat->syncStep = new tMPI_Atomic[nth];
template<int packSize>
static void copy_lj_to_nbat_lj_comb(gmx::ArrayRef<const real> ljparam_type,
                                    const int *type, int na,
                                    real *ljparam_at)
    /* The LJ params follow the combination rule:
     * copy the params from the type array to the atom array.
     */
    for (int is = 0; is < na; is += packSize)
        for (int k = 0; k < packSize; k++)
            ljparam_at[is*2            + k] = ljparam_type[type[i]*2    ];
            ljparam_at[is*2 + packSize + k] = ljparam_type[type[i]*2 + 1];
static int numAtomsFromGrids(const nbnxn_search &nbs)
    const nbnxn_grid_t &lastGrid = nbs.grid.back();

    return (lastGrid.cell0 + lastGrid.nc)*lastGrid.na_sc;

/* Sets the atom type in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_atomtypes(nbnxn_atomdata_t::Params *params,
                                         const nbnxn_search *nbs,
                                         const int *type)
    params->type.resize(numAtomsFromGrids(*nbs));

    for (const nbnxn_grid_t &grid : nbs->grid)
        /* Loop over all columns and copy and fill */
        for (int i = 0; i < grid.numCells[XX]*grid.numCells[YY]; i++)
            int ncz = grid.cxy_ind[i+1] - grid.cxy_ind[i];
            int ash = (grid.cell0 + grid.cxy_ind[i])*grid.na_sc;

            copy_int_to_nbat_int(nbs->a.data() + ash, grid.cxy_na[i], ncz*grid.na_sc,
                                 type, params->numTypes - 1, params->type.data() + ash);
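
/* Indexing sketch for the column copy above (an interpretation of the calls,
 * not original documentation): cxy_ind[i] is the first cell of column i, so
 * ash is the column's first atom slot and ncz*grid.na_sc its padded length;
 * the first cxy_na[i] slots receive real atom types and the padding receives
 * the non-interacting dummy type numTypes - 1.
 */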
/* Sets the LJ combination rule parameters in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_ljcombparams(nbnxn_atomdata_t::Params *params,
                                            const int XFormat,
                                            const nbnxn_search *nbs)
    params->lj_comb.resize(numAtomsFromGrids(*nbs)*2);

    if (params->comb_rule != ljcrNONE)
        for (const nbnxn_grid_t &grid : nbs->grid)
            /* Loop over all columns and copy and fill */
            for (int i = 0; i < grid.numCells[XX]*grid.numCells[YY]; i++)
                int ncz = grid.cxy_ind[i+1] - grid.cxy_ind[i];
                int ash = (grid.cell0 + grid.cxy_ind[i])*grid.na_sc;

                if (XFormat == nbatX4)
                    copy_lj_to_nbat_lj_comb<c_packX4>(params->nbfp_comb,
                                                      params->type.data() + ash,
                                                      params->lj_comb.data() + ash*2);
                else if (XFormat == nbatX8)
                    copy_lj_to_nbat_lj_comb<c_packX8>(params->nbfp_comb,
                                                      params->type.data() + ash,
                                                      params->lj_comb.data() + ash*2);
                else if (XFormat == nbatXYZQ)
                    copy_lj_to_nbat_lj_comb<1>(params->nbfp_comb,
                                               params->type.data() + ash,
                                               params->lj_comb.data() + ash*2);
/* Sets the charges in nbnxn_atomdata_t *nbat */
static void nbnxn_atomdata_set_charges(nbnxn_atomdata_t *nbat,
                                       const nbnxn_search *nbs,
                                       const real *charge)
    if (nbat->XFormat != nbatXYZQ)
        nbat->paramsDeprecated().q.resize(nbat->numAtoms());

    for (const nbnxn_grid_t &grid : nbs->grid)
        /* Loop over all columns and copy and fill */
        for (int cxy = 0; cxy < grid.numCells[XX]*grid.numCells[YY]; cxy++)
            int ash      = (grid.cell0 + grid.cxy_ind[cxy])*grid.na_sc;
            int na       = grid.cxy_na[cxy];
            int na_round = (grid.cxy_ind[cxy+1] - grid.cxy_ind[cxy])*grid.na_sc;

            if (nbat->XFormat == nbatXYZQ)
                real *q = nbat->x().data() + ash*STRIDE_XYZQ + ZZ + 1;
                for (i = 0; i < na; i++)
                    *q = charge[nbs->a[ash+i]];

                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)

                real *q = nbat->paramsDeprecated().q.data() + ash;
                for (i = 0; i < na; i++)
                    *q = charge[nbs->a[ash+i]];

                /* Complete the partially filled last cell with zeros */
                for (; i < na_round; i++)
/* Set the charges of perturbed atoms in nbnxn_atomdata_t to 0.
 * This is to automatically remove the RF/PME self term in the nbnxn kernels.
 * Some of these zero interactions are still calculated in the normal kernels.
 * All perturbed interactions are calculated in the free energy kernel,
 * using the original charge and LJ data, not nbnxn_atomdata_t.
 */
static void nbnxn_atomdata_mask_fep(nbnxn_atomdata_t *nbat,
                                    const nbnxn_search *nbs)
    nbnxn_atomdata_t::Params &params = nbat->paramsDeprecated();

    if (nbat->XFormat == nbatXYZQ)
        q        = nbat->x().data() + ZZ + 1;
        stride_q = STRIDE_XYZQ;

    for (const nbnxn_grid_t &grid : nbs->grid)
            nsubc = c_gpuNumClusterPerCell;

        int c_offset = grid.cell0*grid.na_sc;

        /* Loop over all clusters */
        for (int c = 0; c < grid.nc*nsubc; c++)
            /* Does this cluster contain perturbed particles? */
            if (grid.fep[c] != 0)
                for (int i = 0; i < grid.na_c; i++)
                    /* Is this a perturbed particle? */
                    if (grid.fep[c] & (1 << i))
                        int ind = c_offset + c*grid.na_c + i;
                        /* Set atom type and charge to non-interacting */
                        params.type[ind] = params.numTypes - 1;
                        q[ind*stride_q]  = 0;
/* Copies the energy group indices to a reordered and packed array */
static void copy_egp_to_nbat_egps(const int *a, int na, int na_round,
                                  int na_c, int bit_shift,
                                  const int *in, int *innb)
    for (i = 0; i < na; i += na_c)
        /* Store na_c energy group numbers into one int */
        for (int sa = 0; sa < na_c; sa++)
                comb |= (GET_CGINFO_GID(in[at]) << (sa*bit_shift));

    /* Complete the partially filled last cell with fill */
    for (; i < na_round; i += na_c)
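
/* Inverse of the packing above (a sketch with a hypothetical name, assuming
 * each group id fits in bit_shift bits): extract the energy-group index of
 * sub-atom sa from one packed int.
 */
gmx_unused static inline int energyGroupFromPackedSketch(int packed, int sa, int bit_shift)
{
    return (packed >> (sa*bit_shift)) & ((1 << bit_shift) - 1);
}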
/* Set the energy group indices for atoms in nbnxn_atomdata_t */
static void nbnxn_atomdata_set_energygroups(nbnxn_atomdata_t::Params *params,
                                            const nbnxn_search *nbs,
                                            const int *atinfo)
    if (params->nenergrp == 1)
        return;

    params->energrp.resize(numAtomsFromGrids(*nbs));

    for (const nbnxn_grid_t &grid : nbs->grid)
        /* Loop over all columns and copy and fill */
        for (int i = 0; i < grid.numCells[XX]*grid.numCells[YY]; i++)
            int ncz = grid.cxy_ind[i+1] - grid.cxy_ind[i];
            int ash = (grid.cell0 + grid.cxy_ind[i])*grid.na_sc;

            copy_egp_to_nbat_egps(nbs->a.data() + ash, grid.cxy_na[i], ncz*grid.na_sc,
                                  c_nbnxnCpuIClusterSize, params->neg_2log,
                                  atinfo,
                                  params->energrp.data() + (ash >> grid.na_c_2log));
/* Sets all required atom parameter data in nbnxn_atomdata_t */
void nbnxn_atomdata_set(nbnxn_atomdata_t *nbat,
                        const nbnxn_search *nbs,
                        const t_mdatoms *mdatoms,
                        const int *atinfo)
    nbnxn_atomdata_t::Params &params = nbat->paramsDeprecated();

    nbnxn_atomdata_set_atomtypes(&params, nbs, mdatoms->typeA);

    nbnxn_atomdata_set_charges(nbat, nbs, mdatoms->chargeA);

        nbnxn_atomdata_mask_fep(nbat, nbs);

    /* This must be done after masking types for FEP */
    nbnxn_atomdata_set_ljcombparams(&params, nbat->XFormat, nbs);

    nbnxn_atomdata_set_energygroups(&params, nbs, atinfo);

/* Copies the shift vector array to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_shiftvec(gmx_bool bDynamicBox,
                                  rvec *shift_vec,
                                  nbnxn_atomdata_t *nbat)
    nbat->bDynamicBox = bDynamicBox;
    for (i = 0; i < SHIFTS; i++)
        copy_rvec(shift_vec[i], nbat->shift_vec[i]);
/* Copies (and reorders) the coordinates to nbnxn_atomdata_t */
void nbnxn_atomdata_copy_x_to_nbat_x(const nbnxn_search *nbs,
                                     const Nbnxm::AtomLocality locality,
                                     gmx_bool FillLocal,
                                     const rvec *x,
                                     nbnxn_atomdata_t *nbat,
                                     gmx_wallcycle *wcycle)
    wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
    wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);

    switch (locality)
        case Nbnxm::AtomLocality::All:
        case Nbnxm::AtomLocality::Count:
            g1 = nbs->grid.size();
        case Nbnxm::AtomLocality::Local:
        case Nbnxm::AtomLocality::NonLocal:
            g1 = nbs->grid.size();

        nbat->natoms_local = nbs->grid[0].nc*nbs->grid[0].na_sc;

    nth = gmx_omp_nthreads_get(emntPairsearch);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (th = 0; th < nth; th++)
        for (int g = g0; g < g1; g++)
            const nbnxn_grid_t &grid = nbs->grid[g];
            const int numCellsXY     = grid.numCells[XX]*grid.numCells[YY];

            const int cxy0 = (numCellsXY* th      + nth - 1)/nth;
            const int cxy1 = (numCellsXY*(th + 1) + nth - 1)/nth;
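
            /* Partition example (illustrative): these ceil-style bounds split
             * numCellsXY columns nearly evenly over nth threads, e.g.
             * numCellsXY = 10, nth = 4 gives the ranges
             * [0,3), [3,5), [5,8), [8,10).
             */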
            for (int cxy = cxy0; cxy < cxy1; cxy++)
                int na, ash, na_fill;

                na  = grid.cxy_na[cxy];
                ash = (grid.cell0 + grid.cxy_ind[cxy])*grid.na_sc;

                if (g == 0 && FillLocal)
                    na_fill =
                        (grid.cxy_ind[cxy+1] - grid.cxy_ind[cxy])*grid.na_sc;
                else
                    /* We fill only the real particle locations.
                     * We assume the filling entries at the end have already
                     * been set properly during pair-list generation.
                     */
                    na_fill = na;

                copy_rvec_to_nbat_real(nbs->a.data() + ash, na, na_fill, x,
                                       nbat->XFormat, nbat->x().data(), ash);
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

    wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
    wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
static void
nbnxn_atomdata_clear_reals(gmx::ArrayRef<real> dest,
                           int i0, int i1)
    for (int i = i0; i < i1; i++)
        dest[i] = 0;

gmx_unused static void
nbnxn_atomdata_reduce_reals(real * gmx_restrict dest,
                            gmx_bool bDestSet,
                            const real ** gmx_restrict src,
                            int nsrc,
                            int i0, int i1)
    if (bDestSet)
        /* The destination buffer contains data, add to it */
        for (int i = i0; i < i1; i++)
            for (int s = 0; s < nsrc; s++)
                dest[i] += src[s][i];
    else
        /* The destination buffer is uninitialized, set it first */
        for (int i = i0; i < i1; i++)
            dest[i] = src[0][i];
            for (int s = 1; s < nsrc; s++)
                dest[i] += src[s][i];
gmx_unused static void
nbnxn_atomdata_reduce_reals_simd(real gmx_unused * gmx_restrict dest,
                                 gmx_bool gmx_unused bDestSet,
                                 const gmx_unused real ** gmx_restrict src,
                                 int gmx_unused nsrc,
                                 int gmx_unused i0, int gmx_unused i1)
#if GMX_SIMD
    /* The SIMD width here is actually independent of that in the kernels,
     * but we use the same width for simplicity (usually optimal anyhow).
     */
    SimdReal dest_SSE, src_SSE;

    if (bDestSet)
        for (int i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
            dest_SSE = load<SimdReal>(dest+i);
            for (int s = 0; s < nsrc; s++)
                src_SSE  = load<SimdReal>(src[s]+i);
                dest_SSE = dest_SSE + src_SSE;
            store(dest+i, dest_SSE);
    else
        for (int i = i0; i < i1; i += GMX_SIMD_REAL_WIDTH)
            dest_SSE = load<SimdReal>(src[0]+i);
            for (int s = 1; s < nsrc; s++)
                src_SSE  = load<SimdReal>(src[s]+i);
                dest_SSE = dest_SSE + src_SSE;
            store(dest+i, dest_SSE);
#endif
/* Add part of the force array(s) from nbnxn_atomdata_t to f */
static void
nbnxn_atomdata_add_nbat_f_to_f_part(const nbnxn_search *nbs,
                                    const nbnxn_atomdata_t *nbat,
                                    gmx::ArrayRef<const nbnxn_atomdata_output_t> out,
                                    int nfa, int a0, int a1,
                                    rvec *f)
    gmx::ArrayRef<const int> cell = nbs->cell;

    /* Loop over the atom range and accumulate the reordered forces */
    switch (nbat->FFormat)
            const real *fnb = out[0].f.data();

            for (int a = a0; a < a1; a++)
                int i = cell[a]*nbat->fstride;

                f[a][XX] += fnb[i];
                f[a][YY] += fnb[i+1];
                f[a][ZZ] += fnb[i+2];

            for (int a = a0; a < a1; a++)
                int i = cell[a]*nbat->fstride;

                for (int fa = 0; fa < nfa; fa++)
                    f[a][XX] += out[fa].f[i];
                    f[a][YY] += out[fa].f[i+1];
                    f[a][ZZ] += out[fa].f[i+2];

            const real *fnb = out[0].f.data();

            for (int a = a0; a < a1; a++)
                int i = atom_to_x_index<c_packX4>(cell[a]);

                f[a][XX] += fnb[i+XX*c_packX4];
                f[a][YY] += fnb[i+YY*c_packX4];
                f[a][ZZ] += fnb[i+ZZ*c_packX4];

            for (int a = a0; a < a1; a++)
                int i = atom_to_x_index<c_packX4>(cell[a]);

                for (int fa = 0; fa < nfa; fa++)
                    f[a][XX] += out[fa].f[i+XX*c_packX4];
                    f[a][YY] += out[fa].f[i+YY*c_packX4];
                    f[a][ZZ] += out[fa].f[i+ZZ*c_packX4];

            const real *fnb = out[0].f.data();

            for (int a = a0; a < a1; a++)
                int i = atom_to_x_index<c_packX8>(cell[a]);

                f[a][XX] += fnb[i+XX*c_packX8];
                f[a][YY] += fnb[i+YY*c_packX8];
                f[a][ZZ] += fnb[i+ZZ*c_packX8];

            for (int a = a0; a < a1; a++)
                int i = atom_to_x_index<c_packX8>(cell[a]);

                for (int fa = 0; fa < nfa; fa++)
                    f[a][XX] += out[fa].f[i+XX*c_packX8];
                    f[a][YY] += out[fa].f[i+YY*c_packX8];
                    f[a][ZZ] += out[fa].f[i+ZZ*c_packX8];

            gmx_incons("Unsupported nbnxn_atomdata_t format");
static inline unsigned char reverse_bits(unsigned char b)
    /* http://graphics.stanford.edu/~seander/bithacks.html#ReverseByteWith64BitsDiv */
    return (b * 0x0202020202ULL & 0x010884422010ULL) % 1023;
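
/* Sanity-check sketch (not in the original file, hypothetical name) for the
 * bit hack above: it must agree with a naive bit reversal for every byte
 * value.
 */
gmx_unused static inline bool reverseBitsSketchAgrees()
{
    for (int b = 0; b < 256; b++)
    {
        unsigned char naive = 0;
        for (int bit = 0; bit < 8; bit++)
        {
            naive = static_cast<unsigned char>((naive << 1) | ((b >> bit) & 1));
        }
        if (naive != reverse_bits(static_cast<unsigned char>(b)))
        {
            return false;
        }
    }
    return true;
}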
static void nbnxn_atomdata_add_nbat_f_to_f_treereduce(nbnxn_atomdata_t *nbat,
                                                      int nth)
    const nbnxn_buffer_flags_t *flags = &nbat->buffer_flags;

    int next_pow2 = 1<<(gmx::log2I(nth-1)+1);

    const int numOutputBuffers = nbat->out.size();
    GMX_ASSERT(numOutputBuffers == nth,
               "tree-reduce currently only works for numOutputBuffers==nth");

    memset(nbat->syncStep, 0, sizeof(*(nbat->syncStep))*nth);

#pragma omp parallel num_threads(nth)
        th = gmx_omp_get_thread_num();

        for (group_size = 2; group_size < 2*next_pow2; group_size *= 2)
            int index[2], group_pos, partner_pos, wu;
            int partner_th = th ^ (group_size/2);

#ifdef TMPI_ATOMICS
            /* wait on partner thread - replaces full barrier */
            int sync_th, sync_group_size;

            tMPI_Atomic_memory_barrier(); /* guarantee data is saved before marking work as done */
            tMPI_Atomic_set(&(nbat->syncStep[th]), group_size/2); /* mark previous step as completed */

            /* find thread to sync with. Equal to partner_th unless nth is not a power of two. */
            for (sync_th = partner_th, sync_group_size = group_size; sync_th >= nth && sync_group_size > 2; sync_group_size /= 2)
                sync_th &= ~(sync_group_size/4);

            if (sync_th < nth) /* otherwise there is nothing to sync; index[1] will be >= nout */
                /* wait on the thread which computed input data in previous step */
                while (tMPI_Atomic_get(static_cast<volatile tMPI_Atomic_t*>(&(nbat->syncStep[sync_th]))) < group_size/2)

            /* guarantee that no later load happens before wait loop is finished */
            tMPI_Atomic_memory_barrier();
#else  /* TMPI_ATOMICS */
#pragma omp barrier
#endif

            /* Calculate buffers to sum (result goes into first buffer) */
            group_pos = th % group_size;
            index[0]  = th - group_pos;
            index[1]  = index[0] + group_size/2;

            /* If no second buffer, nothing to do */
            if (index[1] >= numOutputBuffers && group_size > 2)
                continue;

#if NBNXN_BUFFERFLAG_MAX_THREADS > 256
#error reverse_bits assumes max 256 threads
#endif
            /* Position is permuted so that one of the 2 vectors being added was computed on the same thread in the previous step.
             * This improves locality and makes it possible to sync with just a single thread between steps (= the levels in the btree).
             * The permutation which allows this corresponds to reversing the bits of the group position.
             */
            group_pos = reverse_bits(group_pos)/(256/group_size);
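            /* Example (illustrative): with group_size = 8, dividing by
             * 256/8 = 32 keeps the three reversed low bits, mapping group_pos
             * 0..7 to 0,4,2,6,1,5,3,7, i.e. a 3-bit bit-reversal.
             */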
            partner_pos = group_pos ^ 1;

            /* loop over two work-units (own and partner) */
            for (wu = 0; wu < 2; wu++)
                    if (partner_th < nth)
                        break; /* partner exists, so we don't have to do its work */

                        group_pos = partner_pos;

                /* Calculate the cell-block range for our thread */
                b0 = (flags->nflag* group_pos   )/group_size;
                b1 = (flags->nflag*(group_pos+1))/group_size;

                for (b = b0; b < b1; b++)
                    i0 =  b   *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
                    i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;

                    if (bitmask_is_set(flags->flag[b], index[1]) || group_size > 2)
                        const real *fIndex1 = nbat->out[index[1]].f.data();
#if GMX_SIMD
                        nbnxn_atomdata_reduce_reals_simd
#else
                        nbnxn_atomdata_reduce_reals
#endif
                            (nbat->out[index[0]].f.data(),
                             bitmask_is_set(flags->flag[b], index[0]) || group_size > 2,
                             &fIndex1, 1, i0, i1);
                    else if (!bitmask_is_set(flags->flag[b], index[0]))
                        nbnxn_atomdata_clear_reals(nbat->out[index[0]].f,
                                                   i0, i1);
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
static void nbnxn_atomdata_add_nbat_f_to_f_stdreduce(nbnxn_atomdata_t *nbat,
                                                     int nth)
#pragma omp parallel for num_threads(nth) schedule(static)
    for (int th = 0; th < nth; th++)
        const nbnxn_buffer_flags_t *flags;

        const real *fptr[NBNXN_BUFFERFLAG_MAX_THREADS];

        flags = &nbat->buffer_flags;

        /* Calculate the cell-block range for our thread */
        int b0 = (flags->nflag* th   )/nth;
        int b1 = (flags->nflag*(th+1))/nth;

        for (int b = b0; b < b1; b++)
            int i0 =  b   *NBNXN_BUFFERFLAG_SIZE*nbat->fstride;
            int i1 = (b+1)*NBNXN_BUFFERFLAG_SIZE*nbat->fstride;

            for (int out = 1; out < gmx::ssize(nbat->out); out++)
                if (bitmask_is_set(flags->flag[b], out))
                    fptr[nfptr++] = nbat->out[out].f.data();

#if GMX_SIMD
                nbnxn_atomdata_reduce_reals_simd
#else
                nbnxn_atomdata_reduce_reals
#endif
                    (nbat->out[0].f.data(),
                     bitmask_is_set(flags->flag[b], 0),
                     fptr, nfptr, i0, i1);

            else if (!bitmask_is_set(flags->flag[b], 0))
                nbnxn_atomdata_clear_reals(nbat->out[0].f,
                                           i0, i1);
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
/* Add the force array(s) from nbnxn_atomdata_t to f */
void
nonbonded_verlet_t::atomdata_add_nbat_f_to_f(const Nbnxm::AtomLocality locality,
                                             rvec *f,
                                             gmx_wallcycle *wcycle)
    /* Skip the non-local reduction if there was no non-local work to do */
    if (locality == Nbnxm::AtomLocality::NonLocal &&
        pairlistSets().pairlistSet(Nbnxm::InteractionLocality::NonLocal).nblGpu[0]->sci.empty())
        return;

    wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
    wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);

    nbs_cycle_start(&nbs->cc[enbsCCreducef]);

    switch (locality)
        case Nbnxm::AtomLocality::All:
        case Nbnxm::AtomLocality::Count:
            na = nbs->natoms_nonlocal;
        case Nbnxm::AtomLocality::Local:
            na = nbs->natoms_local;
        case Nbnxm::AtomLocality::NonLocal:
            a0 = nbs->natoms_local;
            na = nbs->natoms_nonlocal - nbs->natoms_local;

    int nth = gmx_omp_nthreads_get(emntNonbonded);

    if (nbat->out.size() > 1)
        if (locality != Nbnxm::AtomLocality::All)
            gmx_incons("add_f_to_f called with nout>1 and locality!=eatAll");

        /* Reduce the force thread output buffers into buffer 0, before adding
         * them to the, differently ordered, "real" force buffer.
         */
        if (nbat->bUseTreeReduce)
            nbnxn_atomdata_add_nbat_f_to_f_treereduce(nbat.get(), nth);
        else
            nbnxn_atomdata_add_nbat_f_to_f_stdreduce(nbat.get(), nth);

#pragma omp parallel for num_threads(nth) schedule(static)
    for (int th = 0; th < nth; th++)
            nbnxn_atomdata_add_nbat_f_to_f_part(nbs.get(), nbat.get(),
                                                nbat->out,
                                                1,
                                                a0 + ((th + 0)*na)/nth,
                                                a0 + ((th + 1)*na)/nth,
                                                f);
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;

    nbs_cycle_stop(&nbs->cc[enbsCCreducef]);

    wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
    wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
/* Adds the shift forces from nbnxn_atomdata_t to fshift */
void nbnxn_atomdata_add_nbat_fshift_to_fshift(const nbnxn_atomdata_t *nbat,
                                              rvec *fshift)
    gmx::ArrayRef<const nbnxn_atomdata_output_t> outputBuffers = nbat->out;

    if (outputBuffers.size() == 1)
        /* When there is a single output object, with CPU or GPU, shift forces
         * have been written directly to the main buffer instead of to the
         * (single) thread local output object. There is nothing to reduce.
         */
        return;

    for (int s = 0; s < SHIFTS; s++)
        rvec sum;
        clear_rvec(sum);
        for (const nbnxn_atomdata_output_t &out : outputBuffers)
            sum[XX] += out.fshift[s*DIM+XX];
            sum[YY] += out.fshift[s*DIM+YY];
            sum[ZZ] += out.fshift[s*DIM+ZZ];

        rvec_inc(fshift[s], sum);