/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief This file defines functions for managing threading of listed
 * interactions.
 *
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_listed_forces
 */
#include "gmxpre.h"

#include "manage_threading.h"

#include "config.h"

#include <cassert>
#include <climits>
#include <cstdio>
#include <cstdlib>

#include <memory>

#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/topology/ifunc.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"

#include "listed_internal.h"
#include "utilities.h"
/*! \brief struct for passing all data required for a function type */
typedef struct
{
    const t_ilist* il;    /**< pointer to t_ilist entry corresponding to ftype */
    int            ftype; /**< the function type index */
    int            nat;   /**< nr of atoms involved in a single ftype interaction */
} ilist_data_t;
/*! \brief Divides listed interactions over threads
 *
 * This routine attempts to divide all interactions of the numType bonded
 * types stored in ild over the threads such that each thread has roughly
 * equal load and different threads avoid touching the same atoms as much
 * as possible.
 */
static void divide_bondeds_by_locality(bonded_threading_t* bt, int numType, const ilist_data_t* ild)
{
    int nat_tot, nat_sum;
    int ind[F_NRE];    /* index into the ild[].il->iatoms */
    int at_ind[F_NRE]; /* index of the first atom of the interaction at ind */
    int f, t;

    assert(numType <= F_NRE);
    nat_tot = 0;
    for (f = 0; f < numType; f++)
    {
        /* Sum #bondeds*#atoms_per_bond over all bonded types */
        nat_tot += ild[f].il->nr / (ild[f].nat + 1) * ild[f].nat;
        /* The start bound for thread 0 is 0 for all interactions */
        ind[f] = 0;
        /* Initialize the next atom index array */
        assert(ild[f].il->nr > 0);
        at_ind[f] = ild[f].il->iatoms[1];
    }
    nat_sum = 0;
    /* Loop over the end bounds of the nthreads threads to determine
     * which interactions threads 0 to nthreads-1 shall calculate.
     *
     * NOTE: The cost of these combined loops is #interactions*numType.
     * This code runs single-threaded (it is difficult to parallelize
     * over threads), so the relative cost of this function increases
     * linearly with the number of threads. Since the inner-most loop
     * is cheap and this is done only at DD repartitioning, the cost should
     * be negligible. At high thread count many other parts of the code
     * scale the same way, so it's (currently) not worth improving this.
     */
    for (t = 1; t <= bt->nthreads; t++)
    {
        int nat_thread;

        /* Here we assume that the computational cost is proportional
         * to the number of atoms in the interaction. This is a rough
         * measure, but roughly correct. Usually there are very few
         * interactions anyhow and they are distributed relatively
         * uniformly. Proper and RB dihedrals are often distributed
         * non-uniformly, but their cost is roughly equal.
         */
        nat_thread = (nat_tot * t) / bt->nthreads;
        while (nat_sum < nat_thread)
        {
            /* To divide bonds based on atom order, we compare
             * the index of the first atom in the bonded interaction.
             * This works well, since the domain decomposition generates
             * bondeds in order of the atoms by looking up interactions
             * which are linked to the first atom in each interaction.
             * It usually also works well without DD, since then the atoms
             * in bonded interactions are usually in increasing order.
             * If they are not assigned in increasing order, the balancing
             * is still good, but the memory access and reduction cost will
             * be higher.
             */
            int f_min;
            /* Find out which of the types has the lowest atom index */
            f_min = 0;
            for (f = 1; f < numType; f++)
            {
                if (at_ind[f] < at_ind[f_min])
                {
                    f_min = f;
                }
            }
            assert(f_min >= 0 && f_min < numType);
            /* Assign the interaction with the lowest atom index (of type
             * index f_min) to thread t-1 by increasing ind.
             */
            ind[f_min] += ild[f_min].nat + 1;
            nat_sum += ild[f_min].nat;
            /* Update the first unassigned atom index for this type */
            if (ind[f_min] < ild[f_min].il->nr)
            {
                at_ind[f_min] = ild[f_min].il->iatoms[ind[f_min] + 1];
            }
            else
            {
                /* We have assigned all interactions of this type.
                 * Setting at_ind to INT_MAX ensures this type will not be
                 * chosen in the for loop above during next iterations.
                 */
                at_ind[f_min] = INT_MAX;
            }
        }
        /* Store the bonded end boundaries (at index t) for thread t-1 */
        for (f = 0; f < numType; f++)
        {
            bt->workDivision.setBound(ild[f].ftype, t, ind[f]);
        }
    }
    for (f = 0; f < numType; f++)
    {
        assert(ind[f] == ild[f].il->nr);
    }
}
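
/* A minimal standalone sketch (illustrative only, not used by GROMACS) of
 * the merge step in divide_bondeds_by_locality() above: two bond-like types
 * (nat = 2) divided over two threads by comparing first-atom indices.
 * The iatoms layout matches t_ilist: a parameter index, then the atoms.
 */
static void exampleDivideByLocality()
{
    const int  il0[]    = { 0, 0, 1, 0, 4, 5 }; /* type 0: bonds 0-1 and 4-5 */
    const int  il1[]    = { 0, 2, 3, 0, 6, 7 }; /* type 1: bonds 2-3 and 6-7 */
    const int* il[2]    = { il0, il1 };
    const int  nat      = 2;
    const int  natTot   = 4 * nat; /* 4 bonds, 2 atoms of work each */
    int        ind[2]   = { 0, 0 };
    int        atInd[2] = { il0[1], il1[1] };
    int        natSum   = 0;

    for (int t = 1; t <= 2; t++)
    {
        const int natThread = (natTot * t) / 2; /* end bound for thread t-1 */
        while (natSum < natThread)
        {
            /* Pick the type whose next interaction has the lowest first atom */
            const int fMin = (atInd[0] <= atInd[1]) ? 0 : 1;
            ind[fMin] += nat + 1;
            natSum += nat;
            atInd[fMin] = (ind[fMin] < 6) ? il[fMin][ind[fMin] + 1] : INT_MAX;
        }
        /* After t = 1: ind = {3,3}, so thread 0 gets bonds 0-1 and 2-3;
         * thread 1 then gets 4-5 and 6-7. Each thread's atoms stay
         * contiguous and disjoint, which keeps the reduction cheap.
         */
    }
}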
//! Return whether function type \p ftype in \p idef has perturbed interactions
static bool ftypeHasPerturbedEntries(const t_idef& idef, int ftype)
{
    GMX_ASSERT(idef.ilsort == ilsortNO_FE || idef.ilsort == ilsortFE_SORTED,
               "Perturbed interactions should be sorted here");

    const t_ilist& ilist = idef.il[ftype];

    return (idef.ilsort != ilsortNO_FE && idef.numNonperturbedInteractions[ftype] != ilist.nr);
}
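
/* Worked example (hypothetical numbers) for the check above: with
 * ilsortFE_SORTED the perturbed interactions are stored at the end of each
 * list, so if il[ftype].nr == 12 iatoms entries (4 bonds at 3 entries each)
 * while numNonperturbedInteractions[ftype] == 9, the last bond is perturbed
 * and this type must stay on the CPU.
 */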
//! Divides bonded interactions over threads and GPU
static void divide_bondeds_over_threads(bonded_threading_t* bt, bool useGpuForBondeds, const t_idef& idef)
{
    ilist_data_t ild[F_NRE];

    GMX_ASSERT(bt->nthreads > 0, "Must have positive number of threads");
    const int numThreads = bt->nthreads;

    bt->haveBondeds      = false;
    int    numType       = 0;
    size_t fTypeGpuIndex = 0;
    for (int fType = 0; fType < F_NRE; fType++)
    {
        if (!ftype_is_bonded_potential(fType))
        {
            continue;
        }

        const t_ilist& il                     = idef.il[fType];
        int            nrToAssignToCpuThreads = il.nr;

        if (useGpuForBondeds && fTypeGpuIndex < gmx::fTypesOnGpu.size()
            && gmx::fTypesOnGpu[fTypeGpuIndex] == fType)
        {
            fTypeGpuIndex++;

            /* Perturbation is not implemented in the GPU bonded kernels.
             * But instead of doing it all on the CPU, we could do only
             * the actually perturbed interactions on the CPU.
             */
            if (!ftypeHasPerturbedEntries(idef, fType))
            {
                /* We will assign this interaction type to the GPU */
                nrToAssignToCpuThreads = 0;
            }
        }
        if (nrToAssignToCpuThreads > 0)
        {
            bt->haveBondeds = true;
        }
        if (nrToAssignToCpuThreads == 0)
        {
            /* No interactions, avoid all the integer math below */
            for (int t = 0; t <= numThreads; t++)
            {
                bt->workDivision.setBound(fType, t, 0);
            }
        }
        else if (numThreads <= bt->max_nthread_uniform || fType == F_DISRES)
        {
            /* On up to 4 threads, load balancing the bonded work
             * is more important than minimizing the reduction cost.
             */
            const int stride = 1 + NRAL(fType);

            for (int t = 0; t <= numThreads; t++)
            {
                /* Divide equally over the threads */
                int nr_t = (((nrToAssignToCpuThreads / stride) * t) / numThreads) * stride;

                if (fType == F_DISRES)
                {
                    /* Ensure that distance restraint pairs with the same label
                     * end up on the same thread.
                     */
                    while (nr_t > 0 && nr_t < nrToAssignToCpuThreads
                           && idef.iparams[il.iatoms[nr_t]].disres.label
                                      == idef.iparams[il.iatoms[nr_t - stride]].disres.label)
                    {
                        nr_t += stride;
                    }
                }

                bt->workDivision.setBound(fType, t, nr_t);
            }
        }
        else
        {
            /* Add this fType to the list to be distributed */
            int nat            = NRAL(fType);
            ild[numType].ftype = fType;
            ild[numType].il    = &il;
            ild[numType].nat   = nat;

            /* The first index for the thread division is always 0 */
            bt->workDivision.setBound(fType, 0, 0);

            numType++;
        }
    }
    if (numType > 0)
    {
        divide_bondeds_by_locality(bt, numType, ild);
    }
    if (debug)
    {
        int f, t;

        fprintf(debug, "Division of bondeds over threads:\n");
        for (f = 0; f < F_NRE; f++)
        {
            if (ftype_is_bonded_potential(f) && idef.il[f].nr > 0)
            {
                fprintf(debug, "%16s", interaction_function[f].name);
                for (t = 0; t < numThreads; t++)
                {
                    fprintf(debug, " %4d",
                            (bt->workDivision.bound(f, t + 1) - bt->workDivision.bound(f, t))
                                    / (1 + NRAL(f)));
                }
                fprintf(debug, "\n");
            }
        }
    }
}
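
/* Worked example (hypothetical numbers) of the uniform division above:
 * 10 angles (NRAL = 3, stride = 4, so il.nr = 40) over 4 threads give bounds
 *   nr_t = ((40/4 * t) / 4) * 4  =  0, 8, 20, 28, 40
 * i.e. 2, 3, 2 and 3 angles per thread. Rounding down to a multiple of the
 * stride guarantees that no interaction is split across two threads.
 */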
//! Construct a reduction mask for which parts (blocks) of the force array are touched on which thread task
static void calc_bonded_reduction_mask(int                       natoms,
                                       f_thread_t*               f_thread,
                                       const t_idef&             idef,
                                       int                       thread,
                                       const bonded_threading_t& bondedThreading)
{
    static_assert(BITMASK_SIZE == GMX_OPENMP_MAX_THREADS,
                  "For the error message below we assume these two are equal.");

    if (bondedThreading.nthreads > BITMASK_SIZE)
    {
        gmx_fatal(FARGS,
                  "You are using %d OpenMP threads, which is larger than GMX_OPENMP_MAX_THREADS "
                  "(%d). Decrease the number of OpenMP threads or rebuild GROMACS with a larger "
                  "value for GMX_OPENMP_MAX_THREADS passed to CMake.",
                  bondedThreading.nthreads, GMX_OPENMP_MAX_THREADS);
    }
    GMX_ASSERT(bondedThreading.nthreads <= BITMASK_SIZE,
               "We need at least nthreads bits in the mask");
    int nblock = (natoms + reduction_block_size - 1) >> reduction_block_bits;

    if (nblock > f_thread->block_nalloc)
    {
        f_thread->block_nalloc = over_alloc_large(nblock);
        srenew(f_thread->mask, f_thread->block_nalloc);
        srenew(f_thread->block_index, f_thread->block_nalloc);
        // NOTE: It seems f_thread->f does not need to be aligned
        sfree_aligned(f_thread->f);
        snew_aligned(f_thread->f, f_thread->block_nalloc * reduction_block_size, 128);
    }
    gmx_bitmask_t* mask = f_thread->mask;

    for (int b = 0; b < nblock; b++)
    {
        bitmask_clear(&mask[b]);
    }
    for (int ftype = 0; ftype < F_NRE; ftype++)
    {
        if (ftype_is_bonded_potential(ftype))
        {
            int nb = idef.il[ftype].nr;
            if (nb > 0)
            {
                int nat1 = interaction_function[ftype].nratoms + 1;

                int nb0 = bondedThreading.workDivision.bound(ftype, thread);
                int nb1 = bondedThreading.workDivision.bound(ftype, thread + 1);

                for (int i = nb0; i < nb1; i += nat1)
                {
                    for (int a = 1; a < nat1; a++)
                    {
                        bitmask_set_bit(&mask[idef.il[ftype].iatoms[i + a] >> reduction_block_bits], thread);
                    }
                }
            }
        }
    }
    /* Make an index of the blocks our thread touches, so we can do fast
     * force buffer clearing.
     */
    f_thread->nblock_used = 0;
    for (int b = 0; b < nblock; b++)
    {
        if (bitmask_is_set(mask[b], thread))
        {
            f_thread->block_index[f_thread->nblock_used++] = b;
        }
    }
}
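
/* A minimal sketch (illustrative only: plain unsigned long instead of
 * gmx_bitmask_t, example block size of 32 atoms) of the mask construction
 * above: each atom of an interaction maps to a reduction block via a right
 * shift, and the calculating thread's bit is set for that block.
 */
static void exampleReductionMask()
{
    const int     blockBits = 5;                 /* 2^5 = 32 atoms per block */
    const int     atoms[]   = { 100, 101, 130 }; /* one angle's three atoms  */
    const int     thread    = 2;
    unsigned long mask[8]   = { 0 };

    for (int a : atoms)
    {
        mask[a >> blockBits] |= 1UL << thread; /* sets blocks 3 and 4 */
    }
    /* Thread 2 now only has to clear and reduce blocks 3 and 4 of its
     * private force buffer instead of all 8 blocks.
     */
}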
void setup_bonded_threading(bonded_threading_t* bt, int numAtoms, bool useGpuForBondeds, const t_idef& idef)
{
    assert(bt->nthreads >= 1);

    /* Divide the bonded interaction over the threads */
    divide_bondeds_over_threads(bt, useGpuForBondeds, idef);

    if (!bt->haveBondeds)
    {
        /* We don't have bondeds, so there is nothing to reduce */
        return;
    }
    /* Determine to which blocks each thread's bonded force calculation
     * contributes. Store this as a mask for each thread.
     */
#pragma omp parallel for num_threads(bt->nthreads) schedule(static)
    for (int t = 0; t < bt->nthreads; t++)
    {
        try
        {
            calc_bonded_reduction_mask(numAtoms, bt->f_t[t].get(), idef, t, *bt);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }
    /* Reduce the masks over the threads and determine which blocks
     * we need to reduce over.
     */
    int nblock_tot = (numAtoms + reduction_block_size - 1) >> reduction_block_bits;
    /* Ensure we have sufficient space for all blocks */
    if (static_cast<size_t>(nblock_tot) > bt->block_index.size())
    {
        bt->block_index.resize(nblock_tot);
    }
    if (static_cast<size_t>(nblock_tot) > bt->mask.size())
    {
        bt->mask.resize(nblock_tot);
    }
    int ctot        = 0;
    bt->nblock_used = 0;
    for (int b = 0; b < nblock_tot; b++)
    {
        gmx_bitmask_t* mask = &bt->mask[b];

        /* Generate the union over the threads of the bitmask */
        bitmask_clear(mask);
        for (int t = 0; t < bt->nthreads; t++)
        {
            bitmask_union(mask, bt->f_t[t]->mask[b]);
        }
        if (!bitmask_is_zero(*mask))
        {
            bt->block_index[bt->nblock_used++] = b;
        }

        if (debug)
        {
            int c = 0;
            for (int t = 0; t < bt->nthreads; t++)
            {
                if (bitmask_is_set(*mask, t))
                {
                    c++;
                }
            }
            ctot += c;

            if (gmx_debug_at)
            {
                fprintf(debug, "block %d flags %s count %d\n", b, to_hex_string(*mask).c_str(), c);
            }
        }
    }
    if (debug)
    {
        fprintf(debug, "Number of %d atom blocks to reduce: %d\n", reduction_block_size, bt->nblock_used);
        fprintf(debug, "Reduction density %.2f for touched blocks only %.2f\n",
                ctot * reduction_block_size / static_cast<double>(numAtoms),
                ctot / static_cast<double>(bt->nblock_used));
    }
}
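
/* A simplified sketch (illustrative only, bool flags instead of bitmasks)
 * of the union step above: combine the per-thread touch flags per block and
 * keep an index of the blocks any thread touched, so the reduction loop can
 * skip the untouched ones.
 */
static int exampleBlockUnion()
{
    const bool touches[2][4] = { { true, false, true, false },    /* thread 0 */
                                 { false, false, true, false } }; /* thread 1 */
    int blockIndex[4];
    int nblockUsed = 0;

    for (int b = 0; b < 4; b++)
    {
        bool used = false;
        for (int t = 0; t < 2; t++)
        {
            used = used || touches[t][b];
        }
        if (used)
        {
            blockIndex[nblockUsed++] = b; /* collects blocks 0 and 2 */
        }
    }
    return nblockUsed; /* 2: only half of the blocks need reducing */
}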
void tear_down_bonded_threading(bonded_threading_t* bt)
{
    delete bt;
}
f_thread_t::f_thread_t(int numEnergyGroups) : grpp(numEnergyGroups)
{
    snew(fshift, SHIFTS);
}

f_thread_t::~f_thread_t()
{
    sfree(mask);
    sfree(fshift);
    sfree(block_index);
    sfree_aligned(f);
}
bonded_threading_t::bonded_threading_t(const int numThreads, const int numEnergyGroups) :
    nthreads(numThreads),
    nblock_used(0),
    haveBondeds(false),
    workDivision(nthreads),
    foreignLambdaWorkDivision(1)
{
    f_t.resize(numThreads);
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int t = 0; t < nthreads; t++)
    {
        try
        {
            /* Note that thread 0 uses the global fshift and energy arrays,
             * but to keep the code simple, we initialize all data here.
             */
            f_t[t] = std::make_unique<f_thread_t>(numEnergyGroups);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }
}
bonded_threading_t* init_bonded_threading(FILE* fplog, const int nenergrp)
{
    /* These thread local data structures are used for bondeds only.
     *
     * Note that we also use these structures when running single-threaded.
     * This is because the bonded force buffer uses type rvec4, whereas
     * the normal force buffer uses type rvec. This leads to a little
     * reduction overhead, but the speed gain in the bonded calculations
     * of doing transposeScatterIncr/DecrU with alignment 4 instead of 3
     * is much larger than the reduction overhead.
     */
    bonded_threading_t* bt = new bonded_threading_t(gmx_omp_nthreads_get(emntBonded), nenergrp);
    /* The optimal value after which to switch from uniform to localized
     * bonded interaction distribution is 3, 4 or 5 depending on the system
     * and hardware.
     */
    const int max_nthread_uniform = 4;
    char*     ptr;

    if ((ptr = getenv("GMX_BONDED_NTHREAD_UNIFORM")) != nullptr)
    {
        sscanf(ptr, "%d", &bt->max_nthread_uniform);
        if (fplog != nullptr)
        {
            fprintf(fplog, "\nMax threads for uniform bonded distribution set to %d by env.var.\n",
                    bt->max_nthread_uniform);
        }
    }
    else
    {
        bt->max_nthread_uniform = max_nthread_uniform;
    }

    return bt;
}
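
/* Usage sketch for the override above: setting the environment variable
 * before starting a run, e.g.
 *   GMX_BONDED_NTHREAD_UNIFORM=6 gmx mdrun ...
 * makes runs with up to 6 OpenMP threads per rank use the uniform division
 * instead of the locality-based one.
 */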