/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 * \brief This file defines functions for managing threading of listed
 * interactions.
 *
 * \author Mark Abraham <mark.j.abraham@gmail.com>
 * \ingroup module_listed_forces
 */
#include "gmxpre.h"

#include "manage_threading.h"

#include "config.h"

#include <cassert>
#include <climits>
#include <cstdio>
#include <cstdlib>

#include <memory>

#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/topology/ifunc.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/smalloc.h"

#include "listed_internal.h"
#include "utilities.h"

/*! \brief struct for passing all data required for a function type */
typedef struct
{
    const t_ilist* il;    /**< pointer to t_ilist entry corresponding to ftype */
    int            ftype; /**< the function type index */
    int            nat;   /**< nr of atoms involved in a single ftype interaction */
} ilist_data_t;

/*! \brief Divides listed interactions over threads
 *
 * This routine attempts to divide all interactions of the numType bonded
 * types stored in ild over the threads such that each thread has roughly
 * equal load and different threads avoid touching the same atoms as much
 * as possible.
 */
static void divide_bondeds_by_locality(bonded_threading_t* bt, int numType, const ilist_data_t* ild)
{
    int nat_tot, nat_sum;
    int ind[F_NRE];    /* index into the ild[].il->iatoms */
    int at_ind[F_NRE]; /* index of the first atom of the interaction at ind */
    int f, t;

    assert(numType <= F_NRE);

    nat_tot = 0;
    for (f = 0; f < numType; f++)
    {
        /* Sum #bondeds*#atoms_per_bond over all bonded types */
        nat_tot += ild[f].il->nr / (ild[f].nat + 1) * ild[f].nat;
        /* The start bound for thread 0 is 0 for all interactions */
        ind[f] = 0;
        /* Initialize the next atom index array */
        assert(ild[f].il->nr > 0);
        at_ind[f] = ild[f].il->iatoms[1];
    }

    nat_sum = 0;
    /* Loop over the end bounds of the nthreads threads to determine
     * which interactions threads 0 to nthreads-1 shall calculate.
     *
     * NOTE: The cost of these combined loops is #interactions*numType.
     * This code is running single threaded (difficult to parallelize
     * over threads). So the relative cost of this function increases
     * linearly with the number of threads. Since the inner-most loop
     * is cheap and this is done only at DD repartitioning, the cost should
     * be negligible. At high thread count many other parts of the code
     * scale the same way, so it's (currently) not worth improving this.
     */
    for (t = 1; t <= bt->nthreads; t++)
    {
        int nat_thread;

        /* Here we assume that the computational cost is proportional
         * to the number of atoms in the interaction. This is a rough
         * measure, but roughly correct. Usually there are very few
         * interactions anyhow and they are distributed relatively
         * uniformly. Proper and RB dihedrals are often distributed
         * non-uniformly, but their cost is roughly equal.
         */
        nat_thread = (nat_tot * t) / bt->nthreads;

        while (nat_sum < nat_thread)
        {
            /* To divide bonds based on atom order, we compare
             * the index of the first atom in the bonded interaction.
             * This works well, since the domain decomposition generates
             * bondeds in order of the atoms by looking up interactions
             * which are linked to the first atom in each interaction.
             * It usually also works well without DD, since then the atoms
             * in bonded interactions are usually in increasing order.
             * If they are not assigned in increasing order, the balancing
             * is still good, but the memory access and reduction cost will
             * be higher.
             */
            int f_min;

            /* Find out which of the types has the lowest atom index */
            f_min = 0;
            for (f = 1; f < numType; f++)
            {
                if (at_ind[f] < at_ind[f_min])
                {
                    f_min = f;
                }
            }
            assert(f_min >= 0 && f_min < numType);

            /* Assign the interaction with the lowest atom index (of type
             * index f_min) to thread t-1 by increasing ind.
             */
            ind[f_min] += ild[f_min].nat + 1;
            nat_sum += ild[f_min].nat;

            /* Update the first unassigned atom index for this type */
            if (ind[f_min] < ild[f_min].il->nr)
            {
                at_ind[f_min] = ild[f_min].il->iatoms[ind[f_min] + 1];
            }
            else
            {
                /* We have assigned all interactions of this type.
                 * Setting at_ind to INT_MAX ensures this type will not be
                 * chosen in the for loop above during next iterations.
                 */
                at_ind[f_min] = INT_MAX;
            }
        }

        /* Store the bonded end boundaries (at index t) for thread t-1 */
        for (f = 0; f < numType; f++)
        {
            bt->workDivision.setBound(ild[f].ftype, t, ind[f]);
        }
    }

    for (f = 0; f < numType; f++)
    {
        assert(ind[f] == ild[f].il->nr);
    }
}
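
/* Worked example for divide_bondeds_by_locality() above (illustrative
 * numbers, not part of the build): take 2 threads and two types, angles
 * (nat = 3, stride 4) with 4 interactions and bonds (nat = 2, stride 3)
 * with 2 interactions, so nat_tot = 4*3 + 2*2 = 16. The end bound for
 * thread 0 is reached once nat_sum >= (16*1)/2 = 8. Each iteration of the
 * while loop pops whichever type currently starts at the lowest atom
 * index, exactly like a 2-way merge of the two atom-sorted interaction
 * lists, so the interactions assigned to one thread touch a contiguous
 * range of atoms as far as the input ordering allows.
 */
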
//! Return whether function type \p ftype in \p idef has perturbed interactions
static bool ftypeHasPerturbedEntries(const t_idef& idef, int ftype)
{
    GMX_ASSERT(idef.ilsort == ilsortNO_FE || idef.ilsort == ilsortFE_SORTED,
               "Perturbed interactions should be sorted here");

    const t_ilist& ilist = idef.il[ftype];

    return (idef.ilsort != ilsortNO_FE && idef.numNonperturbedInteractions[ftype] != ilist.nr);
}
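
/* Illustration of the check above (hypothetical values): with
 * ilsort == ilsortFE_SORTED the perturbed interactions are stored at the
 * end of ilist.iatoms, so e.g. ilist.nr == 30 with
 * idef.numNonperturbedInteractions[ftype] == 25 means the trailing
 * entries belong to perturbed interactions and the function returns
 * true; when the two counts are equal there are none and it returns false.
 */
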
//! Divides bonded interactions over threads and GPU
static void divide_bondeds_over_threads(bonded_threading_t* bt, bool useGpuForBondeds, const t_idef& idef)
{
    ilist_data_t ild[F_NRE];

    GMX_ASSERT(bt->nthreads > 0, "Must have positive number of threads");
    const int numThreads = bt->nthreads;

    bt->haveBondeds      = false;
    int    numType       = 0;
    size_t fTypeGpuIndex = 0;
    for (int fType = 0; fType < F_NRE; fType++)
    {
        if (!ftype_is_bonded_potential(fType))
        {
            continue;
        }

        const t_ilist& il                     = idef.il[fType];
        int            nrToAssignToCpuThreads = il.nr;

        if (useGpuForBondeds && fTypeGpuIndex < gmx::fTypesOnGpu.size()
            && gmx::fTypesOnGpu[fTypeGpuIndex] == fType)
        {
            fTypeGpuIndex++;

            /* Perturbation is not implemented in the GPU bonded kernels.
             * But instead of doing all on the CPU, we could do only
             * the actually perturbed interactions on the CPU.
             */
            if (!ftypeHasPerturbedEntries(idef, fType))
            {
                /* We will assign this interaction type to the GPU */
                nrToAssignToCpuThreads = 0;
            }
        }

        if (nrToAssignToCpuThreads > 0)
        {
            bt->haveBondeds = true;
        }

        if (nrToAssignToCpuThreads == 0)
        {
            /* No interactions, avoid all the integer math below */
            for (int t = 0; t <= numThreads; t++)
            {
                bt->workDivision.setBound(fType, t, 0);
            }
        }
        else if (numThreads <= bt->max_nthread_uniform || fType == F_DISRES)
        {
            /* On up to 4 threads, load balancing the bonded work
             * is more important than minimizing the reduction cost.
             */

            const int stride = 1 + NRAL(fType);

            for (int t = 0; t <= numThreads; t++)
            {
                /* Divide equally over the threads */
                int nr_t = (((nrToAssignToCpuThreads / stride) * t) / numThreads) * stride;
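
                /* Worked example of the division above (illustrative
                 * numbers): for a 4-atom interaction type, stride = 5;
                 * with nrToAssignToCpuThreads = 35 (7 interactions) and
                 * numThreads = 4, nr_t at t = 0..4 evaluates to
                 * 0, 5, 15, 25, 35, i.e. the threads get 1, 2, 2 and 2
                 * interactions. Because of the final multiplication by
                 * stride, every bound is a multiple of stride, so an
                 * interaction is never split across threads.
                 */
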
                if (fType == F_DISRES)
                {
                    /* Ensure that distance restraint pairs with the same label
                     * end up on the same thread.
                     */
                    while (nr_t > 0 && nr_t < nrToAssignToCpuThreads
                           && idef.iparams[il.iatoms[nr_t]].disres.label
                                      == idef.iparams[il.iatoms[nr_t - stride]].disres.label)
                    {
                        nr_t += stride;
                    }
                }

                bt->workDivision.setBound(fType, t, nr_t);
            }
        }
        else
        {
            /* Add this fType to the list to be distributed */
            int nat            = NRAL(fType);
            ild[numType].ftype = fType;
            ild[numType].il    = &il;
            ild[numType].nat   = nat;

            /* The first index for the thread division is always 0 */
            bt->workDivision.setBound(fType, 0, 0);

            numType++;
        }
    }

    if (numType > 0)
    {
        divide_bondeds_by_locality(bt, numType, ild);
    }

    if (debug)
    {
        int f;

        fprintf(debug, "Division of bondeds over threads:\n");
        for (f = 0; f < F_NRE; f++)
        {
            if (ftype_is_bonded_potential(f) && idef.il[f].nr > 0)
            {
                int t;

                fprintf(debug, "%16s", interaction_function[f].name);
                for (t = 0; t < numThreads; t++)
                {
                    fprintf(debug, " %4d",
                            (bt->workDivision.bound(f, t + 1) - bt->workDivision.bound(f, t))
                                    / (1 + NRAL(f)));
                }
                fprintf(debug, "\n");
            }
        }
    }
}
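
/* The debug block above prints, for each active bonded type, the number of
 * interactions assigned to each thread, e.g. (hypothetical output for
 * 4 threads):
 *
 *            Angle  250  251  250  249
 *      Proper Dih.  400  400  401  399
 */
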
//! Construct a reduction mask for which parts (blocks) of the force array are touched on which thread task
static void calc_bonded_reduction_mask(int                       natoms,
                                       f_thread_t*               f_thread,
                                       const t_idef&             idef,
                                       int                       thread,
                                       const bonded_threading_t& bondedThreading)
{
    static_assert(BITMASK_SIZE == GMX_OPENMP_MAX_THREADS,
                  "For the error message below we assume these two are equal.");

    if (bondedThreading.nthreads > BITMASK_SIZE)
    {
        gmx_fatal(FARGS,
                  "You are using %d OpenMP threads, which is larger than GMX_OPENMP_MAX_THREADS "
                  "(%d). Decrease the number of OpenMP threads or rebuild GROMACS with a larger "
                  "value for GMX_OPENMP_MAX_THREADS passed to CMake.",
                  bondedThreading.nthreads, GMX_OPENMP_MAX_THREADS);
    }
    GMX_ASSERT(bondedThreading.nthreads <= BITMASK_SIZE,
               "We need at least nthreads bits in the mask");

    int nblock = (natoms + reduction_block_size - 1) >> reduction_block_bits;

    if (nblock > f_thread->block_nalloc)
    {
        f_thread->block_nalloc = over_alloc_large(nblock);
        srenew(f_thread->mask, f_thread->block_nalloc);
        srenew(f_thread->block_index, f_thread->block_nalloc);
        // NOTE: It seems f_thread->f does not need to be aligned
        sfree_aligned(f_thread->f);
        snew_aligned(f_thread->f, f_thread->block_nalloc * reduction_block_size, 128);
    }

    gmx_bitmask_t* mask = f_thread->mask;

    for (int b = 0; b < nblock; b++)
    {
        bitmask_clear(&mask[b]);
    }

    for (int ftype = 0; ftype < F_NRE; ftype++)
    {
        if (ftype_is_bonded_potential(ftype))
        {
            int nb = idef.il[ftype].nr;
            if (nb > 0)
            {
                int nat1 = interaction_function[ftype].nratoms + 1;

                int nb0 = bondedThreading.workDivision.bound(ftype, thread);
                int nb1 = bondedThreading.workDivision.bound(ftype, thread + 1);

                for (int i = nb0; i < nb1; i += nat1)
                {
                    for (int a = 1; a < nat1; a++)
                    {
                        bitmask_set_bit(&mask[idef.il[ftype].iatoms[i + a] >> reduction_block_bits],
                                        thread);
                    }
                }
            }
        }
    }

    /* Make an index of the blocks our thread touches, so we can do fast
     * force buffer clearing.
     */
    f_thread->nblock_used = 0;
    for (int b = 0; b < nblock; b++)
    {
        if (bitmask_is_set(mask[b], thread))
        {
            f_thread->block_index[f_thread->nblock_used++] = b;
        }
    }
}
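
/* Worked example for the mask construction above (illustrative values,
 * assuming reduction_block_size = 32, i.e. reduction_block_bits = 5): an
 * interaction on atoms {1000, 1001, 1002} handled by thread `thread` sets
 * bit `thread` in the mask of block 1000 >> 5 = 31 (the other two atoms
 * fall in the same block):
 *
 * \code
 * int atom  = 1000;
 * int block = atom >> reduction_block_bits; // = 31 for 32-atom blocks
 * bitmask_set_bit(&mask[block], thread);    // block is touched by thread
 * \endcode
 */
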
void setup_bonded_threading(bonded_threading_t* bt, int numAtoms, bool useGpuForBondeds, const t_idef& idef)
{
    assert(bt->nthreads >= 1);

    /* Divide the bonded interaction over the threads */
    divide_bondeds_over_threads(bt, useGpuForBondeds, idef);

    if (!bt->haveBondeds)
    {
        /* We don't have bondeds, so there is nothing to reduce */
        return;
    }

    /* Determine to which blocks each thread's bonded force calculation
     * contributes. Store this as a mask for each thread.
     */
#pragma omp parallel for num_threads(bt->nthreads) schedule(static)
    for (int t = 0; t < bt->nthreads; t++)
    {
        try
        {
            calc_bonded_reduction_mask(numAtoms, bt->f_t[t].get(), idef, t, *bt);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }

    /* Reduce the masks over the threads and determine which blocks
     * we need to reduce over.
     */
    int nblock_tot = (numAtoms + reduction_block_size - 1) >> reduction_block_bits;
    /* Ensure we have sufficient space for all blocks */
    if (static_cast<size_t>(nblock_tot) > bt->block_index.size())
    {
        bt->block_index.resize(nblock_tot);
    }
    if (static_cast<size_t>(nblock_tot) > bt->mask.size())
    {
        bt->mask.resize(nblock_tot);
    }

    bt->nblock_used = 0;
    int ctot        = 0;
    for (int b = 0; b < nblock_tot; b++)
    {
        gmx_bitmask_t* mask = &bt->mask[b];

        /* Generate the union over the threads of the bitmask */
        bitmask_clear(mask);
        for (int t = 0; t < bt->nthreads; t++)
        {
            bitmask_union(mask, bt->f_t[t]->mask[b]);
        }
        if (!bitmask_is_zero(*mask))
        {
            bt->block_index[bt->nblock_used++] = b;
        }

        if (debug)
        {
            int c = 0;
            for (int t = 0; t < bt->nthreads; t++)
            {
                if (bitmask_is_set(*mask, t))
                {
                    c++;
                }
            }
            ctot += c;

            if (gmx_debug_at)
            {
                fprintf(debug, "block %d flags %s count %d\n", b, to_hex_string(*mask).c_str(), c);
            }
        }
    }
    if (debug)
    {
        fprintf(debug, "Number of %d atom blocks to reduce: %d\n", reduction_block_size,
                bt->nblock_used);
        fprintf(debug, "Reduction density %.2f for touched blocks only %.2f\n",
                ctot * reduction_block_size / static_cast<double>(numAtoms),
                ctot / static_cast<double>(bt->nblock_used));
    }
}
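
/* Example of the reduction statistics above (illustrative numbers): if
 * ctot = 60 thread-block flags are set for numAtoms = 3200 in 32-atom
 * blocks (100 blocks total), of which bt->nblock_used = 50 are touched,
 * the overall reduction density is 60*32/3200 = 0.60 and the density for
 * touched blocks only is 60/50 = 1.20, i.e. on average 1.2 threads
 * contribute to each block that actually gets reduced.
 */
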
void tear_down_bonded_threading(bonded_threading_t* bt)
{
    delete bt;
}

f_thread_t::f_thread_t(int numEnergyGroups) : grpp(numEnergyGroups)
{
    snew(fshift, SHIFTS);
}

f_thread_t::~f_thread_t()
{
    sfree(mask);
    sfree(fshift);
    sfree(block_index);
    sfree_aligned(f);
}

bonded_threading_t::bonded_threading_t(const int numThreads, const int numEnergyGroups) :
    nthreads(numThreads),
    nblock_used(0),
    haveBondeds(false),
    workDivision(nthreads),
    foreignLambdaWorkDivision(1)
{
    f_t.resize(numThreads);
#pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int t = 0; t < nthreads; t++)
    {
        try
        {
            /* Note that thread 0 uses the global fshift and energy arrays,
             * but to keep the code simple, we initialize all data here.
             */
            f_t[t] = std::make_unique<f_thread_t>(numEnergyGroups);
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
    }
}

bonded_threading_t* init_bonded_threading(FILE* fplog, const int nenergrp)
{
    /* These thread local data structures are used for bondeds only.
     *
     * Note that we also use these structures when running single-threaded.
     * This is because the bonded force buffer uses type rvec4, whereas
     * the normal force buffer uses type rvec. This leads to a little
     * reduction overhead, but the speed gain in the bonded calculations
     * of doing transposeScatterIncr/DecrU with alignment 4 instead of 3
     * is much larger than the reduction overhead.
     */
    bonded_threading_t* bt = new bonded_threading_t(gmx_omp_nthreads_get(emntBonded), nenergrp);

    /* The optimal value after which to switch from uniform to localized
     * bonded interaction distribution is 3, 4 or 5 depending on the system
     * and hardware.
     */
    const int max_nthread_uniform = 4;
    char*     ptr;

    if ((ptr = getenv("GMX_BONDED_NTHREAD_UNIFORM")) != nullptr)
    {
        sscanf(ptr, "%d", &bt->max_nthread_uniform);
        if (fplog != nullptr)
        {
            fprintf(fplog, "\nMax threads for uniform bonded distribution set to %d by env.var.\n",
                    bt->max_nthread_uniform);
        }
    }
    else
    {
        bt->max_nthread_uniform = max_nthread_uniform;
    }

    return bt;
}
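
/* Usage note: GMX_BONDED_NTHREAD_UNIFORM is read with getenv() above, so
 * the switch-over point can be changed without rebuilding, e.g. in shell
 * syntax (illustrative value):
 *
 *   export GMX_BONDED_NTHREAD_UNIFORM=8
 *
 * after which runs with up to 8 OpenMP threads use the uniform division
 * instead of divide_bondeds_by_locality().
 */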