/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gromacs/compat/make_unique.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/topology/ifunc.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxomp.h"
/* The strategy used here for assigning virtual sites to (thread-)tasks
 * is as follows:
 *
 * We divide the atom range that vsites operate on (natoms_local with DD,
 * 0 - last atom involved in vsites without DD) equally over all threads.
 *
 * Vsites in the local range constructed from atoms in the local range
 * and/or other vsites that are fully local are assigned to a simple,
 * independent task.
 *
 * Vsites that are not assigned after using the above criterion get assigned
 * to a so called "interdependent" thread task when none of the constructing
 * atoms is a vsite. These tasks are called interdependent, because one task
 * accesses atoms assigned to a different task/thread.
 * Note that this option is turned off with large (local) atom counts
 * to avoid high memory usage.
 *
 * Any remaining vsites are assigned to a separate master thread task.
 */
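/* Note (editorial): an illustrative sketch of the resulting task layout for
 * nthreads = N; each thread t owns tData[t]->ilist (fully local vsites) and
 * tData[t]->idTask.ilist (vsites needing atoms of other tasks), while
 * tData[N] is the extra serial "master" task for everything else:
 *
 *   atom range:  |----task 0----|----task 1----| ... |---task N-1---|
 *   per thread:   ilist + idTask  ilist + idTask       ilist + idTask
 *   leftover:     tData[N]->ilist (run single-threaded after the rest)
 */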
static void init_ilist(t_ilist *ilist)
{
    for (int i = 0; i < F_NRE; i++)
    {
        ilist[i].nr     = 0;
        ilist[i].iatoms = nullptr;
    }
}
/*! \brief List of atom indices belonging to a task */
struct AtomIndex
{
    //! List of atom indices
    std::vector<int> atom;
};
/*! \brief Data structure for thread tasks that use constructing atoms outside their own atom range */
struct InterdependentTask
{
    //! The interaction lists, only vsite entries are used
    t_ilist                ilist[F_NRE];
    //! Thread/task-local force buffer
    std::vector<RVec>      force;
    //! The atom indices of the vsites of our task
    std::vector<int>       vsite;
    //! Flags telling whether each element in force is written to (spread to) or not
    std::vector<bool>      use;
    //! The number of entries set to true in use
    int                    nuse = 0;
    //! Array of atom indices, size nthreads, covering all nuse set elements in use
    std::vector<AtomIndex> atomIndex;
    //! List of tasks (force blocks) this task spreads forces to
    std::vector<int>       spreadTask;
    //! List of tasks that write to this task's force block range
    std::vector<int>       reduceTask;
};
/*! \brief Vsite thread task data structure */
struct VsiteThread
{
    //! Start of atom range of this task
    int                rangeStart;
    //! End of atom range of this task
    int                rangeEnd;
    //! The interaction lists, only vsite entries are used
    t_ilist            ilist[F_NRE];
    //! Local fshift accumulation buffer
    rvec               fshift[SHIFTS];
    //! Local virial dx*df accumulation buffer
    matrix             dxdf;
    //! Tells if the interdependent task idTask should be used (in addition to the rest of this task); this bool has the same value on all threads
    bool               useInterdependentTask;
    //! Data for vsites that involve constructing atoms in the atom range of other threads/tasks
    InterdependentTask idTask;

    /*! \brief Constructor */
    VsiteThread()
    {
        rangeStart            = -1;
        rangeEnd              = -1;
        init_ilist(ilist);
        clear_rvecs(SHIFTS, fshift);
        clear_mat(dxdf);
        useInterdependentTask = false;
    }
};
/*! \brief Returns the sum of the vsite ilist sizes over all vsite types
 *
 * \param[in] ilist  The interaction list
 */
static int vsiteIlistNrCount(const t_ilist *ilist)
{
    int nr = 0;
    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
    {
        nr += ilist[ftype].nr;
    }

    return nr;
}
static int pbc_rvec_sub(const t_pbc *pbc, const rvec xi, const rvec xj, rvec dx)
{
    if (pbc)
    {
        return pbc_dx_aiuc(pbc, xi, xj, dx);
    }
    else
    {
        rvec_sub(xi, xj, dx);
        return CENTRAL;
    }
}
/* Vsite construction routines */
static void constr_vsite2(const rvec xi, const rvec xj, rvec x, real a, const t_pbc *pbc)
{
    real b = 1 - a;
    /* 1 flop */

    if (pbc)
    {
        rvec dx;
        pbc_dx_aiuc(pbc, xj, xi, dx);
        x[XX] = xi[XX] + a*dx[XX];
        x[YY] = xi[YY] + a*dx[YY];
        x[ZZ] = xi[ZZ] + a*dx[ZZ];
    }
    else
    {
        x[XX] = b*xi[XX] + a*xj[XX];
        x[YY] = b*xi[YY] + a*xj[YY];
        x[ZZ] = b*xi[ZZ] + a*xj[ZZ];
        /* 9 flops */
    }

    /* TOTAL: 10 flops */
}
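/* Note (editorial): constr_vsite2 places the vsite on the line through
 * atoms i and j:
 *   x_v = (1 - a)*x_i + a*x_j = x_i + a*(x_j - x_i)
 * The pbc branch uses the minimum-image vector x_j - x_i, so the vsite
 * stays in the same periodic image as atom i.
 */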
static void constr_vsite3(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b,
                          const t_pbc *pbc)
{
    real c = 1 - a - b;
    /* 2 flops */

    if (pbc)
    {
        rvec dxj, dxk;
        pbc_dx_aiuc(pbc, xj, xi, dxj);
        pbc_dx_aiuc(pbc, xk, xi, dxk);
        x[XX] = xi[XX] + a*dxj[XX] + b*dxk[XX];
        x[YY] = xi[YY] + a*dxj[YY] + b*dxk[YY];
        x[ZZ] = xi[ZZ] + a*dxj[ZZ] + b*dxk[ZZ];
    }
    else
    {
        x[XX] = c*xi[XX] + a*xj[XX] + b*xk[XX];
        x[YY] = c*xi[YY] + a*xj[YY] + b*xk[YY];
        x[ZZ] = c*xi[ZZ] + a*xj[ZZ] + b*xk[ZZ];
        /* 15 flops */
    }

    /* TOTAL: 17 flops */
}
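/* Note (editorial): constr_vsite3 is the linear, in-plane three-atom
 * construction x_v = (1 - a - b)*x_i + a*x_j + b*x_k.
 */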
static void constr_vsite3FD(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b,
                            const t_pbc *pbc)
{
    rvec xij, xjk, temp;
    real c;

    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xj, xjk);
    /* 6 flops */

    /* temp goes from i to a point on the line jk */
    temp[XX] = xij[XX] + a*xjk[XX];
    temp[YY] = xij[YY] + a*xjk[YY];
    temp[ZZ] = xij[ZZ] + a*xjk[ZZ];
    /* 6 flops */

    c = b*gmx::invsqrt(iprod(temp, temp));
    /* 6 + 10 flops */

    x[XX] = xi[XX] + c*temp[XX];
    x[YY] = xi[YY] + c*temp[YY];
    x[ZZ] = xi[ZZ] + c*temp[ZZ];
    /* 6 flops */

    /* TOTAL: 34 flops */
}
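/* Note (editorial): constr_vsite3FD is the fixed-distance variant; the
 * vsite sits at distance b from atom i along the unit vector towards a
 * point on the line j-k:
 *   x_v = x_i + b * (x_ij + a*x_jk) / |x_ij + a*x_jk|
 */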
static void constr_vsite3FAD(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b, const t_pbc *pbc)
{
    rvec xij, xjk, xp;
    real a1, b1, c1, invdij;

    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xj, xjk);
    /* 6 flops */

    invdij = gmx::invsqrt(iprod(xij, xij));
    c1     = invdij * invdij * iprod(xij, xjk);
    xp[XX] = xjk[XX] - c1*xij[XX];
    xp[YY] = xjk[YY] - c1*xij[YY];
    xp[ZZ] = xjk[ZZ] - c1*xij[ZZ];
    a1     = a*invdij;
    b1     = b*gmx::invsqrt(iprod(xp, xp));
    /* 45 flops */

    x[XX] = xi[XX] + a1*xij[XX] + b1*xp[XX];
    x[YY] = xi[YY] + a1*xij[YY] + b1*xp[YY];
    x[ZZ] = xi[ZZ] + a1*xij[ZZ] + b1*xp[ZZ];
    /* 12 flops */

    /* TOTAL: 63 flops */
}
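/* Note (editorial): constr_vsite3FAD is the fixed angle and distance
 * construction. xp is the component of x_jk perpendicular to x_ij (still in
 * the i,j,k plane), so
 *   x_v = x_i + (a/|x_ij|)*x_ij + (b/|xp|)*xp
 * places the vsite at a fixed distance from atom i and a fixed angle with
 * the i-j axis; a and b together encode that distance and angle.
 */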
static void constr_vsite3OUT(const rvec xi, const rvec xj, const rvec xk, rvec x,
                             real a, real b, real c, const t_pbc *pbc)
{
    rvec xij, xik, temp;

    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xi, xik);
    cprod(xij, xik, temp);
    /* 15 flops */

    x[XX] = xi[XX] + a*xij[XX] + b*xik[XX] + c*temp[XX];
    x[YY] = xi[YY] + a*xij[YY] + b*xik[YY] + c*temp[YY];
    x[ZZ] = xi[ZZ] + a*xij[ZZ] + b*xik[ZZ] + c*temp[ZZ];
    /* 18 flops */

    /* TOTAL: 33 flops */
}
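/* Note (editorial): constr_vsite3OUT is the only three-atom construction
 * with an out-of-plane component:
 *   x_v = x_i + a*x_ij + b*x_ik + c*(x_ij x x_ik)
 * The cross-product term makes the construction non-linear in the atom
 * positions, which is why spreading its force takes a virial correction.
 */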
static void constr_vsite4FD(const rvec xi, const rvec xj, const rvec xk, const rvec xl, rvec x,
                            real a, real b, real c, const t_pbc *pbc)
{
    rvec xij, xjk, xjl, temp;
    real d;

    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xj, xjk);
    pbc_rvec_sub(pbc, xl, xj, xjl);
    /* 9 flops */

    /* temp goes from i to a point on the plane jkl */
    temp[XX] = xij[XX] + a*xjk[XX] + b*xjl[XX];
    temp[YY] = xij[YY] + a*xjk[YY] + b*xjl[YY];
    temp[ZZ] = xij[ZZ] + a*xjk[ZZ] + b*xjl[ZZ];
    /* 12 flops */

    d = c*gmx::invsqrt(iprod(temp, temp));
    /* 6 + 10 flops */

    x[XX] = xi[XX] + d*temp[XX];
    x[YY] = xi[YY] + d*temp[YY];
    x[ZZ] = xi[ZZ] + d*temp[ZZ];
    /* 6 flops */

    /* TOTAL: 43 flops */
}
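/* Note (editorial): constr_vsite4FD generalizes 3FD to four atoms; the
 * vsite sits at distance c from atom i towards a point in the j,k,l plane:
 *   x_v = x_i + c * (x_ij + a*x_jk + b*x_jl) / |x_ij + a*x_jk + b*x_jl|
 */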
static void constr_vsite4FDN(const rvec xi, const rvec xj, const rvec xk, const rvec xl, rvec x,
                             real a, real b, real c, const t_pbc *pbc)
{
    rvec xij, xik, xil, ra, rb, rja, rjb, rm;
    real d;

    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xi, xik);
    pbc_rvec_sub(pbc, xl, xi, xil);
    /* 9 flops */

    ra[XX] = a*xik[XX];
    ra[YY] = a*xik[YY];
    ra[ZZ] = a*xik[ZZ];

    rb[XX] = b*xil[XX];
    rb[YY] = b*xil[YY];
    rb[ZZ] = b*xil[ZZ];
    /* 6 flops */

    rvec_sub(ra, xij, rja);
    rvec_sub(rb, xij, rjb);
    /* 6 flops */

    cprod(rja, rjb, rm);
    /* 9 flops */

    d = c*gmx::invsqrt(norm2(rm));
    /* 5 + 5 + 1 flops */

    x[XX] = xi[XX] + d*rm[XX];
    x[YY] = xi[YY] + d*rm[YY];
    x[ZZ] = xi[ZZ] + d*rm[ZZ];
    /* 6 flops */

    /* TOTAL: 47 flops */
}
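/* Note (editorial): constr_vsite4FDN places the vsite at distance c from
 * atom i along the normal of the plane spanned by
 * rja = a*x_ik - x_ij and rjb = b*x_il - x_ij:
 *   x_v = x_i + c * (rja x rjb) / |rja x rjb|
 */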
static int constr_vsiten(const t_iatom *ia, const t_iparams ip[],
                         rvec *x, const t_pbc *pbc)
{
    rvec x1, dx;
    dvec dsum;
    int  n3, av, ai;
    real a;

    n3 = 3*ip[ia[0]].vsiten.n;
    av = ia[1];
    ai = ia[2];
    copy_rvec(x[ai], x1);
    dsum[XX] = dsum[YY] = dsum[ZZ] = 0;
    for (int i = 3; i < n3; i += 3)
    {
        ai = ia[i + 2];
        a  = ip[ia[i]].vsiten.a;
        if (pbc)
        {
            pbc_dx_aiuc(pbc, x[ai], x1, dx);
        }
        else
        {
            rvec_sub(x[ai], x1, dx);
        }
        dsum[XX] += a*dx[XX];
        dsum[YY] += a*dx[YY];
        dsum[ZZ] += a*dx[ZZ];
        /* 9 flops */
    }

    x[av][XX] = x1[XX] + dsum[XX];
    x[av][YY] = x1[YY] + dsum[YY];
    x[av][ZZ] = x1[ZZ] + dsum[ZZ];

    return n3;
}
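/* Note (editorial): an N-atom vsite is constructed relative to its first
 * constructing atom: x_v = x_1 + sum_i a_i*(x_i - x_1), with the
 * minimum-image convention applied to each difference when pbc is set.
 * The return value (the number of t_iatom entries consumed) lets the
 * caller advance through the interaction list.
 */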
/*! \brief PBC modes for vsite construction and spreading */
enum class PbcMode
{
    all,         // Apply normal, simple PBC for all vsites
    chargeGroup, // Keep the vsite in the same periodic image as the rest of its charge group
    none         // No PBC treatment needed
};
/*! \brief Returns the PBC mode based on the system PBC and vsite properties
 *
 * \param[in] pbcPtr  A pointer to a PBC struct or nullptr when no PBC treatment is required
 * \param[in] vsite   A pointer to the vsite struct, can be nullptr
 */
static PbcMode getPbcMode(const t_pbc       *pbcPtr,
                          const gmx_vsite_t *vsite)
{
    if (pbcPtr == nullptr)
    {
        return PbcMode::none;
    }
    else if (vsite != nullptr && vsite->bHaveChargeGroups)
    {
        return PbcMode::chargeGroup;
    }
    else
    {
        return PbcMode::all;
    }
}
static void construct_vsites_thread(const gmx_vsite_t *vsite,
                                    rvec x[],
                                    real dt, rvec *v,
                                    const t_iparams ip[], const t_ilist ilist[],
                                    const t_pbc *pbc_null)
{
    real inv_dt;
    if (v != nullptr)
    {
        inv_dt = 1.0/dt;
    }
    else
    {
        inv_dt = 1.0;
    }

    const PbcMode            pbcMode   = getPbcMode(pbc_null, vsite);
    /* We need another pbc pointer, as with charge groups we switch per vsite */
    const t_pbc             *pbc_null2 = pbc_null;
    gmx::ArrayRef<const int> vsite_pbc;

    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
    {
        if (ilist[ftype].nr == 0)
        {
            continue;
        }

        int            nra = interaction_function[ftype].nratoms;
        int            nr  = ilist[ftype].nr;

        const t_iatom *ia  = ilist[ftype].iatoms;

        if (pbcMode == PbcMode::chargeGroup)
        {
            vsite_pbc = (*vsite->vsite_pbc_loc)[ftype - c_ftypeVsiteStart];
        }

        for (int i = 0; i < nr; )
        {
            int  inc    = 1 + nra;
            int  tp     = ia[0];
            /* The vsite and constructing atoms */
            int  avsite = ia[1];
            int  ai     = ia[2];
            /* Constants for constructing vsites */
            real a1     = ip[tp].vsite.a;
            /* Check what kind of pbc we need to use */
            int  pbc_atom;
            rvec xpbc;
            if (pbcMode == PbcMode::all)
            {
                /* No charge groups, vsite follows its own pbc */
                pbc_atom = avsite;
                copy_rvec(x[avsite], xpbc);
            }
            else if (pbcMode == PbcMode::chargeGroup)
            {
                pbc_atom = vsite_pbc[i/(1 + nra)];
                if (pbc_atom > -2)
                {
                    if (pbc_atom >= 0)
                    {
                        /* We need to copy the coordinates here;
                         * for a single-atom cg, pbc_atom
                         * is the vsite itself.
                         */
                        copy_rvec(x[pbc_atom], xpbc);
                    }
                    pbc_null2 = pbc_null;
                }
                else
                {
                    pbc_null2 = nullptr;
                }
            }
            else
            {
                pbc_atom = -2;
            }

            /* Copy the old position */
            rvec xv;
            copy_rvec(x[avsite], xv);

            /* Construct the vsite depending on type */
            int  aj, ak, al;
            real b1, c1;
            switch (ftype)
            {
                case F_VSITE2:
                    aj = ia[3];
                    constr_vsite2(x[ai], x[aj], x[avsite], a1, pbc_null2);
                    break;
                case F_VSITE3:
                    aj = ia[3]; ak = ia[4];
                    b1 = ip[tp].vsite.b;
                    constr_vsite3(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
                    break;
                case F_VSITE3FD:
                    aj = ia[3]; ak = ia[4];
                    b1 = ip[tp].vsite.b;
                    constr_vsite3FD(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
                    break;
                case F_VSITE3FAD:
                    aj = ia[3]; ak = ia[4];
                    b1 = ip[tp].vsite.b;
                    constr_vsite3FAD(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
                    break;
                case F_VSITE3OUT:
                    aj = ia[3]; ak = ia[4];
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    constr_vsite3OUT(x[ai], x[aj], x[ak], x[avsite], a1, b1, c1, pbc_null2);
                    break;
                case F_VSITE4FD:
                    aj = ia[3]; ak = ia[4]; al = ia[5];
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    constr_vsite4FD(x[ai], x[aj], x[ak], x[al], x[avsite], a1, b1, c1,
                                    pbc_null2);
                    break;
                case F_VSITE4FDN:
                    aj = ia[3]; ak = ia[4]; al = ia[5];
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    constr_vsite4FDN(x[ai], x[aj], x[ak], x[al], x[avsite], a1, b1, c1,
                                     pbc_null2);
                    break;
                case F_VSITEN:
                    inc = constr_vsiten(ia, ip, x, pbc_null2);
                    break;
                default:
                    gmx_fatal(FARGS, "No such vsite type %d in %s, line %d",
                              ftype, __FILE__, __LINE__);
            }

            if (pbc_atom >= 0)
            {
                /* Match the pbc of this vsite to the rest of its charge group */
                rvec dx;
                int  ishift = pbc_dx_aiuc(pbc_null, x[avsite], xpbc, dx);
                if (ishift != CENTRAL)
                {
                    rvec_add(xpbc, dx, x[avsite]);
                }
            }
            if (v != nullptr)
            {
                /* Calculate velocity of vsite... */
                rvec vv;
                rvec_sub(x[avsite], xv, vv);
                svmul(inv_dt, vv, v[avsite]);
            }

            /* Increment loop variables */
            i  += inc;
            ia += inc;
        }
    }
}
void construct_vsites(const gmx_vsite_t *vsite,
                      rvec x[],
                      real dt, rvec *v,
                      const t_iparams ip[], const t_ilist ilist[],
                      int ePBC, gmx_bool bMolPBC,
                      const t_commrec *cr,
                      const matrix box)
{
    const bool useDomdec = (vsite != nullptr && vsite->useDomdec);
    GMX_ASSERT(!useDomdec || (cr != nullptr && DOMAINDECOMP(cr)), "When vsites are set up with domain decomposition, we need a valid commrec");
    // TODO: Remove this assertion when we remove charge groups
    GMX_ASSERT(vsite != nullptr || ePBC == epbcNONE, "Without a vsite struct we can not do PBC (in case we have charge groups)");

    t_pbc pbc, *pbc_null;

    /* We only need to do pbc when we have inter-cg vsites.
     * Note that with domain decomposition we do not need to apply PBC here
     * when we have at least 3 domains along each dimension. Currently we
     * do not optimize this case.
     */
    if (ePBC != epbcNONE && (useDomdec || bMolPBC) &&
        !(vsite != nullptr && vsite->n_intercg_vsite == 0))
    {
        /* This is wasting some CPU time as we now do this multiple times
         * per MD step.
         */
        ivec null_ivec;
        clear_ivec(null_ivec);
        pbc_null = set_pbc_dd(&pbc, ePBC,
                              useDomdec ? cr->dd->nc : null_ivec,
                              FALSE, box);
    }
    else
    {
        pbc_null = nullptr;
    }

    if (useDomdec)
    {
        dd_move_x_vsites(cr->dd, box, x);
    }

    if (vsite == nullptr || vsite->nthreads == 1)
    {
        construct_vsites_thread(vsite,
                                x, dt, v,
                                ip, ilist,
                                pbc_null);
    }
    else
    {
#pragma omp parallel num_threads(vsite->nthreads)
        {
            try
            {
                const int          th    = gmx_omp_get_thread_num();
                const VsiteThread &tData = *vsite->tData[th];
                GMX_ASSERT(tData.rangeStart >= 0, "The thread data should be initialized before calling construct_vsites");

                construct_vsites_thread(vsite,
                                        x, dt, v,
                                        ip, tData.ilist,
                                        pbc_null);
                if (tData.useInterdependentTask)
                {
                    /* Here we don't need a barrier (unlike the spreading),
                     * since both tasks only construct vsites from particles
                     * or local vsites, not from non-local vsites.
                     */
                    construct_vsites_thread(vsite,
                                            x, dt, v,
                                            ip, tData.idTask.ilist,
                                            pbc_null);
                }
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }
        /* Now we can construct the vsites that might depend on other vsites */
        construct_vsites_thread(vsite,
                                x, dt, v,
                                ip, vsite->tData[vsite->nthreads]->ilist,
                                pbc_null);
    }
}
static void spread_vsite2(const t_iatom ia[], real a,
                          const rvec x[],
                          rvec f[], rvec fshift[],
                          const t_pbc *pbc, const t_graph *g)
{
    rvec    fi, fj, dx;
    t_iatom av, ai, aj;
    ivec    di;
    int     siv, sij;

    av = ia[1];
    ai = ia[2];
    aj = ia[3];

    svmul(1 - a, f[av], fi);
    svmul(    a, f[av], fj);
    /* 7 flops */

    rvec_inc(f[ai], fi);
    rvec_inc(f[aj], fj);
    /* 6 flops */

    if (g)
    {
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, av), di);
        siv = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), di);
        sij = IVEC2IS(di);
    }
    else if (pbc)
    {
        siv = pbc_dx_aiuc(pbc, x[ai], x[av], dx);
        sij = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
    }
    else
    {
        siv = CENTRAL;
        sij = CENTRAL;
    }

    if (fshift && (siv != CENTRAL || sij != CENTRAL))
    {
        rvec_inc(fshift[siv], f[av]);
        rvec_dec(fshift[CENTRAL], fi);
        rvec_dec(fshift[sij], fj);
    }

    /* TOTAL: 13 flops */
}
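/* Note (editorial): spreading is the chain rule applied to the
 * construction. For vsite2, x_v = (1-a)*x_i + a*x_j, so a force fv acting
 * on the vsite contributes (1-a)*fv to atom i and a*fv to atom j; the
 * fshift bookkeeping keeps the virial correct across periodic shifts.
 */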
void constructVsitesGlobal(const gmx_mtop_t         &mtop,
                           gmx::ArrayRef<gmx::RVec>  x)
{
    GMX_ASSERT(x.size() >= static_cast<gmx::index>(mtop.natoms), "x should contain the whole system");
    GMX_ASSERT(!mtop.moleculeBlockIndices.empty(), "molblock indices are needed in constructVsitesGlobal");

    for (size_t mb = 0; mb < mtop.molblock.size(); mb++)
    {
        const gmx_molblock_t &molb = mtop.molblock[mb];
        const gmx_moltype_t  &molt = mtop.moltype[molb.type];
        if (vsiteIlistNrCount(molt.ilist) > 0)
        {
            int atomOffset = mtop.moleculeBlockIndices[mb].globalAtomStart;
            for (int mol = 0; mol < molb.nmol; mol++)
            {
                construct_vsites(nullptr, as_rvec_array(x.data()) + atomOffset,
                                 0.0, nullptr,
                                 mtop.ffparams.iparams, molt.ilist,
                                 epbcNONE, TRUE, nullptr, nullptr);
                atomOffset += molt.atoms.nr;
            }
        }
    }
}
static void spread_vsite3(const t_iatom ia[], real a, real b,
                          const rvec x[],
                          rvec f[], rvec fshift[],
                          const t_pbc *pbc, const t_graph *g)
{
    rvec    fi, fj, fk, dx;
    t_iatom av, ai, aj, ak;
    ivec    di;
    int     siv, sij, sik;

    av = ia[1];
    ai = ia[2];
    aj = ia[3];
    ak = ia[4];

    svmul(1 - a - b, f[av], fi);
    svmul(        a, f[av], fj);
    svmul(        b, f[av], fk);
    /* 11 flops */

    rvec_inc(f[ai], fi);
    rvec_inc(f[aj], fj);
    rvec_inc(f[ak], fk);
    /* 9 flops */

    if (g)
    {
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, ia[1]), di);
        siv = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), di);
        sij = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, ak), di);
        sik = IVEC2IS(di);
    }
    else if (pbc)
    {
        siv = pbc_dx_aiuc(pbc, x[ai], x[av], dx);
        sij = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
        sik = pbc_dx_aiuc(pbc, x[ai], x[ak], dx);
    }
    else
    {
        siv = CENTRAL;
        sij = CENTRAL;
        sik = CENTRAL;
    }

    if (fshift && (siv != CENTRAL || sij != CENTRAL || sik != CENTRAL))
    {
        rvec_inc(fshift[siv], f[av]);
        rvec_dec(fshift[CENTRAL], fi);
        rvec_dec(fshift[sij], fj);
        rvec_dec(fshift[sik], fk);
    }

    /* TOTAL: 20 flops */
}
static void spread_vsite3FD(const t_iatom ia[], real a, real b,
                            const rvec x[],
                            rvec f[], rvec fshift[],
                            gmx_bool VirCorr, matrix dxdf,
                            const t_pbc *pbc, const t_graph *g)
{
    real    c, invl, fproj, a1;
    rvec    xvi, xij, xjk, xix, fv, temp;
    t_iatom av, ai, aj, ak;
    int     svi, sji, skj;
    ivec    di;

    av = ia[1];
    ai = ia[2];
    aj = ia[3];
    ak = ia[4];
    copy_rvec(f[av], fv);

    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
    /* 6 flops */

    /* xix goes from i to point x on the line jk */
    xix[XX] = xij[XX]+a*xjk[XX];
    xix[YY] = xij[YY]+a*xjk[YY];
    xix[ZZ] = xij[ZZ]+a*xjk[ZZ];
    /* 6 flops */

    invl = gmx::invsqrt(iprod(xix, xix));
    c    = b*invl;
    /* 4 + ?10? flops */

    fproj = iprod(xix, fv)*invl*invl; /* = (xix . f)/(xix . xix) */

    temp[XX] = c*(fv[XX]-fproj*xix[XX]);
    temp[YY] = c*(fv[YY]-fproj*xix[YY]);
    temp[ZZ] = c*(fv[ZZ]-fproj*xix[ZZ]);
    /* 16 */

    /* c is already calculated in constr_vsite3FD
       storing c somewhere will save 26 flops! */

    a1         = 1 - a;
    f[ai][XX] += fv[XX] - temp[XX];
    f[ai][YY] += fv[YY] - temp[YY];
    f[ai][ZZ] += fv[ZZ] - temp[ZZ];
    f[aj][XX] += a1*temp[XX];
    f[aj][YY] += a1*temp[YY];
    f[aj][ZZ] += a1*temp[ZZ];
    f[ak][XX] += a*temp[XX];
    f[ak][YY] += a*temp[YY];
    f[ak][ZZ] += a*temp[ZZ];
    /* 19 flops */

    if (g)
    {
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        svi = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        sji = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
        skj = IVEC2IS(di);
    }
    else if (pbc)
    {
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    }
    else
    {
        svi = CENTRAL;
    }

    if (fshift && (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL))
    {
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - (1 + a)*temp[XX];
        fshift[CENTRAL][YY] += fv[YY] - (1 + a)*temp[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - (1 + a)*temp[ZZ];
        fshift[    sji][XX] += temp[XX];
        fshift[    sji][YY] += temp[YY];
        fshift[    sji][ZZ] += temp[ZZ];
        fshift[    skj][XX] += a*temp[XX];
        fshift[    skj][YY] += a*temp[YY];
        fshift[    skj][ZZ] += a*temp[ZZ];
    }

    if (VirCorr)
    {
        /* When VirCorr=TRUE, the virial for the current forces is not
         * calculated from the redistributed forces. This means that
         * the effect of non-linear virtual site constructions on the virial
         * needs to be added separately. This contribution can be calculated
         * in many ways, but the simplest and cheapest way is to use
         * the first constructing atom ai as a reference position in space:
         * subtract (xv-xi)*fv and add (xj-xi)*fj + (xk-xi)*fk.
         */
        rvec xiv;

        pbc_rvec_sub(pbc, x[av], x[ai], xiv);

        for (int i = 0; i < DIM; i++)
        {
            for (int j = 0; j < DIM; j++)
            {
                /* As xix is a linear combination of j and k, use that here */
                dxdf[i][j] += -xiv[i]*fv[j] + xix[i]*temp[j];
            }
        }
    }

    /* TOTAL: 61 flops */
}
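/* Note (editorial): for the non-linear constructions the virial cannot be
 * obtained from the spread forces alone. dxdf therefore accumulates, per
 * vsite, the outer product of -(x_v - x_i) with fv plus the outer products
 * of the construction vectors with the redistributed forces (here xix with
 * temp); spread_vsite_f finally adds -0.5*dxdf to the virial.
 */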
static void spread_vsite3FAD(const t_iatom ia[], real a, real b,
                             const rvec x[],
                             rvec f[], rvec fshift[],
                             gmx_bool VirCorr, matrix dxdf,
                             const t_pbc *pbc, const t_graph *g)
{
    rvec    xvi, xij, xjk, xperp, Fpij, Fppp, fv, f1, f2, f3;
    real    a1, b1, c1, c2, invdij, invdij2, invdp, fproj;
    t_iatom av, ai, aj, ak;
    int     svi, sji, skj, d;
    ivec    di;

    av = ia[1];
    ai = ia[2];
    aj = ia[3];
    ak = ia[4];
    copy_rvec(f[ia[1]], fv);

    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
    /* 6 flops */

    invdij    = gmx::invsqrt(iprod(xij, xij));
    invdij2   = invdij * invdij;
    c1        = iprod(xij, xjk) * invdij2;
    xperp[XX] = xjk[XX] - c1*xij[XX];
    xperp[YY] = xjk[YY] - c1*xij[YY];
    xperp[ZZ] = xjk[ZZ] - c1*xij[ZZ];
    /* xperp in plane ijk, perp. to ij */
    invdp = gmx::invsqrt(iprod(xperp, xperp));
    a1    = a*invdij;
    b1    = b*invdp;
    /* 45 flops */

    /* a1, b1 and c1 are already calculated in constr_vsite3FAD
       storing them somewhere will save 45 flops! */

    fproj = iprod(xij, fv)*invdij2;
    svmul(fproj,                        xij,   Fpij); /* proj. f on xij */
    svmul(iprod(xperp, fv)*invdp*invdp, xperp, Fppp); /* proj. f on xperp */
    svmul(b1*fproj,                     xperp, f3);
    /* 23 flops */

    rvec_sub(fv, Fpij, f1); /* f1 = f - Fpij */
    rvec_sub(f1, Fppp, f2); /* f2 = f - Fpij - Fppp */
    for (d = 0; (d < DIM); d++)
    {
        f1[d] *= a1;
        f2[d] *= b1;
    }
    /* 12 flops */

    c2         = 1 + c1;
    f[ai][XX] += fv[XX] - f1[XX] + c1*f2[XX] + f3[XX];
    f[ai][YY] += fv[YY] - f1[YY] + c1*f2[YY] + f3[YY];
    f[ai][ZZ] += fv[ZZ] - f1[ZZ] + c1*f2[ZZ] + f3[ZZ];
    f[aj][XX] += f1[XX] - c2*f2[XX] - f3[XX];
    f[aj][YY] += f1[YY] - c2*f2[YY] - f3[YY];
    f[aj][ZZ] += f1[ZZ] - c2*f2[ZZ] - f3[ZZ];
    f[ak][XX] += f2[XX];
    f[ak][YY] += f2[YY];
    f[ak][ZZ] += f2[ZZ];
    /* 30 flops */

    if (g)
    {
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        svi = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        sji = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
        skj = IVEC2IS(di);
    }
    else if (pbc)
    {
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    }
    else
    {
        svi = CENTRAL;
    }

    if (fshift && (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL))
    {
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - f1[XX] - (1 - c1)*f2[XX] + f3[XX];
        fshift[CENTRAL][YY] += fv[YY] - f1[YY] - (1 - c1)*f2[YY] + f3[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - f1[ZZ] - (1 - c1)*f2[ZZ] + f3[ZZ];
        fshift[    sji][XX] += f1[XX] - c1*f2[XX] - f3[XX];
        fshift[    sji][YY] += f1[YY] - c1*f2[YY] - f3[YY];
        fshift[    sji][ZZ] += f1[ZZ] - c1*f2[ZZ] - f3[ZZ];
        fshift[    skj][XX] += f2[XX];
        fshift[    skj][YY] += f2[YY];
        fshift[    skj][ZZ] += f2[ZZ];
    }

    if (VirCorr)
    {
        rvec xiv;
        int  i, j;

        pbc_rvec_sub(pbc, x[av], x[ai], xiv);

        for (i = 0; i < DIM; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                /* Note that xik=xij+xjk, so we have to add xij*f2 */
                dxdf[i][j] +=
                    -xiv[i]*fv[j]
                    + xij[i]*(f1[j] + (1 - c2)*f2[j] - f3[j])
                    + xjk[i]*f2[j];
            }
        }
    }

    /* TOTAL: 113 flops */
}
static void spread_vsite3OUT(const t_iatom ia[], real a, real b, real c,
                             const rvec x[],
                             rvec f[], rvec fshift[],
                             gmx_bool VirCorr, matrix dxdf,
                             const t_pbc *pbc, const t_graph *g)
{
    rvec xvi, xij, xik, fv, fj, fk;
    real cfx, cfy, cfz;
    int  av, ai, aj, ak;
    ivec di;
    int  svi, sji, ski;

    av = ia[1];
    ai = ia[2];
    aj = ia[3];
    ak = ia[4];

    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    ski = pbc_rvec_sub(pbc, x[ak], x[ai], xik);
    /* 6 flops */

    copy_rvec(f[av], fv);

    cfx = c*fv[XX];
    cfy = c*fv[YY];
    cfz = c*fv[ZZ];
    /* 3 flops */

    fj[XX] =  a*fv[XX]    - xik[ZZ]*cfy + xik[YY]*cfz;
    fj[YY] =  xik[ZZ]*cfx + a*fv[YY]    - xik[XX]*cfz;
    fj[ZZ] = -xik[YY]*cfx + xik[XX]*cfy + a*fv[ZZ];

    fk[XX] =  b*fv[XX]    + xij[ZZ]*cfy - xij[YY]*cfz;
    fk[YY] = -xij[ZZ]*cfx + b*fv[YY]    + xij[XX]*cfz;
    fk[ZZ] =  xij[YY]*cfx - xij[XX]*cfy + b*fv[ZZ];
    /* 30 flops */

    f[ai][XX] += fv[XX] - fj[XX] - fk[XX];
    f[ai][YY] += fv[YY] - fj[YY] - fk[YY];
    f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
    rvec_inc(f[aj], fj);
    rvec_inc(f[ak], fk);
    /* 15 flops */

    if (g)
    {
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        svi = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        sji = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, ai), di);
        ski = IVEC2IS(di);
    }
    else if (pbc)
    {
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    }
    else
    {
        svi = CENTRAL;
    }

    if (fshift && (svi != CENTRAL || sji != CENTRAL || ski != CENTRAL))
    {
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX];
        fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
        rvec_inc(fshift[sji], fj);
        rvec_inc(fshift[ski], fk);
    }

    if (VirCorr)
    {
        rvec xiv;

        pbc_rvec_sub(pbc, x[av], x[ai], xiv);

        for (int i = 0; i < DIM; i++)
        {
            for (int j = 0; j < DIM; j++)
            {
                dxdf[i][j] += -xiv[i]*fv[j] + xij[i]*fj[j] + xik[i]*fk[j];
            }
        }
    }

    /* TOTAL: 54 flops */
}
static void spread_vsite4FD(const t_iatom ia[], real a, real b, real c,
                            const rvec x[],
                            rvec f[], rvec fshift[],
                            gmx_bool VirCorr, matrix dxdf,
                            const t_pbc *pbc, const t_graph *g)
{
    real d, invl, fproj, a1;
    rvec xvi, xij, xjk, xjl, xix, fv, temp;
    int  av, ai, aj, ak, al;
    ivec di;
    int  svi, sji, skj, slj, m;

    av = ia[1];
    ai = ia[2];
    aj = ia[3];
    ak = ia[4];
    al = ia[5];

    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
    slj = pbc_rvec_sub(pbc, x[al], x[aj], xjl);
    /* 9 flops */

    /* xix goes from i to point x on the plane jkl */
    for (m = 0; m < DIM; m++)
    {
        xix[m] = xij[m] + a*xjk[m] + b*xjl[m];
    }
    /* 12 flops */

    invl = gmx::invsqrt(iprod(xix, xix));
    d    = c*invl;
    /* 4 + ?10? flops */

    copy_rvec(f[av], fv);

    fproj = iprod(xix, fv)*invl*invl; /* = (xix . f)/(xix . xix) */

    for (m = 0; m < DIM; m++)
    {
        temp[m] = d*(fv[m] - fproj*xix[m]);
    }
    /* 16 */

    /* c is already calculated in constr_vsite4FD
       storing c somewhere will save 35 flops! */

    a1 = 1 - a - b;
    for (m = 0; m < DIM; m++)
    {
        f[ai][m] += fv[m] - temp[m];
        f[aj][m] += a1*temp[m];
        f[ak][m] += a*temp[m];
        f[al][m] += b*temp[m];
    }
    /* 26 flops */

    if (g)
    {
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        svi = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        sji = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
        skj = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, al), SHIFT_IVEC(g, aj), di);
        slj = IVEC2IS(di);
    }
    else if (pbc)
    {
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    }
    else
    {
        svi = CENTRAL;
    }

    if (fshift &&
        (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL || slj != CENTRAL))
    {
        rvec_dec(fshift[svi], fv);
        for (m = 0; m < DIM; m++)
        {
            fshift[CENTRAL][m] += fv[m] - (1 + a + b)*temp[m];
            fshift[    sji][m] += temp[m];
            fshift[    skj][m] += a*temp[m];
            fshift[    slj][m] += b*temp[m];
        }
    }

    if (VirCorr)
    {
        rvec xiv;
        int  i, j;

        pbc_rvec_sub(pbc, x[av], x[ai], xiv);

        for (i = 0; i < DIM; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                dxdf[i][j] += -xiv[i]*fv[j] + xix[i]*temp[j];
            }
        }
    }

    /* TOTAL: 77 flops */
}
static void spread_vsite4FDN(const t_iatom ia[], real a, real b, real c,
                             const rvec x[],
                             rvec f[], rvec fshift[],
                             gmx_bool VirCorr, matrix dxdf,
                             const t_pbc *pbc, const t_graph *g)
{
    rvec xvi, xij, xik, xil, ra, rb, rja, rjb, rab, rm, rt;
    rvec fv, fj, fk, fl;
    real invrm, denom;
    real cfx, cfy, cfz;
    ivec di;
    int  av, ai, aj, ak, al;
    int  svi, sij, sik, sil;

    /* DEBUG: check atom indices */
    av = ia[1];
    ai = ia[2];
    aj = ia[3];
    ak = ia[4];
    al = ia[5];

    copy_rvec(f[av], fv);

    sij = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    sik = pbc_rvec_sub(pbc, x[ak], x[ai], xik);
    sil = pbc_rvec_sub(pbc, x[al], x[ai], xil);
    /* 9 flops */

    ra[XX] = a*xik[XX];
    ra[YY] = a*xik[YY];
    ra[ZZ] = a*xik[ZZ];

    rb[XX] = b*xil[XX];
    rb[YY] = b*xil[YY];
    rb[ZZ] = b*xil[ZZ];
    /* 6 flops */

    rvec_sub(ra, xij, rja);
    rvec_sub(rb, xij, rjb);
    rvec_sub(rb, ra, rab);
    /* 9 flops */

    cprod(rja, rjb, rm);
    /* 9 flops */

    invrm = gmx::invsqrt(norm2(rm));
    denom = invrm*invrm;
    /* 5 + 5 + 2 flops */

    cfx = c*invrm*fv[XX];
    cfy = c*invrm*fv[YY];
    cfz = c*invrm*fv[ZZ];
    /* 6 flops */

    cprod(rm, rab, rt);
    /* 9 flops */

    rt[XX] *= denom;
    rt[YY] *= denom;
    rt[ZZ] *= denom;
    /* 3 flops */

    fj[XX] = (        -rm[XX]*rt[XX]) * cfx + ( rab[ZZ]-rm[YY]*rt[XX]) * cfy + (-rab[YY]-rm[ZZ]*rt[XX]) * cfz;
    fj[YY] = (-rab[ZZ]-rm[XX]*rt[YY]) * cfx + (        -rm[YY]*rt[YY]) * cfy + ( rab[XX]-rm[ZZ]*rt[YY]) * cfz;
    fj[ZZ] = ( rab[YY]-rm[XX]*rt[ZZ]) * cfx + (-rab[XX]-rm[YY]*rt[ZZ]) * cfy + (        -rm[ZZ]*rt[ZZ]) * cfz;
    /* 30 flops */

    cprod(rjb, rm, rt);
    /* 9 flops */

    rt[XX] *= denom*a;
    rt[YY] *= denom*a;
    rt[ZZ] *= denom*a;
    /* 3 flops */

    fk[XX] = (          -rm[XX]*rt[XX]) * cfx + (-a*rjb[ZZ]-rm[YY]*rt[XX]) * cfy + ( a*rjb[YY]-rm[ZZ]*rt[XX]) * cfz;
    fk[YY] = ( a*rjb[ZZ]-rm[XX]*rt[YY]) * cfx + (          -rm[YY]*rt[YY]) * cfy + (-a*rjb[XX]-rm[ZZ]*rt[YY]) * cfz;
    fk[ZZ] = (-a*rjb[YY]-rm[XX]*rt[ZZ]) * cfx + ( a*rjb[XX]-rm[YY]*rt[ZZ]) * cfy + (          -rm[ZZ]*rt[ZZ]) * cfz;
    /* 36 flops */

    cprod(rm, rja, rt);
    /* 9 flops */

    rt[XX] *= denom*b;
    rt[YY] *= denom*b;
    rt[ZZ] *= denom*b;
    /* 3 flops */

    fl[XX] = (          -rm[XX]*rt[XX]) * cfx + ( b*rja[ZZ]-rm[YY]*rt[XX]) * cfy + (-b*rja[YY]-rm[ZZ]*rt[XX]) * cfz;
    fl[YY] = (-b*rja[ZZ]-rm[XX]*rt[YY]) * cfx + (          -rm[YY]*rt[YY]) * cfy + ( b*rja[XX]-rm[ZZ]*rt[YY]) * cfz;
    fl[ZZ] = ( b*rja[YY]-rm[XX]*rt[ZZ]) * cfx + (-b*rja[XX]-rm[YY]*rt[ZZ]) * cfy + (          -rm[ZZ]*rt[ZZ]) * cfz;
    /* 36 flops */

    f[ai][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
    f[ai][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
    f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
    rvec_inc(f[aj], fj);
    rvec_inc(f[ak], fk);
    rvec_inc(f[al], fl);
    /* 21 flops */

    if (g)
    {
        ivec_sub(SHIFT_IVEC(g, av), SHIFT_IVEC(g, ai), di);
        svi = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        sij = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, ai), di);
        sik = IVEC2IS(di);
        ivec_sub(SHIFT_IVEC(g, al), SHIFT_IVEC(g, ai), di);
        sil = IVEC2IS(di);
    }
    else if (pbc)
    {
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    }
    else
    {
        svi = CENTRAL;
    }

    if (fshift && (svi != CENTRAL || sij != CENTRAL || sik != CENTRAL || sil != CENTRAL))
    {
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
        fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
        rvec_inc(fshift[sij], fj);
        rvec_inc(fshift[sik], fk);
        rvec_inc(fshift[sil], fl);
    }

    if (VirCorr)
    {
        rvec xiv;
        int  i, j;

        pbc_rvec_sub(pbc, x[av], x[ai], xiv);

        for (i = 0; i < DIM; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                dxdf[i][j] += -xiv[i]*fv[j] + xij[i]*fj[j] + xik[i]*fk[j] + xil[i]*fl[j];
            }
        }
    }

    /* TOTAL: 207 flops (Yuck!) */
}
static int spread_vsiten(const t_iatom ia[], const t_iparams ip[],
                         const rvec x[],
                         rvec f[], rvec fshift[],
                         const t_pbc *pbc, const t_graph *g)
{
    rvec xv, dx, fi;
    int  n3, av, i, ai;
    real a;
    ivec di;
    int  siv;

    n3 = 3*ip[ia[0]].vsiten.n;
    av = ia[1];
    copy_rvec(x[av], xv);

    for (i = 0; i < n3; i += 3)
    {
        ai = ia[i + 2];
        if (g)
        {
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, av), di);
            siv = IVEC2IS(di);
        }
        else if (pbc)
        {
            siv = pbc_dx_aiuc(pbc, x[ai], xv, dx);
        }
        else
        {
            siv = CENTRAL;
        }
        a = ip[ia[i]].vsiten.a;
        svmul(a, f[av], fi);
        rvec_inc(f[ai], fi);
        if (fshift && siv != CENTRAL)
        {
            rvec_inc(fshift[siv], fi);
            rvec_dec(fshift[CENTRAL], fi);
        }
        /* 6 flops */
    }

    return n3;
}
static int vsite_count(const t_ilist *ilist, int ftype)
{
    if (ftype == F_VSITEN)
    {
        return ilist[ftype].nr/3;
    }
    else
    {
        return ilist[ftype].nr/(1 + interaction_function[ftype].nratoms);
    }
}
static void spread_vsite_f_thread(const gmx_vsite_t *vsite,
                                  const rvec x[],
                                  rvec f[], rvec *fshift,
                                  gmx_bool VirCorr, matrix dxdf,
                                  t_iparams ip[], const t_ilist ilist[],
                                  const t_graph *g, const t_pbc *pbc_null)
{
    const PbcMode            pbcMode   = getPbcMode(pbc_null, vsite);
    /* We need another pbc pointer, as with charge groups we switch per vsite */
    const t_pbc             *pbc_null2 = pbc_null;
    gmx::ArrayRef<const int> vsite_pbc;

    /* This loop goes backwards so that forces on higher vsite types are
     * spread before the lower types they may be built from.
     */
    for (int ftype = c_ftypeVsiteEnd - 1; ftype >= c_ftypeVsiteStart; ftype--)
    {
        if (ilist[ftype].nr == 0)
        {
            continue;
        }

        int            nra = interaction_function[ftype].nratoms;
        int            nr  = ilist[ftype].nr;

        const t_iatom *ia  = ilist[ftype].iatoms;

        if (pbcMode == PbcMode::all)
        {
            pbc_null2 = pbc_null;
        }
        else if (pbcMode == PbcMode::chargeGroup)
        {
            if (vsite->vsite_pbc_loc)
            {
                vsite_pbc = (*vsite->vsite_pbc_loc)[ftype - c_ftypeVsiteStart];
            }
        }

        for (int i = 0; i < nr; )
        {
            if (!vsite_pbc.empty())
            {
                if (vsite_pbc[i/(1 + nra)] > -2)
                {
                    pbc_null2 = pbc_null;
                }
                else
                {
                    pbc_null2 = nullptr;
                }
            }

            int  inc = 1 + nra;
            int  tp  = ia[0];

            /* Constants for spreading */
            real a1  = ip[tp].vsite.a;
            real b1, c1;
            /* Spread the force depending on the vsite type */
            switch (ftype)
            {
                case F_VSITE2:
                    spread_vsite2(ia, a1, x, f, fshift, pbc_null2, g);
                    break;
                case F_VSITE3:
                    b1 = ip[tp].vsite.b;
                    spread_vsite3(ia, a1, b1, x, f, fshift, pbc_null2, g);
                    break;
                case F_VSITE3FD:
                    b1 = ip[tp].vsite.b;
                    spread_vsite3FD(ia, a1, b1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    break;
                case F_VSITE3FAD:
                    b1 = ip[tp].vsite.b;
                    spread_vsite3FAD(ia, a1, b1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    break;
                case F_VSITE3OUT:
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    spread_vsite3OUT(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    break;
                case F_VSITE4FD:
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    spread_vsite4FD(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    break;
                case F_VSITE4FDN:
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    spread_vsite4FDN(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    break;
                case F_VSITEN:
                    inc = spread_vsiten(ia, ip, x, f, fshift, pbc_null2, g);
                    break;
                default:
                    gmx_fatal(FARGS, "No such vsite type %d in %s, line %d",
                              ftype, __FILE__, __LINE__);
            }
            clear_rvec(f[ia[1]]);

            /* Increment loop variables */
            i  += inc;
            ia += inc;
        }
    }
}
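/* Note (editorial): because the loop above runs from the highest vsite type
 * down, the force on a vsite that is itself a constructing atom of a lower
 * type has already been fully spread, and then cleared by
 * clear_rvec(f[ia[1]]), before the lower type is processed. Construction in
 * construct_vsites_thread runs in the opposite order for the same reason.
 */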
/*! \brief Clears the task force buffer elements that are written by task idTask */
static void clearTaskForceBufferUsedElements(InterdependentTask *idTask)
{
    int ntask = idTask->spreadTask.size();
    for (int ti = 0; ti < ntask; ti++)
    {
        const AtomIndex *atomList = &idTask->atomIndex[idTask->spreadTask[ti]];
        int              natom    = atomList->atom.size();
        RVec            *force    = idTask->force.data();
        for (int i = 0; i < natom; i++)
        {
            clear_rvec(force[atomList->atom[i]]);
        }
    }
}
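/* Note (editorial): only the buffer entries actually touched during the
 * previous spread are cleared, so the cost scales with the number of used
 * elements rather than with the full local atom count.
 */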
void spread_vsite_f(const gmx_vsite_t *vsite,
                    const rvec * gmx_restrict x,
                    rvec * gmx_restrict f, rvec * gmx_restrict fshift,
                    gmx_bool VirCorr, matrix vir,
                    t_nrnb *nrnb, const t_idef *idef,
                    int ePBC, gmx_bool bMolPBC, const t_graph *g, const matrix box,
                    const t_commrec *cr, gmx_wallcycle *wcycle)
{
    wallcycle_start(wcycle, ewcVSITESPREAD);
    const bool useDomdec = vsite->useDomdec;
    GMX_ASSERT(!useDomdec || (cr != nullptr && DOMAINDECOMP(cr)), "When vsites are set up with domain decomposition, we need a valid commrec");

    t_pbc pbc, *pbc_null;

    /* We only need to do pbc when we have inter-cg vsites */
    if ((useDomdec || bMolPBC) && vsite->n_intercg_vsite)
    {
        /* This is wasting some CPU time as we now do this multiple times
         * per MD step.
         */
        pbc_null = set_pbc_dd(&pbc, ePBC, useDomdec ? cr->dd->nc : nullptr, FALSE, box);
    }
    else
    {
        pbc_null = nullptr;
    }

    if (useDomdec)
    {
        dd_clear_f_vsites(cr->dd, f);
    }

    if (vsite->nthreads == 1)
    {
        matrix dxdf;
        if (VirCorr)
        {
            clear_mat(dxdf);
        }
        spread_vsite_f_thread(vsite,
                              x, f, fshift,
                              VirCorr, dxdf,
                              idef->iparams, idef->il,
                              g, pbc_null);

        if (VirCorr)
        {
            for (int i = 0; i < DIM; i++)
            {
                for (int j = 0; j < DIM; j++)
                {
                    vir[i][j] += -0.5*dxdf[i][j];
                }
            }
        }
    }
    else
    {
        /* First spread the vsites that might depend on non-local vsites */
        if (VirCorr)
        {
            clear_mat(vsite->tData[vsite->nthreads]->dxdf);
        }
        spread_vsite_f_thread(vsite,
                              x, f, fshift,
                              VirCorr, vsite->tData[vsite->nthreads]->dxdf,
                              idef->iparams,
                              vsite->tData[vsite->nthreads]->ilist,
                              g, pbc_null);

#pragma omp parallel num_threads(vsite->nthreads)
        {
            try
            {
                int          thread = gmx_omp_get_thread_num();
                VsiteThread &tData  = *vsite->tData[thread];

                rvec        *fshift_t;
                if (thread == 0 || fshift == nullptr)
                {
                    fshift_t = fshift;
                }
                else
                {
                    fshift_t = tData.fshift;

                    for (int i = 0; i < SHIFTS; i++)
                    {
                        clear_rvec(fshift_t[i]);
                    }
                }
                if (VirCorr)
                {
                    clear_mat(tData.dxdf);
                }

                if (tData.useInterdependentTask)
                {
                    /* Spread the vsites that spread outside our local range.
                     * This is done using a thread-local force buffer (force).
                     * First we need to copy the input vsite forces to force.
                     */
                    InterdependentTask *idTask = &tData.idTask;

                    /* Clear the buffer elements set by our task during
                     * the last call to spread_vsite_f.
                     */
                    clearTaskForceBufferUsedElements(idTask);

                    int nvsite = idTask->vsite.size();
                    for (int i = 0; i < nvsite; i++)
                    {
                        copy_rvec(f[idTask->vsite[i]],
                                  idTask->force[idTask->vsite[i]]);
                    }
                    spread_vsite_f_thread(vsite,
                                          x, as_rvec_array(idTask->force.data()), fshift_t,
                                          VirCorr, tData.dxdf,
                                          idef->iparams,
                                          tData.idTask.ilist,
                                          g, pbc_null);

                    /* We need a barrier before reducing forces below
                     * that have been produced by a different thread above.
                     */
#pragma omp barrier

                    /* Loop over all thread tasks and reduce the forces they
                     * produced on atoms that fall in our range.
                     * Note that atomic reduction would be a simpler solution,
                     * but that might not have good support on all platforms.
                     */
                    int ntask = idTask->reduceTask.size();
                    for (int ti = 0; ti < ntask; ti++)
                    {
                        const InterdependentTask *idt_foreign = &vsite->tData[idTask->reduceTask[ti]]->idTask;
                        const AtomIndex          *atomList    = &idt_foreign->atomIndex[thread];
                        const RVec               *f_foreign   = idt_foreign->force.data();

                        int natom = atomList->atom.size();
                        for (int i = 0; i < natom; i++)
                        {
                            int ind = atomList->atom[i];
                            rvec_inc(f[ind], f_foreign[ind]);
                            /* Clearing of f_foreign is done at the next step */
                        }
                    }
                    /* Clear the vsite forces, both in f and force */
                    for (int i = 0; i < nvsite; i++)
                    {
                        int ind = tData.idTask.vsite[i];
                        clear_rvec(f[ind]);
                        clear_rvec(tData.idTask.force[ind]);
                    }
                }

                /* Spread the vsites that spread locally only */
                spread_vsite_f_thread(vsite,
                                      x, f, fshift_t,
                                      VirCorr, tData.dxdf,
                                      idef->iparams,
                                      tData.ilist,
                                      g, pbc_null);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }

        if (fshift != nullptr)
        {
            for (int th = 1; th < vsite->nthreads; th++)
            {
                for (int i = 0; i < SHIFTS; i++)
                {
                    rvec_inc(fshift[i], vsite->tData[th]->fshift[i]);
                }
            }
        }

        if (VirCorr)
        {
            for (int th = 0; th < vsite->nthreads + 1; th++)
            {
                /* MSVC doesn't like matrix references, so we use a pointer */
                const matrix *dxdf = &vsite->tData[th]->dxdf;

                for (int i = 0; i < DIM; i++)
                {
                    for (int j = 0; j < DIM; j++)
                    {
                        vir[i][j] += -0.5*(*dxdf)[i][j];
                    }
                }
            }
        }
    }

    if (useDomdec)
    {
        dd_move_f_vsites(cr->dd, f, fshift);
    }

    inc_nrnb(nrnb, eNR_VSITE2,    vsite_count(idef->il, F_VSITE2));
    inc_nrnb(nrnb, eNR_VSITE3,    vsite_count(idef->il, F_VSITE3));
    inc_nrnb(nrnb, eNR_VSITE3FD,  vsite_count(idef->il, F_VSITE3FD));
    inc_nrnb(nrnb, eNR_VSITE3FAD, vsite_count(idef->il, F_VSITE3FAD));
    inc_nrnb(nrnb, eNR_VSITE3OUT, vsite_count(idef->il, F_VSITE3OUT));
    inc_nrnb(nrnb, eNR_VSITE4FD,  vsite_count(idef->il, F_VSITE4FD));
    inc_nrnb(nrnb, eNR_VSITE4FDN, vsite_count(idef->il, F_VSITE4FDN));
    inc_nrnb(nrnb, eNR_VSITEN,    vsite_count(idef->il, F_VSITEN));

    wallcycle_stop(wcycle, ewcVSITESPREAD);
}
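/* Note (editorial): a minimal usage outline (not from this file) of how a
 * typical MD step uses these two entry points: construct vsite positions
 * before the force calculation, spread the vsite forces afterwards.
 *
 *   construct_vsites(vsite, x, dt, v, idef->iparams, idef->il,
 *                    ePBC, bMolPBC, cr, box);
 *   ... compute forces f on all particles, including vsites ...
 *   spread_vsite_f(vsite, x, f, fshift, VirCorr, vir,
 *                  nrnb, idef, ePBC, bMolPBC, g, box, cr, wcycle);
 */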
/*! \brief Returns an array with charge-group indices for each atom
 *
 * \param[in] chargeGroups  The charge group block struct
 */
static std::vector<int> atom2cg(const t_block &chargeGroups)
{
    std::vector<int> a2cg(chargeGroups.index[chargeGroups.nr], 0);

    for (int chargeGroup = 0; chargeGroup < chargeGroups.nr; chargeGroup++)
    {
        std::fill(a2cg.begin() + chargeGroups.index[chargeGroup],
                  a2cg.begin() + chargeGroups.index[chargeGroup + 1],
                  chargeGroup);
    }

    return a2cg;
}
int count_intercg_vsites(const gmx_mtop_t *mtop)
{
    int n_intercg_vsite = 0;
    for (const gmx_molblock_t &molb : mtop->molblock)
    {
        const gmx_moltype_t &molt = mtop->moltype[molb.type];

        std::vector<int>     a2cg = atom2cg(molt.cgs);
        for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
        {
            int            nral = NRAL(ftype);
            const t_ilist &il   = molt.ilist[ftype];
            const t_iatom *ia   = il.iatoms;
            for (int i = 0; i < il.nr; i += 1 + nral)
            {
                int cg = a2cg[ia[1 + i]];
                for (int a = 1; a < nral; a++)
                {
                    if (a2cg[ia[1 + i + a]] != cg)
                    {
                        n_intercg_vsite += molb.nmol;
                        break;
                    }
                }
            }
        }
    }

    return n_intercg_vsite;
}
static VsitePbc
get_vsite_pbc(const t_iparams *iparams, const t_ilist *ilist,
              const t_atom *atom, const t_mdatoms *md,
              const t_block &cgs)
{
    /* Make an atom to charge group index */
    std::vector<int> a2cg = atom2cg(cgs);

    /* Make an array that tells if the pbc of an atom is set */
    std::vector<bool> pbc_set(cgs.index[cgs.nr], false);
    /* PBC is set for all non vsites */
    for (int a = 0; a < cgs.index[cgs.nr]; a++)
    {
        if ((atom && atom[a].ptype != eptVSite) ||
            (md   && md->ptype[a]  != eptVSite))
        {
            pbc_set[a] = true;
        }
    }

    VsitePbc vsite_pbc;

    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
    {
        int               nral = NRAL(ftype);
        const t_ilist    *il   = &ilist[ftype];
        const t_iatom    *ia   = il->iatoms;

        std::vector<int> &vsite_pbc_f = vsite_pbc[ftype - F_VSITE2];
        vsite_pbc_f.resize(il->nr/(1 + nral));

        int i = 0;
        while (i < il->nr)
        {
            int     vsi   = i/(1 + nral);
            t_iatom vsite = ia[i + 1];
            int     cg_v  = a2cg[vsite];
            /* A value of -2 signals that this vsite and its constructing
             * atoms are all within the same cg, so no pbc is required.
             */
            vsite_pbc_f[vsi] = -2;
            /* Check if constructing atoms are outside the vsite's cg */
            int nc3 = 0;
            if (ftype == F_VSITEN)
            {
                nc3 = 3*iparams[ia[i]].vsiten.n;
                for (int j = 0; j < nc3; j += 3)
                {
                    if (a2cg[ia[i + j + 2]] != cg_v)
                    {
                        vsite_pbc_f[vsi] = -1;
                    }
                }
            }
            else
            {
                for (int a = 1; a < nral; a++)
                {
                    if (a2cg[ia[i + 1 + a]] != cg_v)
                    {
                        vsite_pbc_f[vsi] = -1;
                    }
                }
            }
            if (vsite_pbc_f[vsi] == -1)
            {
                /* Check if this is the first processed atom of a vsite only cg */
                gmx_bool bVsiteOnlyCG_and_FirstAtom = TRUE;
                for (int a = cgs.index[cg_v]; a < cgs.index[cg_v + 1]; a++)
                {
                    /* Non-vsites already have pbc set, so simply check for pbc_set */
                    if (pbc_set[a])
                    {
                        bVsiteOnlyCG_and_FirstAtom = FALSE;
                        break;
                    }
                }
                if (bVsiteOnlyCG_and_FirstAtom)
                {
                    /* First processed atom of a vsite only charge group.
                     * The pbc of the input coordinates to construct_vsites
                     * should be preserved.
                     */
                    vsite_pbc_f[vsi] = vsite;
                }
                else if (cg_v != a2cg[ia[1 + i + 1]])
                {
                    /* This vsite has a different charge group index
                     * than its first constructing atom
                     * and the charge group has more than one atom,
                     * search for the first normal particle
                     * or vsite that already had its pbc defined.
                     * If nothing is found, use full pbc for this vsite.
                     */
                    for (int a = cgs.index[cg_v]; a < cgs.index[cg_v + 1]; a++)
                    {
                        if (a != vsite && pbc_set[a])
                        {
                            vsite_pbc_f[vsi] = a;
                            if (debug)
                            {
                                fprintf(debug, "vsite %d match pbc with atom %d\n",
                                        vsite + 1, a + 1);
                            }
                            break;
                        }
                    }
                }
                if (debug)
                {
                    fprintf(debug, "vsite atom %d  cg %d - %d pbc atom %d\n",
                            vsite + 1, cgs.index[cg_v] + 1, cgs.index[cg_v + 1],
                            vsite_pbc_f[vsi] + 1);
                }
            }
            if (ftype == F_VSITEN)
            {
                /* The other entries in vsite_pbc_f are not used for center vsites */
                i += nc3;
            }
            else
            {
                i += 1 + nral;
            }

            /* This vsite now has its pbc defined */
            pbc_set[vsite] = true;
        }
    }

    return vsite_pbc;
}
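/* Note (editorial): the per-vsite pbc values computed above encode:
 *   -2  the vsite and all its constructing atoms are in the same charge
 *       group, no pbc treatment needed;
 *   -1  constructing atoms lie in other charge groups, use full pbc;
 *  >=0  the index of the atom whose periodic image the vsite should follow
 *       (possibly the vsite itself, meaning: keep the input image).
 */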
std::unique_ptr<gmx_vsite_t>
initVsite(const gmx_mtop_t &mtop,
          const t_commrec  *cr)
{
    GMX_RELEASE_ASSERT(cr != nullptr, "We need a valid commrec");

    std::unique_ptr<gmx_vsite_t> vsite;

    /* check if there are vsites */
    int nvsite = 0;
    for (int ftype = 0; ftype < F_NRE; ftype++)
    {
        if (interaction_function[ftype].flags & IF_VSITE)
        {
            GMX_ASSERT(ftype >= c_ftypeVsiteStart && ftype < c_ftypeVsiteEnd, "c_ftypeVsiteStart and/or c_ftypeVsiteEnd do not have correct values");

            nvsite += gmx_mtop_ftype_count(&mtop, ftype);
        }
        else
        {
            GMX_ASSERT(ftype < c_ftypeVsiteStart || ftype >= c_ftypeVsiteEnd, "c_ftypeVsiteStart and/or c_ftypeVsiteEnd do not have correct values");
        }
    }

    if (nvsite == 0)
    {
        return vsite;
    }

    vsite = gmx::compat::make_unique<gmx_vsite_t>();

    vsite->n_intercg_vsite   = count_intercg_vsites(&mtop);

    vsite->bHaveChargeGroups = (ncg_mtop(&mtop) < mtop.natoms);

    vsite->useDomdec         = (DOMAINDECOMP(cr) && cr->dd->nnodes > 1);

    /* If we don't have charge groups, the vsite follows its own pbc.
     *
     * With charge groups, each vsite needs to follow the pbc of the charge
     * group. Thus we need to keep track of PBC. Here we assume that without
     * domain decomposition all molecules are whole (which will not be
     * the case with periodic molecules).
     */
    if (vsite->bHaveChargeGroups &&
        vsite->n_intercg_vsite > 0 &&
        DOMAINDECOMP(cr))
    {
        vsite->vsite_pbc_molt.resize(mtop.moltype.size());
        for (size_t mt = 0; mt < mtop.moltype.size(); mt++)
        {
            const gmx_moltype_t &molt = mtop.moltype[mt];
            vsite->vsite_pbc_molt[mt] = get_vsite_pbc(mtop.ffparams.iparams,
                                                      molt.ilist,
                                                      molt.atoms.atom, nullptr,
                                                      molt.cgs);
        }

        vsite->vsite_pbc_loc = gmx::compat::make_unique<VsitePbc>();
    }

    vsite->nthreads = gmx_omp_nthreads_get(emntVSITE);

    if (vsite->nthreads > 1)
    {
        /* We need one extra thread data structure for the overlap vsites */
        vsite->tData.resize(vsite->nthreads + 1);
#pragma omp parallel for num_threads(vsite->nthreads) schedule(static)
        for (int thread = 0; thread < vsite->nthreads; thread++)
        {
            try
            {
                vsite->tData[thread] = gmx::compat::make_unique<VsiteThread>();

                InterdependentTask &idTask = vsite->tData[thread]->idTask;
                idTask.nuse                = 0;
                idTask.atomIndex.resize(vsite->nthreads);
            }
            GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        }
        if (vsite->nthreads > 1)
        {
            vsite->tData[vsite->nthreads] = gmx::compat::make_unique<VsiteThread>();
        }
    }

    return vsite;
}
gmx_vsite_t::gmx_vsite_t()
{
}

gmx_vsite_t::~gmx_vsite_t()
{
}
static inline void flagAtom(InterdependentTask *idTask, int atom,
                            int thread, int nthread, int natperthread)
{
    if (!idTask->use[atom])
    {
        idTask->use[atom] = true;
        thread            = atom/natperthread;
        /* Assign all non-local atom force writes to thread 0 */
        if (thread >= nthread)
        {
            thread = 0;
        }
        idTask->atomIndex[thread].atom.push_back(atom);
        idTask->nuse++;
    }
}
/*! \brief Here we try to assign all vsites that are in our local range.
 *
 * Our task local atom range is tData->rangeStart - tData->rangeEnd.
 * Vsites that depend only on local atoms, as indicated by taskIndex[]==thread,
 * are assigned to task tData->ilist. Vsites that depend on non-local atoms,
 * but not on other vsites, are assigned to task tData->idTask.ilist.
 * taskIndex[] is set for all vsites in our range, either to our local tasks
 * or to the single last task as taskIndex[]=2*nthreads.
 */
static void assignVsitesToThread(VsiteThread           *tData,
                                 int                    thread,
                                 int                    nthread,
                                 int                    natperthread,
                                 gmx::ArrayRef<int>     taskIndex,
                                 const t_ilist         *ilist,
                                 const t_iparams       *ip,
                                 const unsigned short  *ptype)
{
    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
    {
        tData->ilist[ftype].nr        = 0;
        tData->idTask.ilist[ftype].nr = 0;

        int      nral1 = 1 + NRAL(ftype);
        int      inc   = nral1;
        t_iatom *iat   = ilist[ftype].iatoms;
        for (int i = 0; i < ilist[ftype].nr; )
        {
            if (ftype == F_VSITEN)
            {
                /* The 3 below is from 1+NRAL(ftype)=3 */
                inc = ip[iat[i]].vsiten.n*3;
            }

            if (iat[1 + i] <  tData->rangeStart ||
                iat[1 + i] >= tData->rangeEnd)
            {
                /* This vsite belongs to a different thread */
                i += inc;
                continue;
            }

            /* We would like to assign this vsite to task thread,
             * but it might depend on atoms outside the atom range of thread
             * or on another vsite not assigned to task thread.
             */
            int task = thread;
            if (ftype != F_VSITEN)
            {
                for (int j = i + 2; j < i + nral1; j++)
                {
                    /* Do a range check to avoid a harmless race on taskIndex */
                    if (iat[j] <  tData->rangeStart ||
                        iat[j] >= tData->rangeEnd ||
                        taskIndex[iat[j]] != thread)
                    {
                        if (!tData->useInterdependentTask ||
                            ptype[iat[j]] == eptVSite)
                        {
                            /* At least one constructing atom is a vsite
                             * that is not assigned to the same thread.
                             * Put this vsite into a separate task.
                             */
                            task = 2*nthread;
                            break;
                        }

                        /* There are constructing atoms outside our range,
                         * put this vsite into a second task to be executed
                         * on the same thread. During construction no barrier
                         * is needed between the two tasks on the same thread.
                         * During spreading we need to run this task with
                         * an additional thread-local intermediate force buffer
                         * (or atomic reduction) and a barrier between the two
                         * tasks.
                         */
                        task = nthread + thread;
                    }
                }
            }
            else
            {
                for (int j = i + 2; j < i + inc; j += 3)
                {
                    /* Do a range check to avoid a harmless race on taskIndex */
                    if (iat[j] <  tData->rangeStart ||
                        iat[j] >= tData->rangeEnd ||
                        taskIndex[iat[j]] != thread)
                    {
                        GMX_ASSERT(ptype[iat[j]] != eptVSite, "A vsite to be assigned in assignVsitesToThread has a vsite as a constructing atom that does not belong to our task, such vsites should be assigned to the single 'master' task");

                        task = nthread + thread;
                    }
                }
            }

            /* Update this vsite's thread index entry */
            taskIndex[iat[1 + i]] = task;

            if (task == thread || task == nthread + thread)
            {
                /* Copy this vsite to the thread data struct of thread */
                t_ilist *il_task;
                if (task == thread)
                {
                    il_task = &tData->ilist[ftype];
                }
                else
                {
                    il_task = &tData->idTask.ilist[ftype];
                }
                /* Ensure we have sufficient memory allocated */
                if (il_task->nr + inc > il_task->nalloc)
                {
                    il_task->nalloc = over_alloc_large(il_task->nr + inc);
                    srenew(il_task->iatoms, il_task->nalloc);
                }
                /* Copy the vsite data to the thread-task local array */
                for (int j = i; j < i + inc; j++)
                {
                    il_task->iatoms[il_task->nr++] = iat[j];
                }
                if (task == nthread + thread)
                {
                    /* This vsite writes outside our own task force block.
                     * Put it into the interdependent task list and flag
                     * the atoms involved for reduction.
                     */
                    tData->idTask.vsite.push_back(iat[i + 1]);
                    if (ftype != F_VSITEN)
                    {
                        for (int j = i + 2; j < i + nral1; j++)
                        {
                            flagAtom(&tData->idTask, iat[j],
                                     thread, nthread, natperthread);
                        }
                    }
                    else
                    {
                        for (int j = i + 2; j < i + inc; j += 3)
                        {
                            flagAtom(&tData->idTask, iat[j],
                                     thread, nthread, natperthread);
                        }
                    }
                }
            }

            i += inc;
        }
    }
}
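/* Note (editorial): after assignment, taskIndex[] holds one of:
 *   thread            - the vsite is fully local to this thread's task,
 *   nthread + thread  - the vsite goes into the interdependent task of
 *                       this thread,
 *   2*nthread         - the vsite is deferred to the single master task.
 */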
/*! \brief Assign all vsites with taskIndex[]==task to task tData */
static void assignVsitesToSingleTask(VsiteThread             *tData,
                                     int                      task,
                                     gmx::ArrayRef<const int> taskIndex,
                                     const t_ilist           *ilist,
                                     const t_iparams         *ip)
{
    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
    {
        tData->ilist[ftype].nr        = 0;
        tData->idTask.ilist[ftype].nr = 0;

        int      nral1   = 1 + NRAL(ftype);
        int      inc     = nral1;
        t_iatom *iat     = ilist[ftype].iatoms;
        t_ilist *il_task = &tData->ilist[ftype];

        for (int i = 0; i < ilist[ftype].nr; )
        {
            if (ftype == F_VSITEN)
            {
                /* The 3 below is from 1+NRAL(ftype)=3 */
                inc = ip[iat[i]].vsiten.n*3;
            }
            /* Check if the vsite is assigned to our task */
            if (taskIndex[iat[1 + i]] == task)
            {
                /* Ensure we have sufficient memory allocated */
                if (il_task->nr + inc > il_task->nalloc)
                {
                    il_task->nalloc = over_alloc_large(il_task->nr + inc);
                    srenew(il_task->iatoms, il_task->nalloc);
                }
                /* Copy the vsite data to the thread-task local array */
                for (int j = i; j < i + inc; j++)
                {
                    il_task->iatoms[il_task->nr++] = iat[j];
                }
            }

            i += inc;
        }
    }
}
void split_vsites_over_threads(const t_ilist   *ilist,
                               const t_iparams *ip,
                               const t_mdatoms *mdatoms,
                               gmx_vsite_t     *vsite)
{
    int vsite_atom_range, natperthread;

    if (vsite->nthreads == 1)
    {
        /* Nothing to do */
        return;
    }

    /* The current way of distributing the vsites over threads is primitive.
     * We divide the atom range 0 - natoms_in_vsite uniformly over threads,
     * without taking into account how the vsites are distributed.
     * Without domain decomposition we at least tighten the upper bound
     * of the range (useful for common systems such as a vsite-protein
     * in 3-site water).
     * With domain decomposition, as long as the vsites are distributed
     * uniformly in each domain along the major dimension, usually x,
     * it will also perform well.
     */
    if (!vsite->useDomdec)
    {
        vsite_atom_range = -1;
        for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
        {
            if (ftype != F_VSITEN)
            {
                int            nral1 = 1 + NRAL(ftype);
                const t_iatom *iat   = ilist[ftype].iatoms;
                for (int i = 0; i < ilist[ftype].nr; i += nral1)
                {
                    for (int j = i + 1; j < i + nral1; j++)
                    {
                        vsite_atom_range = std::max(vsite_atom_range, iat[j]);
                    }
                }
            }
            else
            {
                int            vs_ind_end;

                const t_iatom *iat = ilist[ftype].iatoms;

                int            i = 0;
                while (i < ilist[ftype].nr)
                {
                    /* The 3 below is from 1+NRAL(ftype)=3 */
                    vs_ind_end = i + ip[iat[i]].vsiten.n*3;

                    vsite_atom_range = std::max(vsite_atom_range, iat[i + 1]);
                    while (i < vs_ind_end)
                    {
                        vsite_atom_range = std::max(vsite_atom_range, iat[i + 2]);
                        i               += 3;
                    }
                }
            }
        }
        vsite_atom_range++;
        natperthread = (vsite_atom_range + vsite->nthreads - 1)/vsite->nthreads;
    }
    else
    {
        /* Any local or not local atom could be involved in virtual sites.
         * But since we usually have very few non-local virtual sites
         * (only non-local vsites that depend on local vsites),
         * we distribute the local atom range equally over the threads.
         * When assigning vsites to threads, we should take care that the last
         * thread also covers the non-local range.
         */
        vsite_atom_range = mdatoms->nr;
        natperthread     = (mdatoms->homenr + vsite->nthreads - 1)/vsite->nthreads;
    }

    if (debug)
    {
        fprintf(debug, "virtual site thread dist: natoms %d, range %d, natperthread %d\n", mdatoms->nr, vsite_atom_range, natperthread);
    }
    /* To simplify the vsite assignment, we make an index which tells us
     * to which task particles, both non-vsites and vsites, are assigned.
     */
    vsite->taskIndex.resize(mdatoms->nr);

    /* Initialize the task index array. Here we assign the non-vsite
     * particles to task=thread, so we easily figure out if vsites
     * depend on local and/or non-local particles in assignVsitesToThread.
     */
    gmx::ArrayRef<int> taskIndex = vsite->taskIndex;
    {
        int thread = 0;
        for (int i = 0; i < mdatoms->nr; i++)
        {
            if (mdatoms->ptype[i] == eptVSite)
            {
                /* vsites are not assigned to a task yet */
                taskIndex[i] = -1;
            }
            else
            {
                /* assign non-vsite particles to task thread */
                taskIndex[i] = thread;
            }
            if (i == (thread + 1)*natperthread && thread < vsite->nthreads)
            {
                thread++;
            }
        }
    }
#pragma omp parallel num_threads(vsite->nthreads)
    {
        try
        {
            int          thread = gmx_omp_get_thread_num();
            VsiteThread &tData  = *vsite->tData[thread];

            /* Clear the buffer use flags that were set before */
            if (tData.useInterdependentTask)
            {
                InterdependentTask &idTask = tData.idTask;

                /* To avoid an extra OpenMP barrier in spread_vsite_f,
                 * we clear the force buffer at the next step,
                 * so we need to do it here as well.
                 */
                clearTaskForceBufferUsedElements(&idTask);

                idTask.vsite.resize(0);
                for (int t = 0; t < vsite->nthreads; t++)
                {
                    AtomIndex &atomIndex = idTask.atomIndex[t];
                    int        natom     = atomIndex.atom.size();
                    for (int i = 0; i < natom; i++)
                    {
                        idTask.use[atomIndex.atom[i]] = false;
                    }
                    atomIndex.atom.resize(0);
                }
                idTask.nuse = 0;
            }

            /* To avoid large f_buf allocations of #threads*vsite_atom_range
             * we don't use task2 with more than 200000 atoms. This doesn't
             * affect performance, since with such a large range relatively few
             * vsites will end up in the separate task.
             * Note that useTask2 should be the same for all threads.
             */
            tData.useInterdependentTask = (vsite_atom_range <= 200000);
            if (tData.useInterdependentTask)
            {
                size_t              natoms_use_in_vsites = vsite_atom_range;
                InterdependentTask &idTask               = tData.idTask;
                /* To avoid resizing and re-clearing every nstlist steps,
                 * we never down size the force buffer.
                 */
                if (natoms_use_in_vsites > idTask.force.size() ||
                    natoms_use_in_vsites > idTask.use.size())
                {
                    idTask.force.resize(natoms_use_in_vsites, { 0, 0, 0 });
                    idTask.use.resize(natoms_use_in_vsites, false);
                }
            }

            /* Assign all vsites that can execute independently on threads */
            tData.rangeStart =  thread     *natperthread;
            if (thread < vsite->nthreads - 1)
            {
                tData.rangeEnd = (thread + 1)*natperthread;
            }
            else
            {
                /* The last thread should cover up to the end of the range */
                tData.rangeEnd = mdatoms->nr;
            }
            assignVsitesToThread(&tData,
                                 thread, vsite->nthreads,
                                 natperthread,
                                 taskIndex,
                                 ilist, ip, mdatoms->ptype);

            if (tData.useInterdependentTask)
            {
                /* In the worst case, all tasks write to force ranges of
                 * all other tasks, leading to #tasks^2 scaling (this is only
                 * the overhead, the actual flops remain constant).
                 * But in most cases there is far less coupling. To improve
                 * scaling at high thread counts we therefore construct
                 * an index to only loop over the actually affected tasks.
                 */
                InterdependentTask &idTask = tData.idTask;

                /* Ensure assignVsitesToThread finished on other threads */
#pragma omp barrier

                idTask.spreadTask.resize(0);
                idTask.reduceTask.resize(0);
                for (int t = 0; t < vsite->nthreads; t++)
                {
                    /* Do we write to the force buffer of task t? */
                    if (!idTask.atomIndex[t].atom.empty())
                    {
                        idTask.spreadTask.push_back(t);
                    }
                    /* Does task t write to our force buffer? */
                    if (!vsite->tData[t]->idTask.atomIndex[thread].atom.empty())
                    {
                        idTask.reduceTask.push_back(t);
                    }
                }
            }
        }
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    }
    /* Assign all remaining vsites, that will have taskIndex[]=2*vsite->nthreads,
     * to a single task that will not run in parallel with other tasks.
     */
    assignVsitesToSingleTask(vsite->tData[vsite->nthreads].get(),
                             2*vsite->nthreads,
                             taskIndex,
                             ilist, ip);
    if (debug && vsite->nthreads > 1)
    {
        fprintf(debug, "virtual site useInterdependentTask %d, nuse:\n",
                static_cast<int>(vsite->tData[0]->useInterdependentTask));
        for (int th = 0; th < vsite->nthreads + 1; th++)
        {
            fprintf(debug, " %4d", vsite->tData[th]->idTask.nuse);
        }
        fprintf(debug, "\n");

        for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
        {
            if (ilist[ftype].nr > 0)
            {
                fprintf(debug, "%-20s thread dist:",
                        interaction_function[ftype].longname);
                for (int th = 0; th < vsite->nthreads + 1; th++)
                {
                    fprintf(debug, " %4d %4d ",
                            vsite->tData[th]->ilist[ftype].nr,
                            vsite->tData[th]->idTask.ilist[ftype].nr);
                }
                fprintf(debug, "\n");
            }
        }
    }

#ifndef NDEBUG
    int nrOrig     = vsiteIlistNrCount(ilist);
    int nrThreaded = 0;
    for (int th = 0; th < vsite->nthreads + 1; th++)
    {
        nrThreaded +=
            vsiteIlistNrCount(vsite->tData[th]->ilist) +
            vsiteIlistNrCount(vsite->tData[th]->idTask.ilist);
    }
    GMX_ASSERT(nrThreaded == nrOrig, "The number of virtual sites assigned to all thread tasks has to match the total number of virtual sites");
#endif
}
void set_vsite_top(gmx_vsite_t          *vsite,
                   const gmx_localtop_t *top,
                   const t_mdatoms      *md)
{
    if (vsite->n_intercg_vsite > 0 && vsite->bHaveChargeGroups)
    {
        vsite->vsite_pbc_loc  = gmx::compat::make_unique<VsitePbc>();
        *vsite->vsite_pbc_loc = get_vsite_pbc(top->idef.iparams,
                                              top->idef.il, nullptr, md,
                                              top->cgs);
    }

    if (vsite->nthreads > 1)
    {
        if (vsite->bHaveChargeGroups)
        {
            gmx_fatal(FARGS, "The combination of threading, virtual sites and charge groups is not implemented");
        }

        split_vsites_over_threads(top->idef.il, top->idef.iparams,
                                  md, vsite);
    }
}