2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
47 #include "gromacs/domdec/domdec.h"
48 #include "gromacs/domdec/domdec_struct.h"
49 #include "gromacs/gmxlib/network.h"
50 #include "gromacs/gmxlib/nrnb.h"
51 #include "gromacs/math/functions.h"
52 #include "gromacs/math/vec.h"
53 #include "gromacs/mdlib/gmx_omp_nthreads.h"
54 #include "gromacs/mdtypes/commrec.h"
55 #include "gromacs/mdtypes/mdatom.h"
56 #include "gromacs/pbcutil/ishift.h"
57 #include "gromacs/pbcutil/mshift.h"
58 #include "gromacs/pbcutil/pbc.h"
59 #include "gromacs/timing/wallcycle.h"
60 #include "gromacs/topology/ifunc.h"
61 #include "gromacs/topology/mtop_util.h"
62 #include "gromacs/topology/topology.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/gmxomp.h"
68 /* The strategy used here for assigning virtual sites to (thread-)tasks
71 * We divide the atom range that vsites operate on (natoms_local with DD,
72 * 0 - last atom involved in vsites without DD) equally over all threads.
74 * Vsites in the local range constructed from atoms in the local range
75 * and/or other vsites that are fully local are assigned to a simple,
78 * Vsites that are not assigned after using the above criterion get assigned
79 * to a so called "interdependent" thread task when none of the constructing
80 * atoms is a vsite. These tasks are called interdependent, because one task
81 * accesses atoms assigned to a different task/thread.
82 * Note that this option is turned off with large (local) atom counts
83 * to avoid high memory usage.
85 * Any remaining vsites are assigned to a separate master thread task.
/* Resets all F_NRE interaction lists; visibly nulls the iatoms pointer of
 * each entry. NOTE(review): body lines appear elided in this view (the size
 * fields are presumably cleared too) — confirm against the upstream file. */
90 static void init_ilist(t_ilist *ilist)
92 for (int i = 0; i < F_NRE; i++)
96 ilist[i].iatoms = nullptr;
100 /*! \brief List of atom indices belonging to a task */
/* NOTE(review): the struct declaration line itself appears elided here —
 * presumably `struct AtomIndex {`; confirm against the upstream file. */
102 //! List of atom indices
103 std::vector<int> atom;
106 /*! \brief Data structure for thread tasks that use constructing atoms outside their own atom range */
107 struct InterdependentTask
109 //! The interaction lists, only vsite entries are used
110 t_ilist ilist[F_NRE];
111 //! Thread/task-local force buffer
112 std::vector<RVec> force;
113 //! The atom indices of the vsites of our task
114 std::vector<int> vsite;
115 //! Flags whether each element of \p force is spread to or not
116 std::vector<bool> use;
117 //! The number of entries set to true in use
// NOTE(review): the member declaration for the count above (presumably
// `int nuse;`) appears elided in this view — confirm against upstream.
119 //! Array of atoms indices, size nthreads, covering all nuse set elements in use
120 std::vector<AtomIndex> atomIndex;
121 //! List of tasks (force blocks) this task spread forces to
122 std::vector<int> spreadTask;
123 //! List of tasks that write to this tasks force block range
124 std::vector<int> reduceTask;
133 /*! \brief Vsite thread task data structure */
/* NOTE(review): the struct declaration line and several member declarations
 * (atom-range ints, fshift buffer, virial matrix) appear elided in this
 * view; only their doc comments remain. Confirm against upstream. */
136 //! Start of atom range of this task
138 //! End of atom range of this task
140 //! The interaction lists, only vsite entries are used
141 t_ilist ilist[F_NRE];
142 //! Local fshift accumulation buffer
144 //! Local virial dx*df accumulation buffer
146 //! Tells if interdependent task idTask should be used (in addition to the rest of this task), this bool has the same value on all threads
147 bool useInterdependentTask;
148 //! Data for vsites that involve constructing atoms in the atom range of other threads/tasks
149 InterdependentTask idTask;
151 /*! \brief Constructor */
/* The constructor clears the SHIFTS-sized fshift buffer and disables the
 * interdependent task by default. */
157 clear_rvecs(SHIFTS, fshift);
159 useInterdependentTask = false;
163 /*! \brief Returns the sum of the vsite ilist sizes over all vsite types
165 * \param[in] ilist The interaction list
/* Template so it works for any list type providing size() per ftype entry.
 * NOTE(review): the declaration/return of the accumulator `nr` is elided
 * in this view. */
167 template <typename T>
168 static int vsiteIlistNrCount(const T *ilist)
171 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
173 nr += ilist[ftype].size();
/* Computes dx = xi - xj, using minimum-image PBC when pbc != nullptr.
 * Returns the shift index from pbc_dx_aiuc in the PBC case; the non-PBC
 * branch (plain rvec_sub) presumably returns CENTRAL — the return line is
 * elided in this view. */
179 static int pbc_rvec_sub(const t_pbc *pbc, const rvec xi, const rvec xj, rvec dx)
183 return pbc_dx_aiuc(pbc, xi, xj, dx);
187 rvec_sub(xi, xj, dx);
/* Returns 1/|x| via GROMACS' fast inverse square root of the dot product. */
192 static inline real inverseNorm(const rvec x)
194 return gmx::invsqrt(iprod(x, x));
197 /* Vsite construction routines */
/* Places a 2-atom virtual site on the line i-j: x = xi + a*(xj - xi).
 * With PBC the j-i vector is taken as the minimum image; without PBC the
 * linear-combination form b*xi + a*xj is used (b presumably = 1 - a; its
 * declaration is elided in this view). */
199 static void constr_vsite2(const rvec xi, const rvec xj, rvec x, real a, const t_pbc *pbc)
207 pbc_dx_aiuc(pbc, xj, xi, dx);
208 x[XX] = xi[XX] + a*dx[XX];
209 x[YY] = xi[YY] + a*dx[YY];
210 x[ZZ] = xi[ZZ] + a*dx[ZZ];
214 x[XX] = b*xi[XX] + a*xj[XX];
215 x[YY] = b*xi[YY] + a*xj[YY];
216 x[ZZ] = b*xi[ZZ] + a*xj[ZZ];
220 /* TOTAL: 10 flops */
/* Places a 2-atom fixed-distance virtual site: a distance |a| from atom i
 * along the (PBC-corrected) direction i->j, via b = a/|xij|. */
223 static void constr_vsite2FD(const rvec xi, const rvec xj, rvec x, real a,
227 pbc_rvec_sub(pbc, xj, xi, xij);
230 const real b = a*inverseNorm(xij);
233 x[XX] = xi[XX] + b*xij[XX];
234 x[YY] = xi[YY] + b*xij[YY];
235 x[ZZ] = xi[ZZ] + b*xij[ZZ];
238 /* TOTAL: 25 flops */
/* Places a 3-atom virtual site in the i,j,k plane:
 * x = xi + a*(xj - xi) + b*(xk - xi), with minimum-image vectors under PBC.
 * Without PBC the equivalent form c*xi + a*xj + b*xk is used (c presumably
 * = 1 - a - b; its declaration is elided in this view). */
241 static void constr_vsite3(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b,
251 pbc_dx_aiuc(pbc, xj, xi, dxj);
252 pbc_dx_aiuc(pbc, xk, xi, dxk);
253 x[XX] = xi[XX] + a*dxj[XX] + b*dxk[XX];
254 x[YY] = xi[YY] + a*dxj[YY] + b*dxk[YY];
255 x[ZZ] = xi[ZZ] + a*dxj[ZZ] + b*dxk[ZZ];
259 x[XX] = c*xi[XX] + a*xj[XX] + b*xk[XX];
260 x[YY] = c*xi[YY] + a*xj[YY] + b*xk[YY];
261 x[ZZ] = c*xi[ZZ] + a*xj[ZZ] + b*xk[ZZ];
265 /* TOTAL: 17 flops */
/* Places a 3-atom fixed-distance virtual site: at distance |b| from atom i
 * along the direction towards the point xj + a*(xk - xj) on line j-k. */
268 static void constr_vsite3FD(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b,
274 pbc_rvec_sub(pbc, xj, xi, xij);
275 pbc_rvec_sub(pbc, xk, xj, xjk);
278 /* temp goes from i to a point on the line jk */
279 temp[XX] = xij[XX] + a*xjk[XX];
280 temp[YY] = xij[YY] + a*xjk[YY];
281 temp[ZZ] = xij[ZZ] + a*xjk[ZZ];
/* Scale to the requested distance b from atom i */
284 c = b*inverseNorm(temp);
287 x[XX] = xi[XX] + c*temp[XX];
288 x[YY] = xi[YY] + c*temp[YY];
289 x[ZZ] = xi[ZZ] + c*temp[ZZ];
292 /* TOTAL: 34 flops */
/* Places a 3-atom fixed-angle, fixed-distance virtual site. xp is the
 * component of xjk perpendicular to xij (Gram-Schmidt); the site is built
 * from a1*xij + b1*xp. NOTE(review): the computation of a1 (presumably
 * a*invdij-scaled) is elided between the b1 and x[] lines in this view. */
295 static void constr_vsite3FAD(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b, const t_pbc *pbc)
298 real a1, b1, c1, invdij;
300 pbc_rvec_sub(pbc, xj, xi, xij);
301 pbc_rvec_sub(pbc, xk, xj, xjk);
/* c1 = (xij . xjk)/|xij|^2 : projection coefficient of xjk on xij */
304 invdij = inverseNorm(xij);
305 c1 = invdij * invdij * iprod(xij, xjk);
306 xp[XX] = xjk[XX] - c1*xij[XX];
307 xp[YY] = xjk[YY] - c1*xij[YY];
308 xp[ZZ] = xjk[ZZ] - c1*xij[ZZ];
310 b1 = b*inverseNorm(xp);
313 x[XX] = xi[XX] + a1*xij[XX] + b1*xp[XX];
314 x[YY] = xi[YY] + a1*xij[YY] + b1*xp[YY];
315 x[ZZ] = xi[ZZ] + a1*xij[ZZ] + b1*xp[ZZ];
318 /* TOTAL: 63 flops */
/* Places a 3-atom out-of-plane virtual site:
 * x = xi + a*xij + b*xik + c*(xij x xik). */
321 static void constr_vsite3OUT(const rvec xi, const rvec xj, const rvec xk, rvec x,
322 real a, real b, real c, const t_pbc *pbc)
326 pbc_rvec_sub(pbc, xj, xi, xij);
327 pbc_rvec_sub(pbc, xk, xi, xik);
328 cprod(xij, xik, temp);
331 x[XX] = xi[XX] + a*xij[XX] + b*xik[XX] + c*temp[XX];
332 x[YY] = xi[YY] + a*xij[YY] + b*xik[YY] + c*temp[YY];
333 x[ZZ] = xi[ZZ] + a*xij[ZZ] + b*xik[ZZ] + c*temp[ZZ];
336 /* TOTAL: 33 flops */
/* Places a 4-atom fixed-distance virtual site: at distance |c| from atom i
 * along the direction towards the point xj + a*xjk + b*xjl in plane jkl. */
339 static void constr_vsite4FD(const rvec xi, const rvec xj, const rvec xk, const rvec xl, rvec x,
340 real a, real b, real c, const t_pbc *pbc)
342 rvec xij, xjk, xjl, temp;
345 pbc_rvec_sub(pbc, xj, xi, xij);
346 pbc_rvec_sub(pbc, xk, xj, xjk);
347 pbc_rvec_sub(pbc, xl, xj, xjl);
350 /* temp goes from i to a point on the plane jkl */
351 temp[XX] = xij[XX] + a*xjk[XX] + b*xjl[XX];
352 temp[YY] = xij[YY] + a*xjk[YY] + b*xjl[YY];
353 temp[ZZ] = xij[ZZ] + a*xjk[ZZ] + b*xjl[ZZ];
/* Scale to the requested distance c from atom i */
356 d = c*inverseNorm(temp);
359 x[XX] = xi[XX] + d*temp[XX];
360 x[YY] = xi[YY] + d*temp[YY];
361 x[ZZ] = xi[ZZ] + d*temp[ZZ];
364 /* TOTAL: 43 flops */
/* Places a 4-atom fixed-distance-normal virtual site: at distance |c| from
 * atom i along rm = rja x rjb, where ra/rb are built from xik/xil scaled by
 * a/b. NOTE(review): the computation of ra, rb and the cross product into
 * rm is elided between lines 375 and 388 in this view. */
367 static void constr_vsite4FDN(const rvec xi, const rvec xj, const rvec xk, const rvec xl, rvec x,
368 real a, real b, real c, const t_pbc *pbc)
370 rvec xij, xik, xil, ra, rb, rja, rjb, rm;
373 pbc_rvec_sub(pbc, xj, xi, xij);
374 pbc_rvec_sub(pbc, xk, xi, xik);
375 pbc_rvec_sub(pbc, xl, xi, xil);
388 rvec_sub(ra, xij, rja);
389 rvec_sub(rb, xij, rjb);
395 d = c*inverseNorm(rm);
398 x[XX] = xi[XX] + d*rm[XX];
399 x[YY] = xi[YY] + d*rm[YY];
400 x[ZZ] = xi[ZZ] + d*rm[ZZ];
403 /* TOTAL: 47 flops */
/* Constructs an N-atom virtual site (F_VSITEN) as a weighted sum of
 * constructing-atom positions relative to the first atom x1; returns the
 * iatoms increment for this entry (value elided in this view).
 * NOTE(review): the assignments of ai (per-iteration constructing atom) and
 * av (the vsite index) are elided here — confirm against upstream. */
407 static int constr_vsiten(const t_iatom *ia, const t_iparams ip[],
408 rvec *x, const t_pbc *pbc)
/* n3 = 3 entries (param index, flag, atom) per constructing atom */
415 n3 = 3*ip[ia[0]].vsiten.n;
418 copy_rvec(x[ai], x1);
420 for (int i = 3; i < n3; i += 3)
423 a = ip[ia[i]].vsiten.a;
426 pbc_dx_aiuc(pbc, x[ai], x1, dx);
430 rvec_sub(x[ai], x1, dx);
432 dsum[XX] += a*dx[XX];
433 dsum[YY] += a*dx[YY];
434 dsum[ZZ] += a*dx[ZZ];
438 x[av][XX] = x1[XX] + dsum[XX];
439 x[av][YY] = x1[YY] + dsum[YY];
440 x[av][ZZ] = x1[ZZ] + dsum[ZZ];
445 /*! \brief PBC modes for vsite construction and spreading */
/* NOTE(review): the `enum class PbcMode` declaration line is elided in this
 * view; only the enumerators remain. */
448 all, // Apply normal, simple PBC for all vsites
449 none // No PBC treatment needed
452 /*! \brief Returns the PBC mode based on the system PBC and vsite properties
454 * \param[in] pbcPtr A pointer to a PBC struct or nullptr when no PBC treatment is required
/* Returns PbcMode::none for a null pointer; the PbcMode::all return branch
 * is elided in this view. */
456 static PbcMode getPbcMode(const t_pbc *pbcPtr)
458 if (pbcPtr == nullptr)
460 return PbcMode::none;
/* Constructs all virtual sites in the given interaction lists for one
 * thread/task: dispatches per function type to the constr_vsite* routines,
 * optionally keeps each vsite in its previous periodic image, and (when a
 * velocity array is in use) derives the vsite velocity from the position
 * change divided by dt. NOTE(review): the switch statement, its case
 * labels, and several local declarations (tp, avsite, ai/aj/ak/al, b1, c1,
 * inc, xv, dx, vv) are elided in this view — only the call lines remain. */
468 static void construct_vsites_thread(rvec x[],
470 const t_iparams ip[], const t_ilist ilist[],
471 const t_pbc *pbc_null)
483 const PbcMode pbcMode = getPbcMode(pbc_null);
484 /* We need another pbc pointer, as with charge groups we switch per vsite */
485 const t_pbc *pbc_null2 = pbc_null;
487 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
489 if (ilist[ftype].nr == 0)
495 int nra = interaction_function[ftype].nratoms;
497 int nr = ilist[ftype].nr;
499 const t_iatom *ia = ilist[ftype].iatoms;
501 for (int i = 0; i < nr; )
504 /* The vsite and constructing atoms */
507 /* Constants for constructing vsites */
508 real a1 = ip[tp].vsite.a;
509 /* Copy the old position, so the periodic image can be restored below */
511 copy_rvec(x[avsite], xv);
513 /* Construct the vsite depending on type */
520 constr_vsite2(x[ai], x[aj], x[avsite], a1, pbc_null2);
524 constr_vsite2FD(x[ai], x[aj], x[avsite], a1, pbc_null2);
530 constr_vsite3(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
536 constr_vsite3FD(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
542 constr_vsite3FAD(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
549 constr_vsite3OUT(x[ai], x[aj], x[ak], x[avsite], a1, b1, c1, pbc_null2);
557 constr_vsite4FD(x[ai], x[aj], x[ak], x[al], x[avsite], a1, b1, c1,
566 constr_vsite4FDN(x[ai], x[aj], x[ak], x[al], x[avsite], a1, b1, c1,
570 inc = constr_vsiten(ia, ip, x, pbc_null2);
573 gmx_fatal(FARGS, "No such vsite type %d in %s, line %d",
574 ftype, __FILE__, __LINE__);
577 if (pbcMode == PbcMode::all)
579 /* Keep the vsite in the same periodic image as before */
581 int ishift = pbc_dx_aiuc(pbc_null, x[avsite], xv, dx);
582 if (ishift != CENTRAL)
584 rvec_add(xv, dx, x[avsite]);
589 /* Calculate velocity of vsite... */
591 rvec_sub(x[avsite], xv, vv);
592 svmul(inv_dt, vv, v[avsite]);
595 /* Increment loop variables */
/* Top-level vsite construction entry point. Sets up PBC when inter-update-
 * group vsites require it, communicates vsite constructing-atom coordinates
 * with domain decomposition, and runs construct_vsites_thread either
 * serially or over an OpenMP parallel region (per-thread tasks plus an
 * interdependent task, followed by a final serial task for vsites that
 * depend on other vsites). NOTE(review): several lines (brace structure,
 * remaining set_pbc_dd arguments, thread-range arguments to the per-thread
 * calls) are elided in this view. */
603 void construct_vsites(const gmx_vsite_t *vsite,
606 const t_iparams ip[], const t_ilist ilist[],
607 int ePBC, gmx_bool bMolPBC,
611 const bool useDomdec = (vsite != nullptr && vsite->useDomdec);
612 GMX_ASSERT(!useDomdec || (cr != nullptr && DOMAINDECOMP(cr)), "When vsites are set up with domain decomposition, we need a valid commrec");
613 // TODO: Remove this assertion when we remove charge groups
614 GMX_ASSERT(vsite != nullptr || ePBC == epbcNONE, "Without a vsite struct we can not do PBC (in case we have charge groups)");
616 t_pbc pbc, *pbc_null;
618 /* We only need to do pbc when we have inter-cg vsites.
619 * Note that with domain decomposition we do not need to apply PBC here
620 * when we have at least 3 domains along each dimension. Currently we
621 * do not optimize this case.
623 if (ePBC != epbcNONE && (useDomdec || bMolPBC) &&
624 !(vsite != nullptr && vsite->numInterUpdategroupVsites == 0))
626 /* This is wasting some CPU time as we now do this multiple times
630 clear_ivec(null_ivec);
631 pbc_null = set_pbc_dd(&pbc, ePBC,
632 useDomdec ? cr->dd->nc : null_ivec,
/* With DD, get the constructing-atom coordinates from neighbor domains */
642 dd_move_x_vsites(cr->dd, box, x);
/* Serial path: no vsite struct or a single thread */
645 if (vsite == nullptr || vsite->nthreads == 1)
647 construct_vsites_thread(x, dt, v,
653 #pragma omp parallel num_threads(vsite->nthreads)
657 const int th = gmx_omp_get_thread_num();
658 const VsiteThread &tData = *vsite->tData[th];
659 GMX_ASSERT(tData.rangeStart >= 0, "The thread data should be initialized before calling construct_vsites");
661 construct_vsites_thread(x, dt, v,
664 if (tData.useInterdependentTask)
666 /* Here we don't need a barrier (unlike the spreading),
667 * since both tasks only construct vsites from particles,
668 * or local vsites, not from non-local vsites.
670 construct_vsites_thread(x, dt, v,
671 ip, tData.idTask.ilist,
675 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
677 /* Now we can construct the vsites that might depend on other vsites */
678 construct_vsites_thread(x, dt, v,
679 ip, vsite->tData[vsite->nthreads]->ilist,
/* Spreads the force on a 2-atom vsite to its constructing atoms:
 * fi = (1-a)*fv to atom i, fj = a*fv to atom j. Shift forces are
 * accumulated via either the graph (g) or PBC shift indices.
 * NOTE(review): the force-increment lines onto f[ai]/f[aj] and the graph/
 * PBC branch structure are partially elided in this view. */
684 static void spread_vsite2(const t_iatom ia[], real a,
686 rvec f[], rvec fshift[],
687 const t_pbc *pbc, const t_graph *g)
698 svmul(1 - a, f[av], fi);
699 svmul( a, f[av], fj);
/* Graph path: shift indices from the connectivity graph */
708 ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, av), di);
710 ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), di);
/* PBC path: shift indices from minimum-image displacements */
715 siv = pbc_dx_aiuc(pbc, x[ai], x[av], dx);
716 sij = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
724 if (fshift && (siv != CENTRAL || sij != CENTRAL))
726 rvec_inc(fshift[siv], f[av]);
727 rvec_dec(fshift[CENTRAL], fi);
728 rvec_dec(fshift[sij], fj);
731 /* TOTAL: 13 flops */
/* Constructs all virtual sites for the whole system (global topology),
 * molecule block by molecule block, without PBC or domain decomposition.
 * \param[in]     mtop  The global topology
 * \param[in,out] x     Coordinates for the whole system (>= mtop.natoms) */
734 void constructVsitesGlobal(const gmx_mtop_t &mtop,
735 gmx::ArrayRef<gmx::RVec> x)
737 GMX_ASSERT(x.ssize() >= mtop.natoms, "x should contain the whole system");
738 GMX_ASSERT(!mtop.moleculeBlockIndices.empty(), "molblock indices are needed in constructVsitesGlobal");
740 for (size_t mb = 0; mb < mtop.molblock.size(); mb++)
742 const gmx_molblock_t &molb = mtop.molblock[mb];
743 const gmx_moltype_t &molt = mtop.moltype[molb.type];
/* Skip molecule types without any vsites */
744 if (vsiteIlistNrCount(molt.ilist.data()) > 0)
746 int atomOffset = mtop.moleculeBlockIndices[mb].globalAtomStart;
747 for (int mol = 0; mol < molb.nmol; mol++)
/* Build a temporary t_ilist view onto the molecule-type lists;
 * const_cast is needed because t_ilist has a non-const pointer,
 * but construct_vsites does not modify the iatoms. */
749 t_ilist ilist[F_NRE];
750 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
752 ilist[ftype].nr = molt.ilist[ftype].size();
753 ilist[ftype].iatoms = const_cast<t_iatom *>(molt.ilist[ftype].iatoms.data());
756 construct_vsites(nullptr, as_rvec_array(x.data()) + atomOffset,
758 mtop.ffparams.iparams.data(), ilist,
759 epbcNONE, TRUE, nullptr, nullptr);
760 atomOffset += molt.atoms.nr;
/* Spreads the force on a 2FD (fixed-distance) vsite to atoms i and j.
 * The vsite force fv is split into the component along xij (stays on i)
 * and the perpendicular component scaled by b = a/|xij| (goes to j).
 * When VirCorr is set, the non-linear construction's virial contribution
 * is accumulated into dxdf. NOTE(review): the f[aj] increment lines and
 * parts of the graph/PBC branch structure are elided in this view. */
766 static void spread_vsite2FD(const t_iatom ia[], real a,
768 rvec f[], rvec fshift[],
769 gmx_bool VirCorr, matrix dxdf,
770 const t_pbc *pbc, const t_graph *g)
772 const int av = ia[1];
773 const int ai = ia[2];
774 const int aj = ia[3];
776 copy_rvec(f[av], fv);
779 int sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
782 const real invDistance = inverseNorm(xij);
783 const real b = a*invDistance;
/* fproj = (xij . fv)/|xij|^2 : projection of fv onto xij */
786 const real fproj = iprod(xij, fv)*invDistance*invDistance;
789 fj[XX] = b*(fv[XX] - fproj*xij[XX]);
790 fj[YY] = b*(fv[YY] - fproj*xij[YY]);
791 fj[ZZ] = b*(fv[ZZ] - fproj*xij[ZZ]);
794 /* b is already calculated in constr_vsite2FD
795 storing b somewhere will save flops. */
797 f[ai][XX] += fv[XX] - fj[XX];
798 f[ai][YY] += fv[YY] - fj[YY];
799 f[ai][ZZ] += fv[ZZ] - fj[ZZ];
/* Graph path: shift indices from the connectivity graph */
811 ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
813 ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
/* PBC path: shift index of the vsite relative to atom i */
819 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
826 if (svi != CENTRAL || sji != CENTRAL)
828 rvec_dec(fshift[svi], fv);
829 fshift[CENTRAL][XX] += fv[XX] - fj[XX];
830 fshift[CENTRAL][YY] += fv[YY] - fj[YY];
831 fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ];
832 fshift[ sji][XX] += fj[XX];
833 fshift[ sji][YY] += fj[YY];
834 fshift[ sji][ZZ] += fj[ZZ];
840 /* When VirCorr=TRUE, the virial for the current forces is not
841 * calculated from the redistributed forces. This means that
842 * the effect of non-linear virtual site constructions on the virial
843 * needs to be added separately. This contribution can be calculated
844 * in many ways, but the simplest and cheapest way is to use
845 * the first constructing atom ai as a reference position in space:
846 * subtract (xv-xi)*fv and add (xj-xi)*fj.
850 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
852 for (int i = 0; i < DIM; i++)
854 for (int j = 0; j < DIM; j++)
856 /* As xix is a linear combination of j and k, use that here */
857 dxdf[i][j] += -xiv[i]*fv[j] + xij[i]*fj[j];
862 /* TOTAL: 38 flops */
/* Spreads the force on a linear 3-atom vsite: fi = (1-a-b)*fv to atom i,
 * fj = a*fv to atom j, fk = b*fv to atom k, with shift-force bookkeeping
 * via the graph or PBC. NOTE(review): the f[] increment lines and branch
 * structure are elided in this view. */
865 static void spread_vsite3(const t_iatom ia[], real a, real b,
867 rvec f[], rvec fshift[],
868 const t_pbc *pbc, const t_graph *g)
880 svmul(1 - a - b, f[av], fi);
881 svmul( a, f[av], fj);
882 svmul( b, f[av], fk);
/* Graph path: shift indices from the connectivity graph */
892 ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, ia[1]), di);
894 ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), di);
896 ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, ak), di);
/* PBC path: shift indices from minimum-image displacements */
901 siv = pbc_dx_aiuc(pbc, x[ai], x[av], dx);
902 sij = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
903 sik = pbc_dx_aiuc(pbc, x[ai], x[ak], dx);
912 if (fshift && (siv != CENTRAL || sij != CENTRAL || sik != CENTRAL))
914 rvec_inc(fshift[siv], f[av]);
915 rvec_dec(fshift[CENTRAL], fi);
916 rvec_dec(fshift[sij], fj);
917 rvec_dec(fshift[sik], fk);
920 /* TOTAL: 20 flops */
/* Spreads the force on a 3FD (fixed-distance) vsite to atoms i, j, k.
 * The perpendicular part of fv (w.r.t. xix = xij + a*xjk) scaled by
 * c = b/|xix| is distributed over j and k; the remainder stays on i.
 * With VirCorr the non-linear virial correction goes into dxdf.
 * NOTE(review): the assignments of av/ai/aj/ak, `a1` (presumably 1 - a),
 * and some brace structure are elided in this view. */
923 static void spread_vsite3FD(const t_iatom ia[], real a, real b,
925 rvec f[], rvec fshift[],
926 gmx_bool VirCorr, matrix dxdf,
927 const t_pbc *pbc, const t_graph *g)
930 rvec xvi, xij, xjk, xix, fv, temp;
931 t_iatom av, ai, aj, ak;
939 copy_rvec(f[av], fv);
941 sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
942 skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
945 /* xix goes from i to point x on the line jk */
946 xix[XX] = xij[XX]+a*xjk[XX];
947 xix[YY] = xij[YY]+a*xjk[YY];
948 xix[ZZ] = xij[ZZ]+a*xjk[ZZ];
951 const real invDistance = inverseNorm(xix);
952 const real c = b*invDistance;
955 fproj = iprod(xix, fv)*invDistance*invDistance; /* = (xix . f)/(xix . xix) */
957 temp[XX] = c*(fv[XX]-fproj*xix[XX]);
958 temp[YY] = c*(fv[YY]-fproj*xix[YY]);
959 temp[ZZ] = c*(fv[ZZ]-fproj*xix[ZZ]);
962 /* c is already calculated in constr_vsite3FD
963 storing c somewhere will save 26 flops! */
966 f[ai][XX] += fv[XX] - temp[XX];
967 f[ai][YY] += fv[YY] - temp[YY];
968 f[ai][ZZ] += fv[ZZ] - temp[ZZ];
969 f[aj][XX] += a1*temp[XX];
970 f[aj][YY] += a1*temp[YY];
971 f[aj][ZZ] += a1*temp[ZZ];
972 f[ak][XX] += a*temp[XX];
973 f[ak][YY] += a*temp[YY];
974 f[ak][ZZ] += a*temp[ZZ];
/* Graph path: shift indices from the connectivity graph */
979 ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
981 ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
983 ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
/* PBC path: shift index of the vsite relative to atom i */
988 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
995 if (fshift && (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL))
997 rvec_dec(fshift[svi], fv);
998 fshift[CENTRAL][XX] += fv[XX] - (1 + a)*temp[XX];
999 fshift[CENTRAL][YY] += fv[YY] - (1 + a)*temp[YY];
1000 fshift[CENTRAL][ZZ] += fv[ZZ] - (1 + a)*temp[ZZ];
1001 fshift[ sji][XX] += temp[XX];
1002 fshift[ sji][YY] += temp[YY];
1003 fshift[ sji][ZZ] += temp[ZZ];
1004 fshift[ skj][XX] += a*temp[XX];
1005 fshift[ skj][YY] += a*temp[YY];
1006 fshift[ skj][ZZ] += a*temp[ZZ];
1011 /* When VirCorr=TRUE, the virial for the current forces is not
1012 * calculated from the redistributed forces. This means that
1013 * the effect of non-linear virtual site constructions on the virial
1014 * needs to be added separately. This contribution can be calculated
1015 * in many ways, but the simplest and cheapest way is to use
1016 * the first constructing atom ai as a reference position in space:
1017 * subtract (xv-xi)*fv and add (xj-xi)*fj + (xk-xi)*fk.
1021 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
1023 for (int i = 0; i < DIM; i++)
1025 for (int j = 0; j < DIM; j++)
1027 /* As xix is a linear combination of j and k, use that here */
1028 dxdf[i][j] += -xiv[i]*fv[j] + xix[i]*temp[j];
1033 /* TOTAL: 61 flops */
/* Spreads the force on a 3FAD (fixed angle/distance) vsite to atoms
 * i, j, k. fv is decomposed along xij (Fpij) and the in-plane
 * perpendicular xperp (Fppp); f1/f2/f3 are intermediate force terms
 * combined with coefficients c1/c2 per the 3FAD construction. With
 * VirCorr the non-linear virial correction goes into dxdf.
 * NOTE(review): the assignments of av/ai/aj/ak, a1/b1/c2, part of the
 * d-loop body at line 1081+, and the dxdf accumulation line(s) around
 * 1143-1146 are elided in this view. */
1036 static void spread_vsite3FAD(const t_iatom ia[], real a, real b,
1038 rvec f[], rvec fshift[],
1039 gmx_bool VirCorr, matrix dxdf,
1040 const t_pbc *pbc, const t_graph *g)
1042 rvec xvi, xij, xjk, xperp, Fpij, Fppp, fv, f1, f2, f3;
1043 real a1, b1, c1, c2, invdij, invdij2, invdp, fproj;
1044 t_iatom av, ai, aj, ak;
1045 int svi, sji, skj, d;
1052 copy_rvec(f[ia[1]], fv);
1054 sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
1055 skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
1058 invdij = inverseNorm(xij);
1059 invdij2 = invdij * invdij;
1060 c1 = iprod(xij, xjk) * invdij2;
1061 xperp[XX] = xjk[XX] - c1*xij[XX];
1062 xperp[YY] = xjk[YY] - c1*xij[YY];
1063 xperp[ZZ] = xjk[ZZ] - c1*xij[ZZ];
1064 /* xperp in plane ijk, perp. to ij */
1065 invdp = inverseNorm(xperp);
1070 /* a1, b1 and c1 are already calculated in constr_vsite3FAD
1071 storing them somewhere will save 45 flops! */
1073 fproj = iprod(xij, fv)*invdij2;
1074 svmul(fproj, xij, Fpij); /* proj. f on xij */
1075 svmul(iprod(xperp, fv)*invdp*invdp, xperp, Fppp); /* proj. f on xperp */
1076 svmul(b1*fproj, xperp, f3);
1079 rvec_sub(fv, Fpij, f1); /* f1 = f - Fpij */
1080 rvec_sub(f1, Fppp, f2); /* f2 = f - Fpij - Fppp */
1081 for (d = 0; (d < DIM); d++)
1089 f[ai][XX] += fv[XX] - f1[XX] + c1*f2[XX] + f3[XX];
1090 f[ai][YY] += fv[YY] - f1[YY] + c1*f2[YY] + f3[YY];
1091 f[ai][ZZ] += fv[ZZ] - f1[ZZ] + c1*f2[ZZ] + f3[ZZ];
1092 f[aj][XX] += f1[XX] - c2*f2[XX] - f3[XX];
1093 f[aj][YY] += f1[YY] - c2*f2[YY] - f3[YY];
1094 f[aj][ZZ] += f1[ZZ] - c2*f2[ZZ] - f3[ZZ];
1095 f[ak][XX] += f2[XX];
1096 f[ak][YY] += f2[YY];
1097 f[ak][ZZ] += f2[ZZ];
/* Graph path: shift indices from the connectivity graph */
1102 ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
1104 ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
1106 ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
/* PBC path: shift index of the vsite relative to atom i */
1111 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
1118 if (fshift && (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL))
1120 rvec_dec(fshift[svi], fv);
1121 fshift[CENTRAL][XX] += fv[XX] - f1[XX] - (1-c1)*f2[XX] + f3[XX];
1122 fshift[CENTRAL][YY] += fv[YY] - f1[YY] - (1-c1)*f2[YY] + f3[YY];
1123 fshift[CENTRAL][ZZ] += fv[ZZ] - f1[ZZ] - (1-c1)*f2[ZZ] + f3[ZZ];
1124 fshift[ sji][XX] += f1[XX] - c1 *f2[XX] - f3[XX];
1125 fshift[ sji][YY] += f1[YY] - c1 *f2[YY] - f3[YY];
1126 fshift[ sji][ZZ] += f1[ZZ] - c1 *f2[ZZ] - f3[ZZ];
1127 fshift[ skj][XX] += f2[XX];
1128 fshift[ skj][YY] += f2[YY];
1129 fshift[ skj][ZZ] += f2[ZZ];
/* Virial correction path (VirCorr), relative to atom i */
1137 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
1139 for (i = 0; i < DIM; i++)
1141 for (j = 0; j < DIM; j++)
1143 /* Note that xik=xij+xjk, so we have to add xij*f2 */
1146 + xij[i]*(f1[j] + (1 - c2)*f2[j] - f3[j])
1152 /* TOTAL: 113 flops */
/* Spreads the force on a 3OUT (out-of-plane) vsite to atoms i, j, k.
 * fj/fk combine the linear terms a*fv/b*fv with cross-product terms from
 * the c*(xij x xik) part of the construction (via cfx/cfy/cfz, whose
 * assignments are elided in this view); the remainder stays on atom i.
 * With VirCorr the non-linear virial correction goes into dxdf. */
1155 static void spread_vsite3OUT(const t_iatom ia[], real a, real b, real c,
1157 rvec f[], rvec fshift[],
1158 gmx_bool VirCorr, matrix dxdf,
1159 const t_pbc *pbc, const t_graph *g)
1161 rvec xvi, xij, xik, fv, fj, fk;
1172 sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
1173 ski = pbc_rvec_sub(pbc, x[ak], x[ai], xik);
1176 copy_rvec(f[av], fv);
1183 fj[XX] = a*fv[XX] - xik[ZZ]*cfy + xik[YY]*cfz;
1184 fj[YY] = xik[ZZ]*cfx + a*fv[YY] - xik[XX]*cfz;
1185 fj[ZZ] = -xik[YY]*cfx + xik[XX]*cfy + a*fv[ZZ];
1187 fk[XX] = b*fv[XX] + xij[ZZ]*cfy - xij[YY]*cfz;
1188 fk[YY] = -xij[ZZ]*cfx + b*fv[YY] + xij[XX]*cfz;
1189 fk[ZZ] = xij[YY]*cfx - xij[XX]*cfy + b*fv[ZZ];
1192 f[ai][XX] += fv[XX] - fj[XX] - fk[XX];
1193 f[ai][YY] += fv[YY] - fj[YY] - fk[YY];
1194 f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
1195 rvec_inc(f[aj], fj);
1196 rvec_inc(f[ak], fk);
/* Graph path: shift indices from the connectivity graph */
1201 ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
1203 ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
1205 ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, ai), di);
/* PBC path: shift index of the vsite relative to atom i */
1210 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
1217 if (fshift && (svi != CENTRAL || sji != CENTRAL || ski != CENTRAL))
1219 rvec_dec(fshift[svi], fv);
1220 fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX];
1221 fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY];
1222 fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
1223 rvec_inc(fshift[sji], fj);
1224 rvec_inc(fshift[ski], fk);
/* Virial correction path (VirCorr), relative to atom i */
1231 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
1233 for (int i = 0; i < DIM; i++)
1235 for (int j = 0; j < DIM; j++)
1237 dxdf[i][j] += -xiv[i]*fv[j] + xij[i]*fj[j] + xik[i]*fk[j];
1242 /* TOTAL: 54 flops */
/* Spreads the force on a 4FD (fixed-distance) vsite to atoms i, j, k, l.
 * Analogous to spread_vsite3FD but with xix = xij + a*xjk + b*xjl and
 * scaling d = c/|xix|. With VirCorr the non-linear virial correction goes
 * into dxdf. NOTE(review): the assignments of av..al and `a1` (presumably
 * 1 - a - b) are elided in this view. */
1245 static void spread_vsite4FD(const t_iatom ia[], real a, real b, real c,
1247 rvec f[], rvec fshift[],
1248 gmx_bool VirCorr, matrix dxdf,
1249 const t_pbc *pbc, const t_graph *g)
1252 rvec xvi, xij, xjk, xjl, xix, fv, temp;
1253 int av, ai, aj, ak, al;
1255 int svi, sji, skj, slj, m;
1263 sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
1264 skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
1265 slj = pbc_rvec_sub(pbc, x[al], x[aj], xjl);
1268 /* xix goes from i to point x on the plane jkl */
1269 for (m = 0; m < DIM; m++)
1271 xix[m] = xij[m] + a*xjk[m] + b*xjl[m];
1275 const real invDistance = inverseNorm(xix);
1276 const real d = c*invDistance;
1277 /* 4 + ?10? flops */
1279 copy_rvec(f[av], fv);
1281 fproj = iprod(xix, fv)*invDistance*invDistance; /* = (xix . f)/(xix . xix) */
1283 for (m = 0; m < DIM; m++)
1285 temp[m] = d*(fv[m] - fproj*xix[m]);
1289 /* c is already calculated in constr_vsite3FD
1290 storing c somewhere will save 35 flops! */
1293 for (m = 0; m < DIM; m++)
1295 f[ai][m] += fv[m] - temp[m];
1296 f[aj][m] += a1*temp[m];
1297 f[ak][m] += a*temp[m];
1298 f[al][m] += b*temp[m];
/* Graph path: shift indices from the connectivity graph */
1304 ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
1306 ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
1308 ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
1310 ivec_sub(SHIFT_IVEC(g, al), SHIFT_IVEC(g, aj), di);
/* PBC path: shift index of the vsite relative to atom i */
1315 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
1323 (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL || slj != CENTRAL))
1325 rvec_dec(fshift[svi], fv);
1326 for (m = 0; m < DIM; m++)
1328 fshift[CENTRAL][m] += fv[m] - (1 + a + b)*temp[m];
1329 fshift[ sji][m] += temp[m];
1330 fshift[ skj][m] += a*temp[m];
1331 fshift[ slj][m] += b*temp[m];
/* Virial correction path (VirCorr), relative to atom i */
1340 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
1342 for (i = 0; i < DIM; i++)
1344 for (j = 0; j < DIM; j++)
1346 dxdf[i][j] += -xiv[i]*fv[j] + xix[i]*temp[j];
1351 /* TOTAL: 77 flops */
/* Spreads the force on a 4FDN (fixed-distance-normal) vsite to atoms
 * i, j, k, l. Forces fj/fk/fl follow from differentiating the
 * rm = rja x rjb construction (rt and the ra/rb builds are partially
 * elided in this view); the remainder stays on atom i. With VirCorr the
 * non-linear virial correction goes into dxdf. */
1355 static void spread_vsite4FDN(const t_iatom ia[], real a, real b, real c,
1357 rvec f[], rvec fshift[],
1358 gmx_bool VirCorr, matrix dxdf,
1359 const t_pbc *pbc, const t_graph *g)
1361 rvec xvi, xij, xik, xil, ra, rb, rja, rjb, rab, rm, rt;
1362 rvec fv, fj, fk, fl;
1366 int av, ai, aj, ak, al;
1367 int svi, sij, sik, sil;
1369 /* DEBUG: check atom indices */
1376 copy_rvec(f[av], fv);
1378 sij = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
1379 sik = pbc_rvec_sub(pbc, x[ak], x[ai], xik);
1380 sil = pbc_rvec_sub(pbc, x[al], x[ai], xil);
1393 rvec_sub(ra, xij, rja);
1394 rvec_sub(rb, xij, rjb);
1395 rvec_sub(rb, ra, rab);
1398 cprod(rja, rjb, rm);
1401 invrm = inverseNorm(rm);
1402 denom = invrm*invrm;
1405 cfx = c*invrm*fv[XX];
1406 cfy = c*invrm*fv[YY];
1407 cfz = c*invrm*fv[ZZ];
1418 fj[XX] = ( -rm[XX]*rt[XX]) * cfx + ( rab[ZZ]-rm[YY]*rt[XX]) * cfy + (-rab[YY]-rm[ZZ]*rt[XX]) * cfz;
1419 fj[YY] = (-rab[ZZ]-rm[XX]*rt[YY]) * cfx + ( -rm[YY]*rt[YY]) * cfy + ( rab[XX]-rm[ZZ]*rt[YY]) * cfz;
1420 fj[ZZ] = ( rab[YY]-rm[XX]*rt[ZZ]) * cfx + (-rab[XX]-rm[YY]*rt[ZZ]) * cfy + ( -rm[ZZ]*rt[ZZ]) * cfz;
1431 fk[XX] = ( -rm[XX]*rt[XX]) * cfx + (-a*rjb[ZZ]-rm[YY]*rt[XX]) * cfy + ( a*rjb[YY]-rm[ZZ]*rt[XX]) * cfz;
1432 fk[YY] = ( a*rjb[ZZ]-rm[XX]*rt[YY]) * cfx + ( -rm[YY]*rt[YY]) * cfy + (-a*rjb[XX]-rm[ZZ]*rt[YY]) * cfz;
1433 fk[ZZ] = (-a*rjb[YY]-rm[XX]*rt[ZZ]) * cfx + ( a*rjb[XX]-rm[YY]*rt[ZZ]) * cfy + ( -rm[ZZ]*rt[ZZ]) * cfz;
1444 fl[XX] = ( -rm[XX]*rt[XX]) * cfx + ( b*rja[ZZ]-rm[YY]*rt[XX]) * cfy + (-b*rja[YY]-rm[ZZ]*rt[XX]) * cfz;
1445 fl[YY] = (-b*rja[ZZ]-rm[XX]*rt[YY]) * cfx + ( -rm[YY]*rt[YY]) * cfy + ( b*rja[XX]-rm[ZZ]*rt[YY]) * cfz;
1446 fl[ZZ] = ( b*rja[YY]-rm[XX]*rt[ZZ]) * cfx + (-b*rja[XX]-rm[YY]*rt[ZZ]) * cfy + ( -rm[ZZ]*rt[ZZ]) * cfz;
1449 f[ai][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
1450 f[ai][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
1451 f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
1452 rvec_inc(f[aj], fj);
1453 rvec_inc(f[ak], fk);
1454 rvec_inc(f[al], fl);
/* Graph path: shift indices from the connectivity graph */
1459 ivec_sub(SHIFT_IVEC(g, av), SHIFT_IVEC(g, ai), di);
1461 ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
1463 ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, ai), di);
1465 ivec_sub(SHIFT_IVEC(g, al), SHIFT_IVEC(g, ai), di);
/* PBC path: shift index of the vsite relative to atom i */
1470 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
1477 if (fshift && (svi != CENTRAL || sij != CENTRAL || sik != CENTRAL || sil != CENTRAL))
1479 rvec_dec(fshift[svi], fv);
1480 fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
1481 fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
1482 fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
1483 rvec_inc(fshift[sij], fj);
1484 rvec_inc(fshift[sik], fk);
1485 rvec_inc(fshift[sil], fl);
/* Virial correction path (VirCorr), relative to atom i */
1493 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
1495 for (i = 0; i < DIM; i++)
1497 for (j = 0; j < DIM; j++)
1499 dxdf[i][j] += -xiv[i]*fv[j] + xij[i]*fj[j] + xik[i]*fk[j] + xil[i]*fl[j];
1504 /* Total: 207 flops (Yuck!) */
/* Spreads the force on an N-atom vsite (F_VSITEN): each constructing atom
 * receives a fraction a of the vsite force, with shift-force bookkeeping.
 * Returns the iatoms increment for this entry (return line elided in this
 * view). NOTE(review): the assignments of av and ai are elided here. */
1508 static int spread_vsiten(const t_iatom ia[], const t_iparams ip[],
1510 rvec f[], rvec fshift[],
1511 const t_pbc *pbc, const t_graph *g)
1519 n3 = 3*ip[ia[0]].vsiten.n;
1521 copy_rvec(x[av], xv);
1523 for (i = 0; i < n3; i += 3)
/* Graph path: shift index from the connectivity graph */
1528 ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, av), di);
/* PBC path: shift index from the minimum-image displacement */
1533 siv = pbc_dx_aiuc(pbc, x[ai], xv, dx);
1539 a = ip[ia[i]].vsiten.a;
1540 svmul(a, f[av], fi);
1541 rvec_inc(f[ai], fi);
1542 if (fshift && siv != CENTRAL)
1544 rvec_inc(fshift[siv], fi);
1545 rvec_dec(fshift[CENTRAL], fi);
/* Returns the number of vsites of the given ftype in ilist: F_VSITEN uses
 * 3 iatoms entries per constructing atom, all other types use
 * 1 + nratoms entries per vsite. */
1554 static int vsite_count(const t_ilist *ilist, int ftype)
1556 if (ftype == F_VSITEN)
1558 return ilist[ftype].nr/3;
1562 return ilist[ftype].nr/(1 + interaction_function[ftype].nratoms);
/* Spreads vsite forces to constructing atoms for one thread/task,
 * dispatching per function type to the spread_vsite* routines. Iterates
 * function types from high to low so vsites built from other vsites are
 * handled before their constructing vsites; after spreading, the vsite's
 * own force entry is cleared. NOTE(review): the switch statement, case
 * labels, and several local declarations (tp, a1/b1/c1, inc) are elided
 * in this view. */
1566 static void spread_vsite_f_thread(const rvec x[],
1567 rvec f[], rvec *fshift,
1568 gmx_bool VirCorr, matrix dxdf,
1569 t_iparams ip[], const t_ilist ilist[],
1570 const t_graph *g, const t_pbc *pbc_null)
1572 const PbcMode pbcMode = getPbcMode(pbc_null);
1573 /* We need another pbc pointer, as with charge groups we switch per vsite */
1574 const t_pbc *pbc_null2 = pbc_null;
1575 gmx::ArrayRef<const int> vsite_pbc;
1577 /* this loop goes backwards to be able to build *
1578 * higher type vsites from lower types */
1579 for (int ftype = c_ftypeVsiteEnd - 1; ftype >= c_ftypeVsiteStart; ftype--)
1581 if (ilist[ftype].nr == 0)
1587 int nra = interaction_function[ftype].nratoms;
1589 int nr = ilist[ftype].nr;
1591 const t_iatom *ia = ilist[ftype].iatoms;
1593 if (pbcMode == PbcMode::all)
1595 pbc_null2 = pbc_null;
1598 for (int i = 0; i < nr; )
1602 /* Constants for constructing */
1604 a1 = ip[tp].vsite.a;
1605 /* Construct the vsite depending on type */
1609 spread_vsite2(ia, a1, x, f, fshift, pbc_null2, g);
1612 spread_vsite2FD(ia, a1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
1615 b1 = ip[tp].vsite.b;
1616 spread_vsite3(ia, a1, b1, x, f, fshift, pbc_null2, g);
1619 b1 = ip[tp].vsite.b;
1620 spread_vsite3FD(ia, a1, b1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
1623 b1 = ip[tp].vsite.b;
1624 spread_vsite3FAD(ia, a1, b1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
1627 b1 = ip[tp].vsite.b;
1628 c1 = ip[tp].vsite.c;
1629 spread_vsite3OUT(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
1632 b1 = ip[tp].vsite.b;
1633 c1 = ip[tp].vsite.c;
1634 spread_vsite4FD(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
1637 b1 = ip[tp].vsite.b;
1638 c1 = ip[tp].vsite.c;
1639 spread_vsite4FDN(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
1642 inc = spread_vsiten(ia, ip, x, f, fshift, pbc_null2, g);
1645 gmx_fatal(FARGS, "No such vsite type %d in %s, line %d",
1646 ftype, __FILE__, __LINE__);
/* The force on the vsite itself has been fully spread; clear it */
1648 clear_rvec(f[ia[1]]);
1650 /* Increment loop variables */
1658 /*! \brief Clears the task force buffer elements that are written by task idTask */
1659 static void clearTaskForceBufferUsedElements(InterdependentTask *idTask)
1661 int ntask = idTask->spreadTask.size();
1662 for (int ti = 0; ti < ntask; ti++)
1664 const AtomIndex *atomList = &idTask->atomIndex[idTask->spreadTask[ti]];
1665 int natom = atomList->atom.size();
1666 RVec *force = idTask->force.data();
1667 for (int i = 0; i < natom; i++)
1669 clear_rvec(force[atomList->atom[i]]);
/* Spreads the forces accumulated on virtual sites back onto the atoms that
 * construct them, either serially or using OpenMP thread tasks, optionally
 * accumulating the virial correction (VirCorr) into vir and, with domain
 * decomposition, communicating vsite forces between ranks.
 * NOTE(review): this view of the source is elided; missing lines
 * (braces, try blocks, else branches, some arguments) are unchanged
 * in the underlying file.
 */
1674 void spread_vsite_f(const gmx_vsite_t *vsite,
1675 const rvec * gmx_restrict x,
1676 rvec * gmx_restrict f, rvec * gmx_restrict fshift,
1677 gmx_bool VirCorr, matrix vir,
1678 t_nrnb *nrnb, const t_idef *idef,
1679 int ePBC, gmx_bool bMolPBC, const t_graph *g, const matrix box,
1680 const t_commrec *cr, gmx_wallcycle *wcycle)
1682 wallcycle_start(wcycle, ewcVSITESPREAD);
1683 const bool useDomdec = vsite->useDomdec;
1684 GMX_ASSERT(!useDomdec || (cr != nullptr && DOMAINDECOMP(cr)), "When vsites are set up with domain decomposition, we need a valid commrec");
1686 t_pbc pbc, *pbc_null;
1688 /* We only need to do pbc when we have inter-cg vsites */
1689 if ((useDomdec || bMolPBC) && vsite->numInterUpdategroupVsites)
1691 /* This is wasting some CPU time as we now do this multiple times
1694 pbc_null = set_pbc_dd(&pbc, ePBC, useDomdec ? cr->dd->nc : nullptr, FALSE, box);
/* With DD, forces on non-local vsites must start from zero before spreading */
1703 dd_clear_f_vsites(cr->dd, f);
/* Serial path: one task covers everything */
1706 if (vsite->nthreads == 1)
1713 spread_vsite_f_thread(x, f, fshift,
1715 idef->iparams, idef->il,
1720 for (int i = 0; i < DIM; i++)
1722 for (int j = 0; j < DIM; j++)
1724 vir[i][j] += -0.5*dxdf[i][j];
/* Threaded path: the extra tData[nthreads] task holds vsites that could
 * not be assigned to any thread-local task.
 */
1731 /* First spread the vsites that might depend on non-local vsites */
1734 clear_mat(vsite->tData[vsite->nthreads]->dxdf);
1736 spread_vsite_f_thread(x, f, fshift,
1737 VirCorr, vsite->tData[vsite->nthreads]->dxdf,
1739 vsite->tData[vsite->nthreads]->ilist,
1742 #pragma omp parallel num_threads(vsite->nthreads)
1746 int thread = gmx_omp_get_thread_num();
1747 VsiteThread &tData = *vsite->tData[thread];
/* Thread 0 writes fshift directly; other threads use a private buffer
 * that is reduced into fshift after the parallel region.
 */
1750 if (thread == 0 || fshift == nullptr)
1756 fshift_t = tData.fshift;
1758 for (int i = 0; i < SHIFTS; i++)
1760 clear_rvec(fshift_t[i]);
1765 clear_mat(tData.dxdf);
1768 if (tData.useInterdependentTask)
1770 /* Spread the vsites that spread outside our local range.
1771 * This is done using a thread-local force buffer force.
1772 * First we need to copy the input vsite forces to force.
1774 InterdependentTask *idTask = &tData.idTask;
1776 /* Clear the buffer elements set by our task during
1777 * the last call to spread_vsite_f.
1779 clearTaskForceBufferUsedElements(idTask);
1781 int nvsite = idTask->vsite.size();
1782 for (int i = 0; i < nvsite; i++)
1784 copy_rvec(f[idTask->vsite[i]],
1785 idTask->force[idTask->vsite[i]]);
1787 spread_vsite_f_thread(x, as_rvec_array(idTask->force.data()), fshift_t,
1788 VirCorr, tData.dxdf,
1793 /* We need a barrier before reducing forces below
1794 * that have been produced by a different thread above.
1798 /* Loop over all thread task and reduce forces they
1799 * produced on atoms that fall in our range.
1800 * Note that atomic reduction would be a simpler solution,
1801 * but that might not have good support on all platforms.
1803 int ntask = idTask->reduceTask.size();
1804 for (int ti = 0; ti < ntask; ti++)
1806 const InterdependentTask *idt_foreign = &vsite->tData[idTask->reduceTask[ti]]->idTask;
1807 const AtomIndex *atomList = &idt_foreign->atomIndex[thread];
1808 const RVec *f_foreign = idt_foreign->force.data();
1810 int natom = atomList->atom.size();
1811 for (int i = 0; i < natom; i++)
1813 int ind = atomList->atom[i];
1814 rvec_inc(f[ind], f_foreign[ind]);
1815 /* Clearing of f_foreign is done at the next step */
1818 /* Clear the vsite forces, both in f and force */
1819 for (int i = 0; i < nvsite; i++)
1821 int ind = tData.idTask.vsite[i];
1823 clear_rvec(tData.idTask.force[ind]);
1827 /* Spread the vsites that spread locally only */
1828 spread_vsite_f_thread(x, f, fshift_t,
1829 VirCorr, tData.dxdf,
1834 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
/* Reduce the per-thread shift-force buffers into the caller's fshift */
1837 if (fshift != nullptr)
1839 for (int th = 1; th < vsite->nthreads; th++)
1841 for (int i = 0; i < SHIFTS; i++)
1843 rvec_inc(fshift[i], vsite->tData[th]->fshift[i]);
/* Reduce the per-task dxdf accumulators into the virial */
1850 for (int th = 0; th < vsite->nthreads + 1; th++)
1852 /* MSVC doesn't like matrix references, so we use a pointer */
1853 const matrix *dxdf = &vsite->tData[th]->dxdf;
1855 for (int i = 0; i < DIM; i++)
1857 for (int j = 0; j < DIM; j++)
1859 vir[i][j] += -0.5*(*dxdf)[i][j];
/* With DD, communicate spread forces on non-local atoms back to their home ranks */
1868 dd_move_f_vsites(cr->dd, f, fshift);
/* Flop accounting per vsite type */
1871 inc_nrnb(nrnb, eNR_VSITE2, vsite_count(idef->il, F_VSITE2));
1872 inc_nrnb(nrnb, eNR_VSITE2FD, vsite_count(idef->il, F_VSITE2FD));
1873 inc_nrnb(nrnb, eNR_VSITE3, vsite_count(idef->il, F_VSITE3));
1874 inc_nrnb(nrnb, eNR_VSITE3FD, vsite_count(idef->il, F_VSITE3FD));
1875 inc_nrnb(nrnb, eNR_VSITE3FAD, vsite_count(idef->il, F_VSITE3FAD));
1876 inc_nrnb(nrnb, eNR_VSITE3OUT, vsite_count(idef->il, F_VSITE3OUT));
1877 inc_nrnb(nrnb, eNR_VSITE4FD, vsite_count(idef->il, F_VSITE4FD));
1878 inc_nrnb(nrnb, eNR_VSITE4FDN, vsite_count(idef->il, F_VSITE4FDN));
1879 inc_nrnb(nrnb, eNR_VSITEN, vsite_count(idef->il, F_VSITEN));
1881 wallcycle_stop(wcycle, ewcVSITESPREAD);
1884 /*! \brief Returns an array with group indices for each atom
1886 * \param[in] grouping The partitioning of the atom range into atom groups
/* Builds the inverse of RangePartitioning: atomToGroup[atom] == group index.
 * NOTE(review): the third std::fill argument (the group value) and the
 * return statement are elided from this view.
 */
1888 static std::vector<int> makeAtomToGroupMapping(const gmx::RangePartitioning &grouping)
1890 std::vector<int> atomToGroup(grouping.fullRange().end(), 0);
1892 for (int group = 0; group < grouping.numBlocks(); group++)
1894 auto block = grouping.block(group);
1895 std::fill(atomToGroup.begin() + block.begin(),
1896 atomToGroup.begin() + block.end(),
1903 int countNonlinearVsites(const gmx_mtop_t &mtop)
1905 int numNonlinearVsites = 0;
1906 for (const gmx_molblock_t &molb : mtop.molblock)
1908 const gmx_moltype_t &molt = mtop.moltype[molb.type];
1910 for (const auto &ilist : extractILists(molt.ilist, IF_VSITE))
1912 if (ilist.functionType != F_VSITE2 &&
1913 ilist.functionType != F_VSITE3 &&
1914 ilist.functionType != F_VSITEN)
1916 numNonlinearVsites += molb.nmol*ilist.iatoms.size()/(1 + NRAL(ilist.functionType));
1921 return numNonlinearVsites;
1924 int countInterUpdategroupVsites(const gmx_mtop_t &mtop,
1925 gmx::ArrayRef<const gmx::RangePartitioning> updateGroupingPerMoleculetype)
1927 int n_intercg_vsite = 0;
1928 for (const gmx_molblock_t &molb : mtop.molblock)
1930 const gmx_moltype_t &molt = mtop.moltype[molb.type];
1932 std::vector<int> atomToGroup;
1933 if (!updateGroupingPerMoleculetype.empty())
1935 atomToGroup = makeAtomToGroupMapping(updateGroupingPerMoleculetype[molb.type]);
1937 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
1939 const int nral = NRAL(ftype);
1940 const InteractionList &il = molt.ilist[ftype];
1941 for (int i = 0; i < il.size(); i += 1 + nral)
1943 bool isInterGroup = atomToGroup.empty();
1946 const int group = atomToGroup[il.iatoms[1 + i]];
1947 for (int a = 1; a < nral; a++)
1949 if (atomToGroup[il.iatoms[1 + a]] != group)
1951 isInterGroup = true;
1958 n_intercg_vsite += molb.nmol;
1964 return n_intercg_vsite;
/* Creates and initializes the vsite data structure from the topology.
 * Counts the vsites, sets up inter-update-group bookkeeping and, when
 * running with multiple OpenMP vsite threads, allocates per-thread task
 * data plus one extra task for vsites that overlap thread ranges.
 * NOTE(review): this view of the source is elided; missing lines
 * (nvsite declaration, early return when there are no vsites, braces,
 * try blocks) are unchanged in the underlying file.
 */
1967 std::unique_ptr<gmx_vsite_t>
1968 initVsite(const gmx_mtop_t &mtop,
1969 const t_commrec *cr)
1971 GMX_RELEASE_ASSERT(cr != nullptr, "We need a valid commrec");
1973 std::unique_ptr<gmx_vsite_t> vsite;
1975 /* check if there are vsites */
1977 for (int ftype = 0; ftype < F_NRE; ftype++)
1979 if (interaction_function[ftype].flags & IF_VSITE)
1981 GMX_ASSERT(ftype >= c_ftypeVsiteStart && ftype < c_ftypeVsiteEnd, "c_ftypeVsiteStart and/or c_ftypeVsiteEnd do not have correct values");
1983 nvsite += gmx_mtop_ftype_count(&mtop, ftype);
1987 GMX_ASSERT(ftype < c_ftypeVsiteStart || ftype >= c_ftypeVsiteEnd, "c_ftypeVsiteStart and/or c_ftypeVsiteEnd do not have correct values");
1996 vsite = std::make_unique<gmx_vsite_t>();
1998 gmx::ArrayRef<const gmx::RangePartitioning> updateGroupingPerMoleculetype;
1999 if (DOMAINDECOMP(cr))
2001 updateGroupingPerMoleculetype = getUpdateGroupingPerMoleculetype(*cr->dd);
2003 vsite->numInterUpdategroupVsites = countInterUpdategroupVsites(mtop, updateGroupingPerMoleculetype);
/* Domain decomposition only matters for vsites with more than one rank */
2005 vsite->useDomdec = (DOMAINDECOMP(cr) && cr->dd->nnodes > 1);
2007 vsite->nthreads = gmx_omp_nthreads_get(emntVSITE);
2009 if (vsite->nthreads > 1)
2011 /* We need one extra thread data structure for the overlap vsites */
2012 vsite->tData.resize(vsite->nthreads + 1);
/* Parallel construction so each task's data is first touched by its thread */
2013 #pragma omp parallel for num_threads(vsite->nthreads) schedule(static)
2014 for (int thread = 0; thread < vsite->nthreads; thread++)
2018 vsite->tData[thread] = std::make_unique<VsiteThread>();
2020 InterdependentTask &idTask = vsite->tData[thread]->idTask;
2022 idTask.atomIndex.resize(vsite->nthreads);
2024 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
/* NOTE(review): this inner check is redundant inside the outer
 * nthreads > 1 branch; harmless but could be removed.
 */
2026 if (vsite->nthreads > 1)
2028 vsite->tData[vsite->nthreads] = std::make_unique<VsiteThread>();
/* Constructor; member initialization is elided from this view */
2035 gmx_vsite_t::gmx_vsite_t()
/* Destructor; body elided from this view. Presumably defined out-of-line so
 * the std::unique_ptr<VsiteThread> members can be destroyed with the
 * complete type available — TODO confirm.
 */
2039 gmx_vsite_t::~gmx_vsite_t()
2043 static inline void flagAtom(InterdependentTask *idTask, int atom,
2044 int thread, int nthread, int natperthread)
2046 if (!idTask->use[atom])
2048 idTask->use[atom] = true;
2049 thread = atom/natperthread;
2050 /* Assign all non-local atom force writes to thread 0 */
2051 if (thread >= nthread)
2055 idTask->atomIndex[thread].atom.push_back(atom);
2059 /*! \brief Here we try to assign all vsites that are in our local range.
2061 * Our task local atom range is tData->rangeStart - tData->rangeEnd.
2062 * Vsites that depend only on local atoms, as indicated by taskIndex[]==thread,
2063 * are assigned to task tData->ilist. Vsites that depend on non-local atoms
2064 * but not on other vsites are assigned to task tData->idTask.ilist.
2065 * taskIndex[] is set for all vsites in our range, either to our local tasks
2066 * or to the single last task as taskIndex[]=2*nthreads.
/* NOTE(review): this view of the source is elided; missing lines
 * (task/inc declarations, braces, else branches, loop increments)
 * are unchanged in the underlying file.
 */
2068 static void assignVsitesToThread(VsiteThread *tData,
2072 gmx::ArrayRef<int> taskIndex,
2073 const t_ilist *ilist,
2074 const t_iparams *ip,
2075 const unsigned short *ptype)
2077 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2079 tData->ilist[ftype].nr = 0;
2080 tData->idTask.ilist[ftype].nr = 0;
2082 int nral1 = 1 + NRAL(ftype);
2084 t_iatom *iat = ilist[ftype].iatoms;
2085 for (int i = 0; i < ilist[ftype].nr; )
2087 if (ftype == F_VSITEN)
2089 /* The 3 below is from 1+NRAL(ftype)=3 */
2090 inc = ip[iat[i]].vsiten.n*3;
/* iat[1 + i] is the vsite atom of this entry */
2093 if (iat[1 + i] < tData->rangeStart ||
2094 iat[1 + i] >= tData->rangeEnd)
2096 /* This vsite belongs to a different thread */
2101 /* We would like to assign this vsite to task thread,
2102 * but it might depend on atoms outside the atom range of thread
2103 * or on another vsite not assigned to task thread.
2106 if (ftype != F_VSITEN)
2108 for (int j = i + 2; j < i + nral1; j++)
2110 /* Do a range check to avoid a harmless race on taskIndex */
2111 if (iat[j] < tData->rangeStart ||
2112 iat[j] >= tData->rangeEnd ||
2113 taskIndex[iat[j]] != thread)
2115 if (!tData->useInterdependentTask ||
2116 ptype[iat[j]] == eptVSite)
2118 /* At least one constructing atom is a vsite
2119 * that is not assigned to the same thread.
2120 * Put this vsite into a separate task.
2126 /* There are constructing atoms outside our range,
2127 * put this vsite into a second task to be executed
2128 * on the same thread. During construction no barrier
2129 * is needed between the two tasks on the same thread.
2130 * During spreading we need to run this task with
2131 * an additional thread-local intermediate force buffer
2132 * (or atomic reduction) and a barrier between the two
2135 task = nthread + thread;
/* F_VSITEN: constructing atoms are at every third iatoms entry */
2141 for (int j = i + 2; j < i + inc; j += 3)
2143 /* Do a range check to avoid a harmless race on taskIndex */
2144 if (iat[j] < tData->rangeStart ||
2145 iat[j] >= tData->rangeEnd ||
2146 taskIndex[iat[j]] != thread)
2148 GMX_ASSERT(ptype[iat[j]] != eptVSite, "A vsite to be assigned in assignVsitesToThread has a vsite as a constructing atom that does not belong to our task, such vsites should be assigned to the single 'master' task");
2150 task = nthread + thread;
2155 /* Update this vsite's thread index entry */
2156 taskIndex[iat[1+i]] = task;
2158 if (task == thread || task == nthread + thread)
2160 /* Copy this vsite to the thread data struct of thread */
2164 il_task = &tData->ilist[ftype];
2168 il_task = &tData->idTask.ilist[ftype];
2170 /* Ensure we have sufficient memory allocated */
2171 if (il_task->nr + inc > il_task->nalloc)
2173 il_task->nalloc = over_alloc_large(il_task->nr + inc);
2174 srenew(il_task->iatoms, il_task->nalloc);
2176 /* Copy the vsite data to the thread-task local array */
2177 for (int j = i; j < i + inc; j++)
2179 il_task->iatoms[il_task->nr++] = iat[j];
2181 if (task == nthread + thread)
2183 /* This vsite writes outside our own task force block.
2184 * Put it into the interdependent task list and flag
2185 * the atoms involved for reduction.
2187 tData->idTask.vsite.push_back(iat[i + 1]);
2188 if (ftype != F_VSITEN)
2190 for (int j = i + 2; j < i + nral1; j++)
2192 flagAtom(&tData->idTask, iat[j],
2193 thread, nthread, natperthread);
2198 for (int j = i + 2; j < i + inc; j += 3)
2200 flagAtom(&tData->idTask, iat[j],
2201 thread, nthread, natperthread);
2212 /*! \brief Assign all vsites with taskIndex[]==task to task tData */
/* Used for the serial "overlap" task that handles vsites which could not
 * be assigned to any thread-local task.
 * NOTE(review): this view of the source is elided; missing lines
 * (the task parameter line, inc declaration, braces, loop increment)
 * are unchanged in the underlying file.
 */
2213 static void assignVsitesToSingleTask(VsiteThread *tData,
2215 gmx::ArrayRef<const int> taskIndex,
2216 const t_ilist *ilist,
2217 const t_iparams *ip)
2219 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2221 tData->ilist[ftype].nr = 0;
2222 tData->idTask.ilist[ftype].nr = 0;
2224 int nral1 = 1 + NRAL(ftype);
2226 t_iatom *iat = ilist[ftype].iatoms;
2227 t_ilist *il_task = &tData->ilist[ftype];
2229 for (int i = 0; i < ilist[ftype].nr; )
2231 if (ftype == F_VSITEN)
2233 /* The 3 below is from 1+NRAL(ftype)=3 */
2234 inc = ip[iat[i]].vsiten.n*3;
2236 /* Check if the vsite is assigned to our task */
2237 if (taskIndex[iat[1 + i]] == task)
2239 /* Ensure we have sufficient memory allocated */
2240 if (il_task->nr + inc > il_task->nalloc)
2242 il_task->nalloc = over_alloc_large(il_task->nr + inc);
2243 srenew(il_task->iatoms, il_task->nalloc);
2245 /* Copy the vsite data to the thread-task local array */
2246 for (int j = i; j < i + inc; j++)
2248 il_task->iatoms[il_task->nr++] = iat[j];
/* Partitions all vsites in ilist over the OpenMP vsite threads plus one
 * serial overlap task, filling the per-thread task data in vsite->tData.
 * Called after each repartitioning (or once, without domain decomposition).
 * NOTE(review): this view of the source is elided; missing lines
 * (braces, else branches, try blocks, omp barriers, some arguments)
 * are unchanged in the underlying file.
 */
2257 void split_vsites_over_threads(const t_ilist *ilist,
2258 const t_iparams *ip,
2259 const t_mdatoms *mdatoms,
2262 int vsite_atom_range, natperthread;
/* Nothing to split with a single thread */
2264 if (vsite->nthreads == 1)
2270 /* The current way of distributing the vsites over threads is primitive.
2271 * We divide the atom range 0 - natoms_in_vsite uniformly over threads,
2272 * without taking into account how the vsites are distributed.
2273 * Without domain decomposition we at least tighten the upper bound
2274 * of the range (useful for common systems such as a vsite-protein
2276 * With domain decomposition, as long as the vsites are distributed
2277 * uniformly in each domain along the major dimension, usually x,
2278 * it will also perform well.
2280 if (!vsite->useDomdec)
/* Find the highest atom index involved in any vsite construction */
2282 vsite_atom_range = -1;
2283 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2286 if (ftype != F_VSITEN)
2288 int nral1 = 1 + NRAL(ftype);
2289 const t_iatom *iat = ilist[ftype].iatoms;
2290 for (int i = 0; i < ilist[ftype].nr; i += nral1)
2292 for (int j = i + 1; j < i + nral1; j++)
2294 vsite_atom_range = std::max(vsite_atom_range, iat[j]);
/* F_VSITEN has a variable entry length; walk entries explicitly */
2302 const t_iatom *iat = ilist[ftype].iatoms;
2305 while (i < ilist[ftype].nr)
2307 /* The 3 below is from 1+NRAL(ftype)=3 */
2308 vs_ind_end = i + ip[iat[i]].vsiten.n*3;
2310 vsite_atom_range = std::max(vsite_atom_range, iat[i+1]);
2311 while (i < vs_ind_end)
2313 vsite_atom_range = std::max(vsite_atom_range, iat[i+2]);
2321 natperthread = (vsite_atom_range + vsite->nthreads - 1)/vsite->nthreads;
2325 /* Any local or not local atom could be involved in virtual sites.
2326 * But since we usually have very few non-local virtual sites
2327 * (only non-local vsites that depend on local vsites),
2328 * we distribute the local atom range equally over the threads.
2329 * When assigning vsites to threads, we should take care that the last
2330 * threads also covers the non-local range.
2332 vsite_atom_range = mdatoms->nr;
2333 natperthread = (mdatoms->homenr + vsite->nthreads - 1)/vsite->nthreads;
2338 fprintf(debug, "virtual site thread dist: natoms %d, range %d, natperthread %d\n", mdatoms->nr, vsite_atom_range, natperthread);
2341 /* To simplify the vsite assignment, we make an index which tells us
2342 * to which task particles, both non-vsites and vsites, are assigned.
2344 vsite->taskIndex.resize(mdatoms->nr);
2346 /* Initialize the task index array. Here we assign the non-vsite
2347 * particles to task=thread, so we easily figure out if vsites
2348 * depend on local and/or non-local particles in assignVsitesToThread.
2350 gmx::ArrayRef<int> taskIndex = vsite->taskIndex;
2353 for (int i = 0; i < mdatoms->nr; i++)
2355 if (mdatoms->ptype[i] == eptVSite)
2357 /* vsites are not assigned to a task yet */
2362 /* assign non-vsite particles to task thread */
2363 taskIndex[i] = thread;
2365 if (i == (thread + 1)*natperthread && thread < vsite->nthreads)
/* Per-thread assignment of vsites to thread-local tasks */
2372 #pragma omp parallel num_threads(vsite->nthreads)
2376 int thread = gmx_omp_get_thread_num();
2377 VsiteThread &tData = *vsite->tData[thread];
2379 /* Clear the buffer use flags that were set before */
2380 if (tData.useInterdependentTask)
2382 InterdependentTask &idTask = tData.idTask;
2384 /* To avoid an extra OpenMP barrier in spread_vsite_f,
2385 * we clear the force buffer at the next step,
2386 * so we need to do it here as well.
2388 clearTaskForceBufferUsedElements(&idTask);
2390 idTask.vsite.resize(0);
2391 for (int t = 0; t < vsite->nthreads; t++)
2393 AtomIndex &atomIndex = idTask.atomIndex[t];
2394 int natom = atomIndex.atom.size();
2395 for (int i = 0; i < natom; i++)
2397 idTask.use[atomIndex.atom[i]] = false;
2399 atomIndex.atom.resize(0);
2404 /* To avoid large f_buf allocations of #threads*vsite_atom_range
2405 * we don't use task2 with more than 200000 atoms. This doesn't
2406 * affect performance, since with such a large range relatively few
2407 * vsites will end up in the separate task.
2408 * Note that useTask2 should be the same for all threads.
2410 tData.useInterdependentTask = (vsite_atom_range <= 200000);
2411 if (tData.useInterdependentTask)
2413 size_t natoms_use_in_vsites = vsite_atom_range;
2414 InterdependentTask &idTask = tData.idTask;
2415 /* To avoid resizing and re-clearing every nstlist steps,
2416 * we never down size the force buffer.
2418 if (natoms_use_in_vsites > idTask.force.size() ||
2419 natoms_use_in_vsites > idTask.use.size())
2421 idTask.force.resize(natoms_use_in_vsites, { 0, 0, 0 });
2422 idTask.use.resize(natoms_use_in_vsites, false);
2426 /* Assign all vsites that can execute independently on threads */
2427 tData.rangeStart = thread *natperthread;
2428 if (thread < vsite->nthreads - 1)
2430 tData.rangeEnd = (thread + 1)*natperthread;
2434 /* The last thread should cover up to the end of the range */
2435 tData.rangeEnd = mdatoms->nr;
2437 assignVsitesToThread(&tData,
2438 thread, vsite->nthreads,
2441 ilist, ip, mdatoms->ptype);
2443 if (tData.useInterdependentTask)
2445 /* In the worst case, all tasks write to force ranges of
2446 * all other tasks, leading to #tasks^2 scaling (this is only
2447 * the overhead, the actual flops remain constant).
2448 * But in most cases there is far less coupling. To improve
2449 * scaling at high thread counts we therefore construct
2450 * an index to only loop over the actually affected tasks.
2452 InterdependentTask &idTask = tData.idTask;
2454 /* Ensure assignVsitesToThread finished on other threads */
2457 idTask.spreadTask.resize(0);
2458 idTask.reduceTask.resize(0);
2459 for (int t = 0; t < vsite->nthreads; t++)
2461 /* Do we write to the force buffer of task t? */
2462 if (!idTask.atomIndex[t].atom.empty())
2464 idTask.spreadTask.push_back(t);
2466 /* Does task t write to our force buffer? */
2467 if (!vsite->tData[t]->idTask.atomIndex[thread].atom.empty())
2469 idTask.reduceTask.push_back(t);
2474 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2476 /* Assign all remaining vsites, that will have taskIndex[]=2*vsite->nthreads,
2477 * to a single task that will not run in parallel with other tasks.
2479 assignVsitesToSingleTask(vsite->tData[vsite->nthreads].get(),
/* Debug output of the resulting task distribution */
2484 if (debug && vsite->nthreads > 1)
2486 fprintf(debug, "virtual site useInterdependentTask %d, nuse:\n",
2487 static_cast<int>(vsite->tData[0]->useInterdependentTask));
2488 for (int th = 0; th < vsite->nthreads + 1; th++)
2490 fprintf(debug, " %4d", vsite->tData[th]->idTask.nuse);
2492 fprintf(debug, "\n");
2494 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2496 if (ilist[ftype].nr > 0)
2498 fprintf(debug, "%-20s thread dist:",
2499 interaction_function[ftype].longname);
2500 for (int th = 0; th < vsite->nthreads + 1; th++)
2502 fprintf(debug, " %4d %4d ",
2503 vsite->tData[th]->ilist[ftype].nr,
2504 vsite->tData[th]->idTask.ilist[ftype].nr);
2506 fprintf(debug, "\n");
/* Consistency check: every vsite must be assigned exactly once */
2512 int nrOrig = vsiteIlistNrCount(ilist);
2514 for (int th = 0; th < vsite->nthreads + 1; th++)
2517 vsiteIlistNrCount(vsite->tData[th]->ilist) +
2518 vsiteIlistNrCount(vsite->tData[th]->idTask.ilist);
2520 GMX_ASSERT(nrThreaded == nrOrig, "The number of virtual sites assigned to all thread task has to match the total number of virtual sites");
2524 void set_vsite_top(gmx_vsite_t *vsite,
2525 const gmx_localtop_t *top,
2526 const t_mdatoms *md)
2528 if (vsite->nthreads > 1)
2530 split_vsites_over_threads(top->idef.il, top->idef.iparams,