2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
47 #include "gromacs/domdec/domdec.h"
48 #include "gromacs/domdec/domdec_struct.h"
49 #include "gromacs/gmxlib/network.h"
50 #include "gromacs/gmxlib/nrnb.h"
51 #include "gromacs/math/functions.h"
52 #include "gromacs/math/vec.h"
53 #include "gromacs/mdlib/gmx_omp_nthreads.h"
54 #include "gromacs/mdtypes/commrec.h"
55 #include "gromacs/mdtypes/mdatom.h"
56 #include "gromacs/pbcutil/ishift.h"
57 #include "gromacs/pbcutil/mshift.h"
58 #include "gromacs/pbcutil/pbc.h"
59 #include "gromacs/timing/wallcycle.h"
60 #include "gromacs/topology/ifunc.h"
61 #include "gromacs/topology/mtop_util.h"
62 #include "gromacs/topology/topology.h"
63 #include "gromacs/utility/exceptions.h"
64 #include "gromacs/utility/fatalerror.h"
65 #include "gromacs/utility/gmxassert.h"
66 #include "gromacs/utility/gmxomp.h"
68 /* The strategy used here for assigning virtual sites to (thread-)tasks
71 * We divide the atom range that vsites operate on (natoms_local with DD,
72 * 0 - last atom involved in vsites without DD) equally over all threads.
74 * Vsites in the local range constructed from atoms in the local range
75 * and/or other vsites that are fully local are assigned to a simple,
78 * Vsites that are not assigned after using the above criterion get assigned
79 * to a so called "interdependent" thread task when none of the constructing
80 * atoms is a vsite. These tasks are called interdependent, because one task
81 * accesses atoms assigned to a different task/thread.
82 * Note that this option is turned off with large (local) atom counts
83 * to avoid high memory usage.
85 * Any remaining vsites are assigned to a separate master thread task.
/*! \brief Resets the vsite-relevant fields of all F_NRE interaction lists
 *
 * Clears the iatoms pointer for every function type so the lists start empty.
 * (Other fields, e.g. the element count, are presumably also zeroed on the
 * lines not shown here — confirm against the full file.)
 */
static void init_ilist(t_ilist* ilist)
    for (int i = 0; i < F_NRE; i++)
        ilist[i].iatoms = nullptr;
/*! \brief List of atom indices belonging to a task
 *
 * Simple wrapper so tasks can hold per-task atom index lists
 * (see InterdependentTask::atomIndex below).
 */
    //! List of atom indices
    std::vector<int> atom;
/*! \brief Data structure for thread tasks that use constructing atoms outside their own atom range */
struct InterdependentTask
    //! The interaction lists, only vsite entries are used
    t_ilist ilist[F_NRE];
    //! Thread/task-local force buffer, reduced into the main force array later
    std::vector<RVec> force;
    //! The atom indices of the vsites of our task
    std::vector<int> vsite;
    //! Flags telling whether the corresponding element in \p force is spread to (written) or not
    std::vector<bool> use;
    //! The number of entries set to true in use
    //! Array of atom indices, size nthreads, covering all nuse set elements in use
    std::vector<AtomIndex> atomIndex;
    //! List of tasks (force blocks) this task spreads forces to
    std::vector<int> spreadTask;
    //! List of tasks that write to this task's force block range
    std::vector<int> reduceTask;
/*! \brief Vsite thread task data structure
 *
 * Holds the per-thread interaction lists and accumulation buffers used when
 * constructing vsites and spreading their forces with OpenMP.
 */
    //! Start of atom range of this task
    //! End of atom range of this task
    //! The interaction lists, only vsite entries are used
    t_ilist ilist[F_NRE];
    //! Local fshift accumulation buffer
    //! Local virial dx*df accumulation buffer
    //! Tells if interdependent task idTask should be used (in addition to the rest of this task), this bool has the same value on all threads
    bool useInterdependentTask;
    //! Data for vsites that involve constructing atoms in the atom range of other threads/tasks
    InterdependentTask idTask;
    /*! \brief Constructor, clears the shift-force buffer and disables the interdependent task */
        clear_rvecs(SHIFTS, fshift);
        useInterdependentTask = false;
/*! \brief Returns the sum of the vsite ilist sizes over all vsite types
 *
 * \param[in] ilist  The interaction list (indexable by function type; any
 *                   type with a size() per entry, hence the template)
 * \returns the total number of vsite interaction-list entries
 */
static int vsiteIlistNrCount(const T* ilist)
    // Accumulate sizes over the contiguous range of vsite function types
    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
        nr += ilist[ftype].size();
/*! \brief Computes dx = xi - xj, PBC-aware when \p pbc is non-null
 *
 * With PBC, returns the shift index from pbc_dx_aiuc; without PBC it does a
 * plain vector subtraction (and presumably returns CENTRAL — the return on
 * that branch is not visible here, confirm against the full file).
 */
static int pbc_rvec_sub(const t_pbc* pbc, const rvec xi, const rvec xj, rvec dx)
        return pbc_dx_aiuc(pbc, xi, xj, dx);
        rvec_sub(xi, xj, dx);
//! Returns 1/|x| using GROMACS' fast inverse square root
static inline real inverseNorm(const rvec x)
    return gmx::invsqrt(iprod(x, x));
198 /* Vsite construction routines */
/*! \brief Constructs a 2-atom vsite: x = xi + a*(xj - xi), i.e. on the line i-j
 *
 * With PBC the difference xj - xi is taken as the nearest periodic image;
 * without PBC the equivalent linear combination b*xi + a*xj is used
 * (b is presumably 1 - a, defined on a line not visible here).
 */
static void constr_vsite2(const rvec xi, const rvec xj, rvec x, real a, const t_pbc* pbc)
        pbc_dx_aiuc(pbc, xj, xi, dx);
        x[XX] = xi[XX] + a * dx[XX];
        x[YY] = xi[YY] + a * dx[YY];
        x[ZZ] = xi[ZZ] + a * dx[ZZ];
        x[XX] = b * xi[XX] + a * xj[XX];
        x[YY] = b * xi[YY] + a * xj[YY];
        x[ZZ] = b * xi[ZZ] + a * xj[ZZ];
    /* TOTAL: 10 flops */
/*! \brief Constructs a 2-atom fixed-distance vsite
 *
 * Places x at distance a from xi along the direction xi -> xj:
 * x = xi + a * xij/|xij|.
 */
static void constr_vsite2FD(const rvec xi, const rvec xj, rvec x, real a, const t_pbc* pbc)
    pbc_rvec_sub(pbc, xj, xi, xij);
    // b = a/|xij| scales the unit direction to the requested distance
    const real b = a * inverseNorm(xij);
    x[XX] = xi[XX] + b * xij[XX];
    x[YY] = xi[YY] + b * xij[YY];
    x[ZZ] = xi[ZZ] + b * xij[ZZ];
    /* TOTAL: 25 flops */
/*! \brief Constructs a 3-atom linear-combination vsite: x = xi + a*(xj-xi) + b*(xk-xi)
 *
 * With PBC the two differences use nearest periodic images; without PBC the
 * equivalent form c*xi + a*xj + b*xk is used (c is presumably 1 - a - b,
 * defined on a line not visible here).
 */
static void constr_vsite3(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b, const t_pbc* pbc)
        pbc_dx_aiuc(pbc, xj, xi, dxj);
        pbc_dx_aiuc(pbc, xk, xi, dxk);
        x[XX] = xi[XX] + a * dxj[XX] + b * dxk[XX];
        x[YY] = xi[YY] + a * dxj[YY] + b * dxk[YY];
        x[ZZ] = xi[ZZ] + a * dxj[ZZ] + b * dxk[ZZ];
        x[XX] = c * xi[XX] + a * xj[XX] + b * xk[XX];
        x[YY] = c * xi[YY] + a * xj[YY] + b * xk[YY];
        x[ZZ] = c * xi[ZZ] + a * xj[ZZ] + b * xk[ZZ];
    /* TOTAL: 17 flops */
/*! \brief Constructs a 3-atom fixed-distance vsite
 *
 * Places x at distance b from xi along the direction from i to the point
 * j + a*(k - j) on the line j-k.
 */
static void constr_vsite3FD(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b, const t_pbc* pbc)
    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xj, xjk);
    /* temp goes from i to a point on the line jk */
    temp[XX] = xij[XX] + a * xjk[XX];
    temp[YY] = xij[YY] + a * xjk[YY];
    temp[ZZ] = xij[ZZ] + a * xjk[ZZ];
    // c = b/|temp| rescales the direction to the fixed distance b
    c = b * inverseNorm(temp);
    x[XX] = xi[XX] + c * temp[XX];
    x[YY] = xi[YY] + c * temp[YY];
    x[ZZ] = xi[ZZ] + c * temp[ZZ];
    /* TOTAL: 34 flops */
/*! \brief Constructs a 3-atom fixed-angle, fixed-distance vsite
 *
 * Builds xp = component of xjk perpendicular to xij, then places
 * x = xi + a1*xij + b1*xp. The in-plane coefficients a1 and b1 are computed
 * from a, b and the inverse distances (a1's assignment is on a line not
 * visible here — presumably a1 = a*invdij, confirm against the full file).
 */
static void constr_vsite3FAD(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b, const t_pbc* pbc)
    real a1, b1, c1, invdij;
    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xj, xjk);
    invdij = inverseNorm(xij);
    // c1 = (xij . xjk)/|xij|^2: projection coefficient of xjk on xij
    c1 = invdij * invdij * iprod(xij, xjk);
    xp[XX] = xjk[XX] - c1 * xij[XX];
    xp[YY] = xjk[YY] - c1 * xij[YY];
    xp[ZZ] = xjk[ZZ] - c1 * xij[ZZ];
    b1 = b * inverseNorm(xp);
    x[XX] = xi[XX] + a1 * xij[XX] + b1 * xp[XX];
    x[YY] = xi[YY] + a1 * xij[YY] + b1 * xp[YY];
    x[ZZ] = xi[ZZ] + a1 * xij[ZZ] + b1 * xp[ZZ];
    /* TOTAL: 63 flops */
/*! \brief Constructs a 3-atom out-of-plane vsite
 *
 * x = xi + a*xij + b*xik + c*(xij x xik); the cross-product term moves the
 * site out of the i-j-k plane.
 */
constr_vsite3OUT(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b, real c, const t_pbc* pbc)
    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xi, xik);
    cprod(xij, xik, temp);
    x[XX] = xi[XX] + a * xij[XX] + b * xik[XX] + c * temp[XX];
    x[YY] = xi[YY] + a * xij[YY] + b * xik[YY] + c * temp[YY];
    x[ZZ] = xi[ZZ] + a * xij[ZZ] + b * xik[ZZ] + c * temp[ZZ];
    /* TOTAL: 33 flops */
/*! \brief Constructs a 4-atom fixed-distance vsite
 *
 * Places x at distance c from xi along the direction from i to the point
 * j + a*(k - j) + b*(l - j) in the plane j-k-l.
 */
static void constr_vsite4FD(const rvec xi,
    rvec xij, xjk, xjl, temp;
    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xj, xjk);
    pbc_rvec_sub(pbc, xl, xj, xjl);
    /* temp goes from i to a point on the plane jkl */
    temp[XX] = xij[XX] + a * xjk[XX] + b * xjl[XX];
    temp[YY] = xij[YY] + a * xjk[YY] + b * xjl[YY];
    temp[ZZ] = xij[ZZ] + a * xjk[ZZ] + b * xjl[ZZ];
    // d = c/|temp| rescales the direction to the fixed distance c
    d = c * inverseNorm(temp);
    x[XX] = xi[XX] + d * temp[XX];
    x[YY] = xi[YY] + d * temp[YY];
    x[ZZ] = xi[ZZ] + d * temp[ZZ];
    /* TOTAL: 43 flops */
/*! \brief Constructs a 4-atom fixed-distance vsite (normalized version, 4FDN)
 *
 * Builds rja = a*xik - xij and rjb = b*xil - xij, then places x at distance c
 * from xi along rm. rm is presumably the cross product rja x rjb (its
 * computation is on a line not visible here — confirm against the full file).
 */
static void constr_vsite4FDN(const rvec xi,
    rvec xij, xik, xil, ra, rb, rja, rjb, rm;
    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xi, xik);
    pbc_rvec_sub(pbc, xl, xi, xil);
    ra[XX] = a * xik[XX];
    ra[YY] = a * xik[YY];
    ra[ZZ] = a * xik[ZZ];
    rb[XX] = b * xil[XX];
    rb[YY] = b * xil[YY];
    rb[ZZ] = b * xil[ZZ];
    rvec_sub(ra, xij, rja);
    rvec_sub(rb, xij, rjb);
    // d = c/|rm| rescales the direction to the fixed distance c
    d = c * inverseNorm(rm);
    x[XX] = xi[XX] + d * rm[XX];
    x[YY] = xi[YY] + d * rm[YY];
    x[ZZ] = xi[ZZ] + d * rm[ZZ];
    /* TOTAL: 47 flops */
/*! \brief Constructs an N-atom vsite as a weighted average of its constructing atoms
 *
 * x[av] = x1 + sum_i a_i * (x[a_i] - x1), where x1 is the first constructing
 * atom's position and each a_i comes from the vsiten parameters. The return
 * value is the ilist increment for this entry (return statement not visible
 * here — presumably n3 + 3, confirm against the full file).
 */
static int constr_vsiten(const t_iatom* ia, const t_iparams ip[], rvec* x, const t_pbc* pbc)
    // Each constructing atom occupies 3 iatoms entries; n3 is the total span
    n3 = 3 * ip[ia[0]].vsiten.n;
    copy_rvec(x[ai], x1);
    for (int i = 3; i < n3; i += 3)
        a = ip[ia[i]].vsiten.a;
            // With PBC, take the nearest-image difference to the reference atom
            pbc_dx_aiuc(pbc, x[ai], x1, dx);
            rvec_sub(x[ai], x1, dx);
        dsum[XX] += a * dx[XX];
        dsum[YY] += a * dx[YY];
        dsum[ZZ] += a * dx[ZZ];
    x[av][XX] = x1[XX] + dsum[XX];
    x[av][YY] = x1[YY] + dsum[YY];
    x[av][ZZ] = x1[ZZ] + dsum[ZZ];
/*! \brief PBC modes for vsite construction and spreading */
    all, // Apply normal, simple PBC for all vsites
    none // No PBC treatment needed
/*! \brief Returns the PBC mode based on the system PBC and vsite properties
 *
 * \param[in] pbcPtr  A pointer to a PBC struct or nullptr when no PBC treatment is required
 * \returns PbcMode::none for a null pointer; otherwise presumably PbcMode::all
 *          (the non-null branch is not visible here — confirm against the full file)
 */
static PbcMode getPbcMode(const t_pbc* pbcPtr)
    if (pbcPtr == nullptr)
        return PbcMode::none;
/*! \brief Constructs all vsites in \p ilist from the positions in \p x
 *
 * Loops over every vsite function type and dispatches to the matching
 * constr_vsite* routine. When PBC is active, each newly constructed vsite is
 * shifted back into the periodic image of its old position, and when a
 * velocity array is present (parameters dt/v are on signature lines not
 * visible here) the vsite velocity is estimated by finite difference.
 */
static void construct_vsites_thread(rvec x[],
                                    const t_iparams ip[],
                                    const t_ilist ilist[],
                                    const t_pbc* pbc_null)
    const PbcMode pbcMode = getPbcMode(pbc_null);
    /* We need another pbc pointer, as with charge groups we switch per vsite */
    const t_pbc* pbc_null2 = pbc_null;
    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
        // Skip function types with no entries
        if (ilist[ftype].nr == 0)
        int nra = interaction_function[ftype].nratoms;
        int nr = ilist[ftype].nr;
        const t_iatom* ia = ilist[ftype].iatoms;
        for (int i = 0; i < nr;)
            /* The vsite and constructing atoms */
            /* Constants for constructing vsites */
            real a1 = ip[tp].vsite.a;
            /* Copy the old position */
            copy_rvec(x[avsite], xv);
            /* Construct the vsite depending on type */
                    constr_vsite2(x[ai], x[aj], x[avsite], a1, pbc_null2);
                    constr_vsite2FD(x[ai], x[aj], x[avsite], a1, pbc_null2);
                    constr_vsite3(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
                    constr_vsite3FD(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
                    constr_vsite3FAD(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
                    constr_vsite3OUT(x[ai], x[aj], x[ak], x[avsite], a1, b1, c1, pbc_null2);
                    constr_vsite4FD(x[ai], x[aj], x[ak], x[al], x[avsite], a1, b1, c1, pbc_null2);
                    constr_vsite4FDN(x[ai], x[aj], x[ak], x[al], x[avsite], a1, b1, c1, pbc_null2);
                case F_VSITEN: inc = constr_vsiten(ia, ip, x, pbc_null2); break;
                    gmx_fatal(FARGS, "No such vsite type %d in %s, line %d", ftype, __FILE__, __LINE__);
            if (pbcMode == PbcMode::all)
                /* Keep the vsite in the same periodic image as before */
                int ishift = pbc_dx_aiuc(pbc_null, x[avsite], xv, dx);
                if (ishift != CENTRAL)
                    rvec_add(xv, dx, x[avsite]);
            /* Calculate velocity of vsite... */
                // Finite-difference estimate: v = (x_new - x_old)/dt
                rvec_sub(x[avsite], xv, vv);
                svmul(inv_dt, vv, v[avsite]);
            /* Increment loop variables */
/*! \brief Top-level vsite construction entry point
 *
 * Sets up PBC handling (only needed for inter-update-group vsites), moves
 * non-local constructing-atom coordinates with domain decomposition, and then
 * constructs vsites either serially or with one OpenMP task per thread plus a
 * final task for vsites that depend on other vsites.
 */
void construct_vsites(const gmx_vsite_t* vsite,
                      const t_iparams ip[],
                      const t_ilist ilist[],
    const bool useDomdec = (vsite != nullptr && vsite->useDomdec);
    GMX_ASSERT(!useDomdec || (cr != nullptr && DOMAINDECOMP(cr)),
               "When vsites are set up with domain decomposition, we need a valid commrec");
    // TODO: Remove this assertion when we remove charge groups
    GMX_ASSERT(vsite != nullptr || ePBC == epbcNONE,
               "Without a vsite struct we can not do PBC (in case we have charge groups)");
    t_pbc pbc, *pbc_null;
    /* We only need to do pbc when we have inter-cg vsites.
     * Note that with domain decomposition we do not need to apply PBC here
     * when we have at least 3 domains along each dimension. Currently we
     * do not optimize this case.
     */
    if (ePBC != epbcNONE && (useDomdec || bMolPBC)
        && !(vsite != nullptr && vsite->numInterUpdategroupVsites == 0))
        /* This is wasting some CPU time as we now do this multiple times
         */
        clear_ivec(null_ivec);
        pbc_null = set_pbc_dd(&pbc, ePBC, useDomdec ? cr->dd->numCells : null_ivec, FALSE, box);
        // With DD, fetch coordinates of non-local constructing atoms first
        dd_move_x_vsites(cr->dd, box, x);
    if (vsite == nullptr || vsite->nthreads == 1)
        // Serial path: one pass over all vsite ilists
        construct_vsites_thread(x, dt, v, ip, ilist, pbc_null);
#pragma omp parallel num_threads(vsite->nthreads)
            const int th = gmx_omp_get_thread_num();
            const VsiteThread& tData = *vsite->tData[th];
            GMX_ASSERT(tData.rangeStart >= 0,
                       "The thread data should be initialized before calling construct_vsites");
            construct_vsites_thread(x, dt, v, ip, tData.ilist, pbc_null);
            if (tData.useInterdependentTask)
                /* Here we don't need a barrier (unlike the spreading),
                 * since both tasks only construct vsites from particles,
                 * or local vsites, not from non-local vsites.
                 */
                construct_vsites_thread(x, dt, v, ip, tData.idTask.ilist, pbc_null);
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
        /* Now we can construct the vsites that might depend on other vsites */
        construct_vsites_thread(x, dt, v, ip, vsite->tData[vsite->nthreads]->ilist, pbc_null);
/*! \brief Spreads the force on a 2-atom vsite to its constructing atoms
 *
 * The vsite force f[av] is distributed as (1-a) to atom i and a to atom j
 * (fi computed below; fj presumably on a line not visible here). Shift forces
 * are corrected when any of the involved atoms sit in a non-central image.
 */
static void spread_vsite2(const t_iatom ia[],
    svmul(1 - a, f[av], fi);
        // Graph-based shift lookup (charge-group path)
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, av), di);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), di);
        // PBC-based shift lookup: only the shift indices are needed, dx is discarded
        siv = pbc_dx_aiuc(pbc, x[ai], x[av], dx);
        sij = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
    if (fshift && (siv != CENTRAL || sij != CENTRAL))
        rvec_inc(fshift[siv], f[av]);
        rvec_dec(fshift[CENTRAL], fi);
        rvec_dec(fshift[sij], fj);
    /* TOTAL: 13 flops */
/*! \brief Constructs the vsites for the whole system topology
 *
 * Iterates over all molecule blocks and, for each molecule containing vsites,
 * builds temporary per-molecule ilists and calls construct_vsites with the
 * molecule's atom offset into the global coordinate array. Uses no PBC
 * (epbcNONE) and no velocities (dt = 0).
 */
void constructVsitesGlobal(const gmx_mtop_t& mtop, gmx::ArrayRef<gmx::RVec> x)
    GMX_ASSERT(x.ssize() >= mtop.natoms, "x should contain the whole system");
    GMX_ASSERT(!mtop.moleculeBlockIndices.empty(),
               "molblock indices are needed in constructVsitesGlobal");
    for (size_t mb = 0; mb < mtop.molblock.size(); mb++)
        const gmx_molblock_t& molb = mtop.molblock[mb];
        const gmx_moltype_t& molt = mtop.moltype[molb.type];
        // Only molecules that actually contain vsites need work
        if (vsiteIlistNrCount(molt.ilist.data()) > 0)
            int atomOffset = mtop.moleculeBlockIndices[mb].globalAtomStart;
            for (int mol = 0; mol < molb.nmol; mol++)
                t_ilist ilist[F_NRE];
                for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
                    ilist[ftype].nr = molt.ilist[ftype].size();
                    // const_cast is safe: construct_vsites does not modify the iatoms
                    ilist[ftype].iatoms = const_cast<t_iatom*>(molt.ilist[ftype].iatoms.data());
                construct_vsites(nullptr, as_rvec_array(x.data()) + atomOffset, 0.0, nullptr,
                                 mtop.ffparams.iparams.data(), ilist, epbcNONE, TRUE, nullptr, nullptr);
                atomOffset += molt.atoms.nr;
/*! \brief Spreads the force on a 2-atom fixed-distance vsite to its constructing atoms
 *
 * The vsite force fv is split into a component along xij (handled via the
 * projection fproj, which must not change the i-j distance) and the
 * remainder; atom j receives fj = b*(fv - fproj*xij) and atom i gets the
 * rest (fv - fj). Shift forces and an optional virial correction are
 * accumulated as well.
 */
static void spread_vsite2FD(const t_iatom ia[],
    const int av = ia[1];
    const int ai = ia[2];
    const int aj = ia[3];
    copy_rvec(f[av], fv);
    int sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    const real invDistance = inverseNorm(xij);
    const real b = a * invDistance;
    /* fproj = (xij . fv)/(xij . xij): projection coefficient of fv on xij */
    const real fproj = iprod(xij, fv) * invDistance * invDistance;
    fj[XX] = b * (fv[XX] - fproj * xij[XX]);
    fj[YY] = b * (fv[YY] - fproj * xij[YY]);
    fj[ZZ] = b * (fv[ZZ] - fproj * xij[ZZ]);
    /* b is already calculated in constr_vsite2FD
       storing b somewhere will save flops. */
    f[ai][XX] += fv[XX] - fj[XX];
    f[ai][YY] += fv[YY] - fj[YY];
    f[ai][ZZ] += fv[ZZ] - fj[ZZ];
        // Graph-based shift lookup (charge-group path)
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    if (svi != CENTRAL || sji != CENTRAL)
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - fj[XX];
        fshift[CENTRAL][YY] += fv[YY] - fj[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ];
        fshift[sji][XX] += fj[XX];
        fshift[sji][YY] += fj[YY];
        fshift[sji][ZZ] += fj[ZZ];
    /* When VirCorr=TRUE, the virial for the current forces is not
     * calculated from the redistributed forces. This means that
     * the effect of non-linear virtual site constructions on the virial
     * needs to be added separately. This contribution can be calculated
     * in many ways, but the simplest and cheapest way is to use
     * the first constructing atom ai as a reference position in space:
     * subtract (xv-xi)*fv and add (xj-xi)*fj.
     */
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (int i = 0; i < DIM; i++)
            for (int j = 0; j < DIM; j++)
                /* As xix is a linear combination of j and k, use that here */
                dxdf[i][j] += -xiv[i] * fv[j] + xij[i] * fj[j];
    /* TOTAL: 38 flops */
/*! \brief Spreads the force on a 3-atom linear-combination vsite to its constructing atoms
 *
 * Atom i gets (1-a-b)*fv (fi below); atoms j and k presumably get a*fv and
 * b*fv respectively (fj/fk assignments not visible here). Shift forces are
 * corrected when any involved atom is in a non-central image.
 */
static void spread_vsite3(const t_iatom ia[],
    svmul(1 - a - b, f[av], fi);
        // Graph-based shift lookup (charge-group path)
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, ia[1]), di);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), di);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, ak), di);
        // PBC-based shift lookup: only the shift indices are needed, dx is discarded
        siv = pbc_dx_aiuc(pbc, x[ai], x[av], dx);
        sij = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
        sik = pbc_dx_aiuc(pbc, x[ai], x[ak], dx);
    if (fshift && (siv != CENTRAL || sij != CENTRAL || sik != CENTRAL))
        rvec_inc(fshift[siv], f[av]);
        rvec_dec(fshift[CENTRAL], fi);
        rvec_dec(fshift[sij], fj);
        rvec_dec(fshift[sik], fk);
    /* TOTAL: 20 flops */
/*! \brief Spreads the force on a 3-atom fixed-distance vsite to its constructing atoms
 *
 * temp = c*(fv - fproj*xix) is the force transmitted through the construction
 * direction xix (the component along xix must not change the fixed distance).
 * Atom i receives fv - temp, atom j a1*temp and atom k a*temp (a1 is
 * presumably 1 - a, its assignment is not visible here — confirm against the
 * full file). Shift forces and an optional virial correction follow.
 */
static void spread_vsite3FD(const t_iatom ia[],
    rvec xvi, xij, xjk, xix, fv, temp;
    t_iatom av, ai, aj, ak;
    copy_rvec(f[av], fv);
    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
    /* xix goes from i to point x on the line jk */
    xix[XX] = xij[XX] + a * xjk[XX];
    xix[YY] = xij[YY] + a * xjk[YY];
    xix[ZZ] = xij[ZZ] + a * xjk[ZZ];
    const real invDistance = inverseNorm(xix);
    const real c = b * invDistance;
    fproj = iprod(xix, fv) * invDistance * invDistance; /* = (xix . f)/(xix . xix) */
    temp[XX] = c * (fv[XX] - fproj * xix[XX]);
    temp[YY] = c * (fv[YY] - fproj * xix[YY]);
    temp[ZZ] = c * (fv[ZZ] - fproj * xix[ZZ]);
    /* c is already calculated in constr_vsite3FD
       storing c somewhere will save 26 flops! */
    f[ai][XX] += fv[XX] - temp[XX];
    f[ai][YY] += fv[YY] - temp[YY];
    f[ai][ZZ] += fv[ZZ] - temp[ZZ];
    f[aj][XX] += a1 * temp[XX];
    f[aj][YY] += a1 * temp[YY];
    f[aj][ZZ] += a1 * temp[ZZ];
    f[ak][XX] += a * temp[XX];
    f[ak][YY] += a * temp[YY];
    f[ak][ZZ] += a * temp[ZZ];
        // Graph-based shift lookup (charge-group path)
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    if (fshift && (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL))
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - (1 + a) * temp[XX];
        fshift[CENTRAL][YY] += fv[YY] - (1 + a) * temp[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - (1 + a) * temp[ZZ];
        fshift[sji][XX] += temp[XX];
        fshift[sji][YY] += temp[YY];
        fshift[sji][ZZ] += temp[ZZ];
        fshift[skj][XX] += a * temp[XX];
        fshift[skj][YY] += a * temp[YY];
        fshift[skj][ZZ] += a * temp[ZZ];
    /* When VirCorr=TRUE, the virial for the current forces is not
     * calculated from the redistributed forces. This means that
     * the effect of non-linear virtual site constructions on the virial
     * needs to be added separately. This contribution can be calculated
     * in many ways, but the simplest and cheapest way is to use
     * the first constructing atom ai as a reference position in space:
     * subtract (xv-xi)*fv and add (xj-xi)*fj + (xk-xi)*fk.
     */
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (int i = 0; i < DIM; i++)
            for (int j = 0; j < DIM; j++)
                /* As xix is a linear combination of j and k, use that here */
                dxdf[i][j] += -xiv[i] * fv[j] + xix[i] * temp[j];
    /* TOTAL: 61 flops */
/*! \brief Spreads the force on a 3-atom fixed-angle, fixed-distance vsite
 *
 * Mirrors constr_vsite3FAD: the vsite force fv is decomposed with respect to
 * xij and the in-plane perpendicular direction xperp (f1 = fv - Fpij,
 * f2 = f1 - Fppp, plus the correction term f3 = b1*fproj*xperp), and the
 * pieces are distributed over atoms i, j and k. Shift-force and virial
 * corrections follow the same decomposition.
 */
static void spread_vsite3FAD(const t_iatom ia[],
    rvec xvi, xij, xjk, xperp, Fpij, Fppp, fv, f1, f2, f3;
    real a1, b1, c1, c2, invdij, invdij2, invdp, fproj;
    t_iatom av, ai, aj, ak;
    int svi, sji, skj, d;
    copy_rvec(f[ia[1]], fv);
    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
    invdij = inverseNorm(xij);
    invdij2 = invdij * invdij;
    // c1 = (xij . xjk)/|xij|^2, as in constr_vsite3FAD
    c1 = iprod(xij, xjk) * invdij2;
    xperp[XX] = xjk[XX] - c1 * xij[XX];
    xperp[YY] = xjk[YY] - c1 * xij[YY];
    xperp[ZZ] = xjk[ZZ] - c1 * xij[ZZ];
    /* xperp in plane ijk, perp. to ij */
    invdp = inverseNorm(xperp);
    /* a1, b1 and c1 are already calculated in constr_vsite3FAD
       storing them somewhere will save 45 flops! */
    fproj = iprod(xij, fv) * invdij2;
    svmul(fproj, xij, Fpij); /* proj. f on xij */
    svmul(iprod(xperp, fv) * invdp * invdp, xperp, Fppp); /* proj. f on xperp */
    svmul(b1 * fproj, xperp, f3);
    rvec_sub(fv, Fpij, f1); /* f1 = f - Fpij */
    rvec_sub(f1, Fppp, f2); /* f2 = f - Fpij - Fppp */
    for (d = 0; (d < DIM); d++)
    f[ai][XX] += fv[XX] - f1[XX] + c1 * f2[XX] + f3[XX];
    f[ai][YY] += fv[YY] - f1[YY] + c1 * f2[YY] + f3[YY];
    f[ai][ZZ] += fv[ZZ] - f1[ZZ] + c1 * f2[ZZ] + f3[ZZ];
    f[aj][XX] += f1[XX] - c2 * f2[XX] - f3[XX];
    f[aj][YY] += f1[YY] - c2 * f2[YY] - f3[YY];
    f[aj][ZZ] += f1[ZZ] - c2 * f2[ZZ] - f3[ZZ];
    f[ak][XX] += f2[XX];
    f[ak][YY] += f2[YY];
    f[ak][ZZ] += f2[ZZ];
        // Graph-based shift lookup (charge-group path)
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    if (fshift && (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL))
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - f1[XX] - (1 - c1) * f2[XX] + f3[XX];
        fshift[CENTRAL][YY] += fv[YY] - f1[YY] - (1 - c1) * f2[YY] + f3[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - f1[ZZ] - (1 - c1) * f2[ZZ] + f3[ZZ];
        fshift[sji][XX] += f1[XX] - c1 * f2[XX] - f3[XX];
        fshift[sji][YY] += f1[YY] - c1 * f2[YY] - f3[YY];
        fshift[sji][ZZ] += f1[ZZ] - c1 * f2[ZZ] - f3[ZZ];
        fshift[skj][XX] += f2[XX];
        fshift[skj][YY] += f2[YY];
        fshift[skj][ZZ] += f2[ZZ];
        // Virial correction relative to reference atom ai (VirCorr path)
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (i = 0; i < DIM; i++)
            for (j = 0; j < DIM; j++)
                /* Note that xik=xij+xjk, so we have to add xij*f2 */
                dxdf[i][j] += -xiv[i] * fv[j] + xij[i] * (f1[j] + (1 - c2) * f2[j] - f3[j])
    /* TOTAL: 113 flops */
/*! \brief Spreads the force on a 3-atom out-of-plane vsite to its constructing atoms
 *
 * Because the construction contains the cross product xij x xik, atoms j and
 * k pick up skew-symmetric cross terms (the cf* factors, computed on lines
 * not visible here — presumably c*fv components) in addition to their linear
 * shares a*fv and b*fv; atom i receives the remainder.
 */
static void spread_vsite3OUT(const t_iatom ia[],
    rvec xvi, xij, xik, fv, fj, fk;
    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    ski = pbc_rvec_sub(pbc, x[ak], x[ai], xik);
    copy_rvec(f[av], fv);
    fj[XX] = a * fv[XX] - xik[ZZ] * cfy + xik[YY] * cfz;
    fj[YY] = xik[ZZ] * cfx + a * fv[YY] - xik[XX] * cfz;
    fj[ZZ] = -xik[YY] * cfx + xik[XX] * cfy + a * fv[ZZ];
    fk[XX] = b * fv[XX] + xij[ZZ] * cfy - xij[YY] * cfz;
    fk[YY] = -xij[ZZ] * cfx + b * fv[YY] + xij[XX] * cfz;
    fk[ZZ] = xij[YY] * cfx - xij[XX] * cfy + b * fv[ZZ];
    // Atom i receives whatever is left so the total force is conserved
    f[ai][XX] += fv[XX] - fj[XX] - fk[XX];
    f[ai][YY] += fv[YY] - fj[YY] - fk[YY];
    f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
    rvec_inc(f[aj], fj);
    rvec_inc(f[ak], fk);
        // Graph-based shift lookup (charge-group path)
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, ai), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    if (fshift && (svi != CENTRAL || sji != CENTRAL || ski != CENTRAL))
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX];
        fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
        rvec_inc(fshift[sji], fj);
        rvec_inc(fshift[ski], fk);
        // Virial correction relative to reference atom ai (VirCorr path)
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (int i = 0; i < DIM; i++)
            for (int j = 0; j < DIM; j++)
                dxdf[i][j] += -xiv[i] * fv[j] + xij[i] * fj[j] + xik[i] * fk[j];
    /* TOTAL: 54 flops */
/*! \brief Spreads the force on a 4-atom fixed-distance vsite to its constructing atoms
 *
 * Analogous to spread_vsite3FD with one more constructing atom: the
 * transmitted force temp = d*(fv - fproj*xix) is distributed as a1*temp to j
 * (a1 presumably 1 - a - b, assignment not visible here), a*temp to k,
 * b*temp to l, and fv - temp to i.
 */
static void spread_vsite4FD(const t_iatom ia[],
    rvec xvi, xij, xjk, xjl, xix, fv, temp;
    int av, ai, aj, ak, al;
    int svi, sji, skj, slj, m;
    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
    slj = pbc_rvec_sub(pbc, x[al], x[aj], xjl);
    /* xix goes from i to point x on the plane jkl */
    for (m = 0; m < DIM; m++)
        xix[m] = xij[m] + a * xjk[m] + b * xjl[m];
    const real invDistance = inverseNorm(xix);
    const real d = c * invDistance;
    /* 4 + ?10? flops */
    copy_rvec(f[av], fv);
    fproj = iprod(xix, fv) * invDistance * invDistance; /* = (xix . f)/(xix . xix) */
    for (m = 0; m < DIM; m++)
        temp[m] = d * (fv[m] - fproj * xix[m]);
    /* c is already calculated in constr_vsite3FD
       storing c somewhere will save 35 flops! */
    for (m = 0; m < DIM; m++)
        f[ai][m] += fv[m] - temp[m];
        f[aj][m] += a1 * temp[m];
        f[ak][m] += a * temp[m];
        f[al][m] += b * temp[m];
        // Graph-based shift lookup (charge-group path)
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
        ivec_sub(SHIFT_IVEC(g, al), SHIFT_IVEC(g, aj), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    if (fshift && (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL || slj != CENTRAL))
        rvec_dec(fshift[svi], fv);
        for (m = 0; m < DIM; m++)
            fshift[CENTRAL][m] += fv[m] - (1 + a + b) * temp[m];
            fshift[sji][m] += temp[m];
            fshift[skj][m] += a * temp[m];
            fshift[slj][m] += b * temp[m];
        // Virial correction relative to reference atom ai (VirCorr path)
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (i = 0; i < DIM; i++)
            for (j = 0; j < DIM; j++)
                dxdf[i][j] += -xiv[i] * fv[j] + xix[i] * temp[j];
    /* TOTAL: 77 flops */
/*! \brief Spreads the force on a 4-atom fixed-distance-normal (4FDN) vsite
 *
 * Mirrors constr_vsite4FDN: reconstructs rja, rjb and their cross product rm,
 * then applies the analytic derivative of x = xi + c*rm/|rm| to distribute fv
 * over atoms j, k and l (the fj/fk/fl expressions below), with atom i taking
 * the remainder. rt is a helper vector recomputed per target atom (its
 * assignments are partly on lines not visible here). Shift forces and an
 * optional virial correction follow.
 */
static void spread_vsite4FDN(const t_iatom ia[],
    rvec xvi, xij, xik, xil, ra, rb, rja, rjb, rab, rm, rt;
    rvec fv, fj, fk, fl;
    int av, ai, aj, ak, al;
    int svi, sij, sik, sil;
    /* DEBUG: check atom indices */
    copy_rvec(f[av], fv);
    sij = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    sik = pbc_rvec_sub(pbc, x[ak], x[ai], xik);
    sil = pbc_rvec_sub(pbc, x[al], x[ai], xil);
    // Same intermediate vectors as in constr_vsite4FDN
    ra[XX] = a * xik[XX];
    ra[YY] = a * xik[YY];
    ra[ZZ] = a * xik[ZZ];
    rb[XX] = b * xil[XX];
    rb[YY] = b * xil[YY];
    rb[ZZ] = b * xil[ZZ];
    rvec_sub(ra, xij, rja);
    rvec_sub(rb, xij, rjb);
    rvec_sub(rb, ra, rab);
    cprod(rja, rjb, rm);
    invrm = inverseNorm(rm);
    denom = invrm * invrm;
    // cf* = c * fv / |rm|, the scaled vsite force components
    cfx = c * invrm * fv[XX];
    cfy = c * invrm * fv[YY];
    cfz = c * invrm * fv[ZZ];
    fj[XX] = (-rm[XX] * rt[XX]) * cfx + (rab[ZZ] - rm[YY] * rt[XX]) * cfy
             + (-rab[YY] - rm[ZZ] * rt[XX]) * cfz;
    fj[YY] = (-rab[ZZ] - rm[XX] * rt[YY]) * cfx + (-rm[YY] * rt[YY]) * cfy
             + (rab[XX] - rm[ZZ] * rt[YY]) * cfz;
    fj[ZZ] = (rab[YY] - rm[XX] * rt[ZZ]) * cfx + (-rab[XX] - rm[YY] * rt[ZZ]) * cfy
             + (-rm[ZZ] * rt[ZZ]) * cfz;
    rt[XX] *= denom * a;
    rt[YY] *= denom * a;
    rt[ZZ] *= denom * a;
    fk[XX] = (-rm[XX] * rt[XX]) * cfx + (-a * rjb[ZZ] - rm[YY] * rt[XX]) * cfy
             + (a * rjb[YY] - rm[ZZ] * rt[XX]) * cfz;
    fk[YY] = (a * rjb[ZZ] - rm[XX] * rt[YY]) * cfx + (-rm[YY] * rt[YY]) * cfy
             + (-a * rjb[XX] - rm[ZZ] * rt[YY]) * cfz;
    fk[ZZ] = (-a * rjb[YY] - rm[XX] * rt[ZZ]) * cfx + (a * rjb[XX] - rm[YY] * rt[ZZ]) * cfy
             + (-rm[ZZ] * rt[ZZ]) * cfz;
    rt[XX] *= denom * b;
    rt[YY] *= denom * b;
    rt[ZZ] *= denom * b;
    fl[XX] = (-rm[XX] * rt[XX]) * cfx + (b * rja[ZZ] - rm[YY] * rt[XX]) * cfy
             + (-b * rja[YY] - rm[ZZ] * rt[XX]) * cfz;
    fl[YY] = (-b * rja[ZZ] - rm[XX] * rt[YY]) * cfx + (-rm[YY] * rt[YY]) * cfy
             + (b * rja[XX] - rm[ZZ] * rt[YY]) * cfz;
    fl[ZZ] = (b * rja[YY] - rm[XX] * rt[ZZ]) * cfx + (-b * rja[XX] - rm[YY] * rt[ZZ]) * cfy
             + (-rm[ZZ] * rt[ZZ]) * cfz;
    // Atom i receives the remainder so the total force is conserved
    f[ai][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
    f[ai][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
    f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
    rvec_inc(f[aj], fj);
    rvec_inc(f[ak], fk);
    rvec_inc(f[al], fl);
        // Graph-based shift lookup (charge-group path)
        ivec_sub(SHIFT_IVEC(g, av), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, al), SHIFT_IVEC(g, ai), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    if (fshift && (svi != CENTRAL || sij != CENTRAL || sik != CENTRAL || sil != CENTRAL))
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
        fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
        rvec_inc(fshift[sij], fj);
        rvec_inc(fshift[sik], fk);
        rvec_inc(fshift[sil], fl);
        // Virial correction relative to reference atom ai (VirCorr path)
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (i = 0; i < DIM; i++)
            for (j = 0; j < DIM; j++)
                dxdf[i][j] += -xiv[i] * fv[j] + xij[i] * fj[j] + xik[i] * fk[j] + xil[i] * fl[j];
    /* Total: 207 flops (Yuck!) */
/*! \brief Spreads the force on an N-atom vsite: each constructing atom gets a_i * fv
 *
 * Returns the ilist increment for this entry (return statement not visible
 * here — presumably n3 + 3, confirm against the full file).
 */
static int spread_vsiten(const t_iatom ia[],
                         const t_iparams ip[],
    // Each constructing atom occupies 3 iatoms entries; n3 is the total span
    n3 = 3 * ip[ia[0]].vsiten.n;
    copy_rvec(x[av], xv);
    for (i = 0; i < n3; i += 3)
            // Graph-based shift lookup (charge-group path)
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, av), di);
            // PBC-based shift lookup relative to the vsite position
            siv = pbc_dx_aiuc(pbc, x[ai], xv, dx);
        a = ip[ia[i]].vsiten.a;
        svmul(a, f[av], fi);
        rvec_inc(f[ai], fi);
        if (fshift && siv != CENTRAL)
            rvec_inc(fshift[siv], fi);
            rvec_dec(fshift[CENTRAL], fi);
/*! \brief Returns the number of vsites of type \p ftype in \p ilist
 *
 * F_VSITEN entries are triplets in iatoms; all other vsite types use
 * 1 (type index) + nratoms entries per vsite.
 */
static int vsite_count(const t_ilist* ilist, int ftype)
    if (ftype == F_VSITEN)
        return ilist[ftype].nr / 3;
        return ilist[ftype].nr / (1 + interaction_function[ftype].nratoms);
/*! \brief Spreads the forces on all vsites in \p ilist onto their constructing atoms
 *
 * Iterates the vsite function types in reverse order so that forces on
 * higher-type vsites (which may be built from lower-type vsites) are spread
 * before the vsites they depend on. After spreading, the force on each vsite
 * is cleared. Dispatches per type to the matching spread_vsite* routine.
 */
static void spread_vsite_f_thread(const rvec x[],
                                  const t_ilist ilist[],
                                  const t_pbc* pbc_null)
    const PbcMode pbcMode = getPbcMode(pbc_null);
    /* We need another pbc pointer, as with charge groups we switch per vsite */
    const t_pbc* pbc_null2 = pbc_null;
    gmx::ArrayRef<const int> vsite_pbc;
    /* this loop goes backwards to be able to build *
     * higher type vsites from lower types */
    for (int ftype = c_ftypeVsiteEnd - 1; ftype >= c_ftypeVsiteStart; ftype--)
        // Skip function types with no entries
        if (ilist[ftype].nr == 0)
        int nra = interaction_function[ftype].nratoms;
        int nr = ilist[ftype].nr;
        const t_iatom* ia = ilist[ftype].iatoms;
        if (pbcMode == PbcMode::all)
            pbc_null2 = pbc_null;
        for (int i = 0; i < nr;)
            /* Constants for constructing */
            a1 = ip[tp].vsite.a;
            /* Construct the vsite depending on type */
                case F_VSITE2: spread_vsite2(ia, a1, x, f, fshift, pbc_null2, g); break;
                    spread_vsite2FD(ia, a1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    spread_vsite3(ia, a1, b1, x, f, fshift, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    spread_vsite3FD(ia, a1, b1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    spread_vsite3FAD(ia, a1, b1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    spread_vsite3OUT(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    spread_vsite4FD(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    spread_vsite4FDN(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                case F_VSITEN: inc = spread_vsiten(ia, ip, x, f, fshift, pbc_null2, g); break;
                    gmx_fatal(FARGS, "No such vsite type %d in %s, line %d", ftype, __FILE__, __LINE__);
            // The vsite's own force has been fully redistributed; zero it
            clear_rvec(f[ia[1]]);
            /* Increment loop variables */
1707 /*! \brief Clears the task force buffer elements that are written by task idTask */
1708 static void clearTaskForceBufferUsedElements(InterdependentTask* idTask)
/* Only the atoms this task actually spreads to (listed per spread target
 * in atomIndex) are cleared, avoiding a full clear of the force buffer. */
1710     int ntask = idTask->spreadTask.size();
1711     for (int ti = 0; ti < ntask; ti++)
1713         const AtomIndex* atomList = &idTask->atomIndex[idTask->spreadTask[ti]];
1714         int natom = atomList->atom.size();
1715         RVec* force = idTask->force.data();
1716         for (int i = 0; i < natom; i++)
1718             clear_rvec(force[atomList->atom[i]]);
/* spread_vsite_f(): spreads the forces accumulated on virtual sites in f
 * back onto their constructing atoms, either serially (nthreads == 1) or
 * over OpenMP threads with interdependent-task handling. When VirCorr is
 * set, the per-thread dxdf accumulators are reduced into vir with factor
 * -0.5. With domain decomposition, vsite forces are cleared before and
 * communicated after spreading. Flop counts are registered via inc_nrnb.
 * NOTE(review): many original lines (braces, else-branches, local
 * declarations) are missing from this extracted chunk; comments only. */
1723 void spread_vsite_f(const gmx_vsite_t* vsite,
1724 const rvec* gmx_restrict x,
1725 rvec* gmx_restrict f,
1726 rvec* gmx_restrict fshift,
1735 const t_commrec* cr,
1736 gmx_wallcycle* wcycle)
1738     wallcycle_start(wcycle, ewcVSITESPREAD);
1739     const bool useDomdec = vsite->useDomdec;
1740     GMX_ASSERT(!useDomdec || (cr != nullptr && DOMAINDECOMP(cr)),
1741                "When vsites are set up with domain decomposition, we need a valid commrec");
1743     t_pbc pbc, *pbc_null;
1745 /* We only need to do pbc when we have inter-cg vsites */
1746 if ((useDomdec || bMolPBC) && vsite->numInterUpdategroupVsites)
1748 /* This is wasting some CPU time as we now do this multiple times
1751 pbc_null = set_pbc_dd(&pbc, ePBC, useDomdec ? cr->dd->numCells : nullptr, FALSE, box);
/* With DD, zero the forces on non-home vsites before spreading */
1760 dd_clear_f_vsites(cr->dd, f);
/* Serial path: one pass over all vsite lists */
1763 if (vsite->nthreads == 1)
1770 spread_vsite_f_thread(x, f, fshift, VirCorr, dxdf, idef->iparams, idef->il, g, pbc_null);
1774 for (int i = 0; i < DIM; i++)
1776 for (int j = 0; j < DIM; j++)
1778 vir[i][j] += -0.5 * dxdf[i][j];
1785 /* First spread the vsites that might depend on non-local vsites */
1788 clear_mat(vsite->tData[vsite->nthreads]->dxdf);
1790 spread_vsite_f_thread(x, f, fshift, VirCorr, vsite->tData[vsite->nthreads]->dxdf,
1791 idef->iparams, vsite->tData[vsite->nthreads]->ilist, g, pbc_null);
/* Parallel spreading: each thread handles its own vsite task(s) */
1793 #pragma omp parallel num_threads(vsite->nthreads)
1797 int thread = gmx_omp_get_thread_num();
1798 VsiteThread& tData = *vsite->tData[thread];
/* Thread 0 (or all threads when no fshift requested) writes fshift
 * directly; other threads use a thread-local buffer reduced later */
1801 if (thread == 0 || fshift == nullptr)
1807 fshift_t = tData.fshift;
1809 for (int i = 0; i < SHIFTS; i++)
1811 clear_rvec(fshift_t[i]);
1816 clear_mat(tData.dxdf);
1819 if (tData.useInterdependentTask)
1821 /* Spread the vsites that spread outside our local range.
1822 * This is done using a thread-local force buffer force.
1823 * First we need to copy the input vsite forces to force.
1825 InterdependentTask* idTask = &tData.idTask;
1827 /* Clear the buffer elements set by our task during
1828 * the last call to spread_vsite_f.
1830 clearTaskForceBufferUsedElements(idTask);
1832 int nvsite = idTask->vsite.size();
1833 for (int i = 0; i < nvsite; i++)
1835 copy_rvec(f[idTask->vsite[i]], idTask->force[idTask->vsite[i]]);
1837 spread_vsite_f_thread(x, as_rvec_array(idTask->force.data()), fshift_t, VirCorr,
1838 tData.dxdf, idef->iparams, tData.idTask.ilist, g, pbc_null);
1840 /* We need a barrier before reducing forces below
1841 * that have been produced by a different thread above.
1845 /* Loop over all thread task and reduce forces they
1846 * produced on atoms that fall in our range.
1847 * Note that atomic reduction would be a simpler solution,
1848 * but that might not have good support on all platforms.
1850 int ntask = idTask->reduceTask.size();
1851 for (int ti = 0; ti < ntask; ti++)
1853 const InterdependentTask* idt_foreign =
1854 &vsite->tData[idTask->reduceTask[ti]]->idTask;
1855 const AtomIndex* atomList = &idt_foreign->atomIndex[thread];
1856 const RVec* f_foreign = idt_foreign->force.data();
1858 int natom = atomList->atom.size();
1859 for (int i = 0; i < natom; i++)
1861 int ind = atomList->atom[i];
1862 rvec_inc(f[ind], f_foreign[ind]);
1863 /* Clearing of f_foreign is done at the next step */
1866 /* Clear the vsite forces, both in f and force */
1867 for (int i = 0; i < nvsite; i++)
1869 int ind = tData.idTask.vsite[i];
1871 clear_rvec(tData.idTask.force[ind]);
1875 /* Spread the vsites that spread locally only */
1876 spread_vsite_f_thread(x, f, fshift_t, VirCorr, tData.dxdf, idef->iparams,
1877 tData.ilist, g, pbc_null);
1879 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
/* Reduce the thread-local fshift buffers (threads 1..n-1) into fshift */
1882 if (fshift != nullptr)
1884 for (int th = 1; th < vsite->nthreads; th++)
1886 for (int i = 0; i < SHIFTS; i++)
1888 rvec_inc(fshift[i], vsite->tData[th]->fshift[i]);
/* Reduce all per-thread dxdf accumulators (incl. the extra overlap task)
 * into the virial with factor -0.5 */
1895 for (int th = 0; th < vsite->nthreads + 1; th++)
1897 /* MSVC doesn't like matrix references, so we use a pointer */
1898 const matrix* dxdf = &vsite->tData[th]->dxdf;
1900 for (int i = 0; i < DIM; i++)
1902 for (int j = 0; j < DIM; j++)
1904 vir[i][j] += -0.5 * (*dxdf)[i][j];
/* With DD, communicate the forces spread onto non-home atoms */
1913 dd_move_f_vsites(cr->dd, f, fshift);
1916 inc_nrnb(nrnb, eNR_VSITE2, vsite_count(idef->il, F_VSITE2));
1917 inc_nrnb(nrnb, eNR_VSITE2FD, vsite_count(idef->il, F_VSITE2FD));
1918 inc_nrnb(nrnb, eNR_VSITE3, vsite_count(idef->il, F_VSITE3));
1919 inc_nrnb(nrnb, eNR_VSITE3FD, vsite_count(idef->il, F_VSITE3FD));
1920 inc_nrnb(nrnb, eNR_VSITE3FAD, vsite_count(idef->il, F_VSITE3FAD));
1921 inc_nrnb(nrnb, eNR_VSITE3OUT, vsite_count(idef->il, F_VSITE3OUT));
1922 inc_nrnb(nrnb, eNR_VSITE4FD, vsite_count(idef->il, F_VSITE4FD));
1923 inc_nrnb(nrnb, eNR_VSITE4FDN, vsite_count(idef->il, F_VSITE4FDN));
1924 inc_nrnb(nrnb, eNR_VSITEN, vsite_count(idef->il, F_VSITEN));
1926 wallcycle_stop(wcycle, ewcVSITESPREAD);
1929 /*! \brief Returns an array with group indices for each atom
1931 * \param[in] grouping The partitioning of the atom range into atom groups
/* One entry per atom up to the end of the full range; every atom in
 * block 'group' is mapped to that group index.
 * NOTE(review): the return statement is missing from this extracted
 * chunk (extraction gap). */
1933 static std::vector<int> makeAtomToGroupMapping(const gmx::RangePartitioning& grouping)
1935 std::vector<int> atomToGroup(grouping.fullRange().end(), 0);
1937 for (int group = 0; group < grouping.numBlocks(); group++)
1939 auto block = grouping.block(group);
1940 std::fill(atomToGroup.begin() + block.begin(), atomToGroup.begin() + block.end(), group);
/* Counts the vsites in the whole topology whose construction is not a
 * linear combination of the constructing atoms: every vsite type except
 * F_VSITE2, F_VSITE3 and F_VSITEN. Each molecule of a block contributes
 * (hence the factor molb.nmol). */
1946 int countNonlinearVsites(const gmx_mtop_t& mtop)
1948     int numNonlinearVsites = 0;
1949     for (const gmx_molblock_t& molb : mtop.molblock)
1951         const gmx_moltype_t& molt = mtop.moltype[molb.type];
1953         for (const auto& ilist : extractILists(molt.ilist, IF_VSITE))
1955             if (ilist.functionType != F_VSITE2 && ilist.functionType != F_VSITE3
1956                 && ilist.functionType != F_VSITEN)
/* iatoms.size()/(1+NRAL) = number of interactions of this type */
1958 numNonlinearVsites += molb.nmol * ilist.iatoms.size() / (1 + NRAL(ilist.functionType));
1963     return numNonlinearVsites;
1966 int countInterUpdategroupVsites(const gmx_mtop_t& mtop,
1967 gmx::ArrayRef<const gmx::RangePartitioning> updateGroupingPerMoleculetype)
1969 int n_intercg_vsite = 0;
1970 for (const gmx_molblock_t& molb : mtop.molblock)
1972 const gmx_moltype_t& molt = mtop.moltype[molb.type];
1974 std::vector<int> atomToGroup;
1975 if (!updateGroupingPerMoleculetype.empty())
1977 atomToGroup = makeAtomToGroupMapping(updateGroupingPerMoleculetype[molb.type]);
1979 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
1981 const int nral = NRAL(ftype);
1982 const InteractionList& il = molt.ilist[ftype];
1983 for (int i = 0; i < il.size(); i += 1 + nral)
1985 bool isInterGroup = atomToGroup.empty();
1988 const int group = atomToGroup[il.iatoms[1 + i]];
1989 for (int a = 1; a < nral; a++)
1991 if (atomToGroup[il.iatoms[1 + a]] != group)
1993 isInterGroup = true;
2000 n_intercg_vsite += molb.nmol;
2006 return n_intercg_vsite;
/* Creates and initializes the vsite data structure when the topology
 * contains virtual sites: counts the vsites, determines inter-update-group
 * vsites and DD usage, and sets up one VsiteThread per OpenMP thread plus
 * one extra entry for the "overlap" vsites that cannot run in parallel.
 * Returns an empty pointer when there are no vsites (gap in extraction).
 * NOTE(review): braces, early-return and some declarations are missing
 * from this extracted chunk; only comments were added. */
2009 std::unique_ptr<gmx_vsite_t> initVsite(const gmx_mtop_t& mtop, const t_commrec* cr)
2011     GMX_RELEASE_ASSERT(cr != nullptr, "We need a valid commrec");
2013     std::unique_ptr<gmx_vsite_t> vsite;
2015 /* check if there are vsites */
2017 for (int ftype = 0; ftype < F_NRE; ftype++)
2019 if (interaction_function[ftype].flags & IF_VSITE)
/* All vsite ftypes must fall inside the [c_ftypeVsiteStart, c_ftypeVsiteEnd) range */
2021 GMX_ASSERT(ftype >= c_ftypeVsiteStart && ftype < c_ftypeVsiteEnd,
2022 "c_ftypeVsiteStart and/or c_ftypeVsiteEnd do not have correct values");
2024 nvsite += gmx_mtop_ftype_count(&mtop, ftype);
2028 GMX_ASSERT(ftype < c_ftypeVsiteStart || ftype >= c_ftypeVsiteEnd,
2029 "c_ftypeVsiteStart and/or c_ftypeVsiteEnd do not have correct values");
2038 vsite = std::make_unique<gmx_vsite_t>();
2040 gmx::ArrayRef<const gmx::RangePartitioning> updateGroupingPerMoleculetype;
2041 if (DOMAINDECOMP(cr))
2043 updateGroupingPerMoleculetype = getUpdateGroupingPerMoleculetype(*cr->dd);
2045 vsite->numInterUpdategroupVsites = countInterUpdategroupVsites(mtop, updateGroupingPerMoleculetype);
/* DD with a single rank behaves like no DD for vsite purposes */
2047 vsite->useDomdec = (DOMAINDECOMP(cr) && cr->dd->nnodes > 1);
2049 vsite->nthreads = gmx_omp_nthreads_get(emntVSITE);
2051 if (vsite->nthreads > 1)
2053 /* We need one extra thread data structure for the overlap vsites */
2054 vsite->tData.resize(vsite->nthreads + 1);
/* Allocate per-thread data on the owning thread (first-touch placement) */
2055 #pragma omp parallel for num_threads(vsite->nthreads) schedule(static)
2056 for (int thread = 0; thread < vsite->nthreads; thread++)
2060 vsite->tData[thread] = std::make_unique<VsiteThread>();
2062 InterdependentTask& idTask = vsite->tData[thread]->idTask;
2064 idTask.atomIndex.resize(vsite->nthreads);
2066 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
2068 if (vsite->nthreads > 1)
2070 vsite->tData[vsite->nthreads] = std::make_unique<VsiteThread>();
/* Default constructor; the members are filled in by initVsite() */
2077 gmx_vsite_t::gmx_vsite_t() {}
/* Out-of-line destructor so unique_ptr members of incomplete types
 * declared in the header can be destroyed here */
2079 gmx_vsite_t::~gmx_vsite_t() {}
/* Marks atom as used in the interdependent-task force buffer and records
 * it in the atom index of the thread that owns the atom's range, so the
 * owning thread can later reduce the force written here.
 * NOTE(review): the incoming 'thread' parameter is overwritten below with
 * atom/natperthread; the branch body for thread >= nthread (presumably
 * 'thread = 0;', assigning non-local atoms to thread 0) is missing from
 * this extracted chunk — verify against the full source. */
2081 static inline void flagAtom(InterdependentTask* idTask, int atom, int thread, int nthread, int natperthread)
2083     if (!idTask->use[atom])
2085         idTask->use[atom] = true;
2086         thread = atom / natperthread;
2087         /* Assign all non-local atom force writes to thread 0 */
2088         if (thread >= nthread)
2092         idTask->atomIndex[thread].atom.push_back(atom);
2096 /*! \brief Here we try to assign all vsites that are in our local range.
2098 * Our task local atom range is tData->rangeStart - tData->rangeEnd.
2099 * Vsites that depend only on local atoms, as indicated by taskIndex[]==thread,
2100 * are assigned to task tData->ilist. Vsites that depend on non-local atoms
2101 * but not on other vsites are assigned to task tData->idTask.ilist.
2102 * taskIndex[] is set for all vsites in our range, either to our local tasks
2103 * or to the single last task as taskIndex[]=2*nthreads.
/* NOTE(review): braces, 'continue'/'break' statements and some declarations
 * are missing from this extracted chunk; only comments were added. */
2105 static void assignVsitesToThread(VsiteThread* tData,
2109 gmx::ArrayRef<int> taskIndex,
2110 const t_ilist* ilist,
2111 const t_iparams* ip,
2112 const unsigned short* ptype)
2114     for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
/* Reset both the local and the interdependent task lists for this type */
2116 tData->ilist[ftype].nr = 0;
2117 tData->idTask.ilist[ftype].nr = 0;
2119 int nral1 = 1 + NRAL(ftype);
2121 t_iatom* iat = ilist[ftype].iatoms;
2122 for (int i = 0; i < ilist[ftype].nr;)
2124 if (ftype == F_VSITEN)
2126 /* The 3 below is from 1+NRAL(ftype)=3 */
2127 inc = ip[iat[i]].vsiten.n * 3;
/* iat[1 + i] is the vsite atom of this interaction */
2130 if (iat[1 + i] < tData->rangeStart || iat[1 + i] >= tData->rangeEnd)
2132 /* This vsite belongs to a different thread */
2137 /* We would like to assign this vsite to task thread,
2138 * but it might depend on atoms outside the atom range of thread
2139 * or on another vsite not assigned to task thread.
2142 if (ftype != F_VSITEN)
2144 for (int j = i + 2; j < i + nral1; j++)
2146 /* Do a range check to avoid a harmless race on taskIndex */
2147 if (iat[j] < tData->rangeStart || iat[j] >= tData->rangeEnd || taskIndex[iat[j]] != thread)
2149 if (!tData->useInterdependentTask || ptype[iat[j]] == eptVSite)
2151 /* At least one constructing atom is a vsite
2152 * that is not assigned to the same thread.
2153 * Put this vsite into a separate task.
2159 /* There are constructing atoms outside our range,
2160 * put this vsite into a second task to be executed
2161 * on the same thread. During construction no barrier
2162 * is needed between the two tasks on the same thread.
2163 * During spreading we need to run this task with
2164 * an additional thread-local intermediate force buffer
2165 * (or atomic reduction) and a barrier between the two
2168 task = nthread + thread;
/* F_VSITEN: constructing atoms sit at every 3rd iatoms entry */
2174 for (int j = i + 2; j < i + inc; j += 3)
2176 /* Do a range check to avoid a harmless race on taskIndex */
2177 if (iat[j] < tData->rangeStart || iat[j] >= tData->rangeEnd || taskIndex[iat[j]] != thread)
2179 GMX_ASSERT(ptype[iat[j]] != eptVSite,
2180 "A vsite to be assigned in assignVsitesToThread has a vsite as "
2181 "a constructing atom that does not belong to our task, such "
2182 "vsites should be assigned to the single 'master' task");
2184 task = nthread + thread;
2189 /* Update this vsite's thread index entry */
2190 taskIndex[iat[1 + i]] = task;
2192 if (task == thread || task == nthread + thread)
2194 /* Copy this vsite to the thread data struct of thread */
2198 il_task = &tData->ilist[ftype];
2202 il_task = &tData->idTask.ilist[ftype];
2204 /* Ensure we have sufficient memory allocated */
2205 if (il_task->nr + inc > il_task->nalloc)
2207 il_task->nalloc = over_alloc_large(il_task->nr + inc);
2208 srenew(il_task->iatoms, il_task->nalloc);
2210 /* Copy the vsite data to the thread-task local array */
2211 for (int j = i; j < i + inc; j++)
2213 il_task->iatoms[il_task->nr++] = iat[j];
2215 if (task == nthread + thread)
2217 /* This vsite writes outside our own task force block.
2218 * Put it into the interdependent task list and flag
2219 * the atoms involved for reduction.
2221 tData->idTask.vsite.push_back(iat[i + 1]);
2222 if (ftype != F_VSITEN)
2224 for (int j = i + 2; j < i + nral1; j++)
2226 flagAtom(&tData->idTask, iat[j], thread, nthread, natperthread);
2231 for (int j = i + 2; j < i + inc; j += 3)
2233 flagAtom(&tData->idTask, iat[j], thread, nthread, natperthread);
2244 /*! \brief Assign all vsites with taskIndex[]==task to task tData */
/* Used for the single serial "master" task (task == 2*nthreads) that
 * handles vsites which could not be assigned to a parallel thread task.
 * NOTE(review): braces and the loop-increment lines are missing from
 * this extracted chunk; only comments were added. */
2245 static void assignVsitesToSingleTask(VsiteThread* tData,
2247 gmx::ArrayRef<const int> taskIndex,
2248 const t_ilist* ilist,
2249 const t_iparams* ip)
2251     for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2253 tData->ilist[ftype].nr = 0;
2254 tData->idTask.ilist[ftype].nr = 0;
2256 int nral1 = 1 + NRAL(ftype);
2258 t_iatom* iat = ilist[ftype].iatoms;
2259 t_ilist* il_task = &tData->ilist[ftype];
2261 for (int i = 0; i < ilist[ftype].nr;)
2263 if (ftype == F_VSITEN)
2265 /* The 3 below is from 1+NRAL(ftype)=3 */
2266 inc = ip[iat[i]].vsiten.n * 3;
2268 /* Check if the vsite is assigned to our task */
2269 if (taskIndex[iat[1 + i]] == task)
2271 /* Ensure we have sufficient memory allocated */
2272 if (il_task->nr + inc > il_task->nalloc)
2274 il_task->nalloc = over_alloc_large(il_task->nr + inc);
2275 srenew(il_task->iatoms, il_task->nalloc);
2277 /* Copy the vsite data to the thread-task local array */
2278 for (int j = i; j < i + inc; j++)
2280 il_task->iatoms[il_task->nr++] = iat[j];
/* Partitions all vsites in ilist over the OpenMP vsite threads.
 * Builds vsite->taskIndex mapping every particle to a task, then lets
 * each thread (in parallel) collect the vsites it can handle on its own
 * or in its interdependent second task, and finally assigns the
 * remaining vsites to a single serial task. Ends with a consistency
 * check that no vsite was lost or duplicated.
 * NOTE(review): braces, else-branches and some statements are missing
 * from this extracted chunk; only comments were added. */
2289 void split_vsites_over_threads(const t_ilist* ilist, const t_iparams* ip, const t_mdatoms* mdatoms, gmx_vsite_t* vsite)
2291     int vsite_atom_range, natperthread;
/* Nothing to split with a single thread */
2293 if (vsite->nthreads == 1)
2299 /* The current way of distributing the vsites over threads is primitive.
2300 * We divide the atom range 0 - natoms_in_vsite uniformly over threads,
2301 * without taking into account how the vsites are distributed.
2302 * Without domain decomposition we at least tighten the upper bound
2303 * of the range (useful for common systems such as a vsite-protein
2305 * With domain decomposition, as long as the vsites are distributed
2306 * uniformly in each domain along the major dimension, usually x,
2307 * it will also perform well.
2309 if (!vsite->useDomdec)
/* Determine the highest atom index involved in any vsite */
2311 vsite_atom_range = -1;
2312 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2315 if (ftype != F_VSITEN)
2317 int nral1 = 1 + NRAL(ftype);
2318 const t_iatom* iat = ilist[ftype].iatoms;
2319 for (int i = 0; i < ilist[ftype].nr; i += nral1)
2321 for (int j = i + 1; j < i + nral1; j++)
2323 vsite_atom_range = std::max(vsite_atom_range, iat[j]);
/* F_VSITEN: variable-length entries, walk them explicitly */
2331 const t_iatom* iat = ilist[ftype].iatoms;
2334 while (i < ilist[ftype].nr)
2336 /* The 3 below is from 1+NRAL(ftype)=3 */
2337 vs_ind_end = i + ip[iat[i]].vsiten.n * 3;
2339 vsite_atom_range = std::max(vsite_atom_range, iat[i + 1]);
2340 while (i < vs_ind_end)
2342 vsite_atom_range = std::max(vsite_atom_range, iat[i + 2]);
/* Divide the (tightened) atom range evenly over the threads */
2350 natperthread = (vsite_atom_range + vsite->nthreads - 1) / vsite->nthreads;
2354 /* Any local or not local atom could be involved in virtual sites.
2355 * But since we usually have very few non-local virtual sites
2356 * (only non-local vsites that depend on local vsites),
2357 * we distribute the local atom range equally over the threads.
2358 * When assigning vsites to threads, we should take care that the last
2359 * threads also covers the non-local range.
2361 vsite_atom_range = mdatoms->nr;
2362 natperthread = (mdatoms->homenr + vsite->nthreads - 1) / vsite->nthreads;
2367 fprintf(debug, "virtual site thread dist: natoms %d, range %d, natperthread %d\n",
2368 mdatoms->nr, vsite_atom_range, natperthread);
2371 /* To simplify the vsite assignment, we make an index which tells us
2372 * to which task particles, both non-vsites and vsites, are assigned.
2374 vsite->taskIndex.resize(mdatoms->nr);
2376 /* Initialize the task index array. Here we assign the non-vsite
2377 * particles to task=thread, so we easily figure out if vsites
2378 * depend on local and/or non-local particles in assignVsitesToThread.
2380 gmx::ArrayRef<int> taskIndex = vsite->taskIndex;
2383 for (int i = 0; i < mdatoms->nr; i++)
2385 if (mdatoms->ptype[i] == eptVSite)
2387 /* vsites are not assigned to a task yet */
2392 /* assign non-vsite particles to task thread */
2393 taskIndex[i] = thread;
/* Advance to the next thread's range boundary */
2395 if (i == (thread + 1) * natperthread && thread < vsite->nthreads)
2402 #pragma omp parallel num_threads(vsite->nthreads)
2406 int thread = gmx_omp_get_thread_num();
2407 VsiteThread& tData = *vsite->tData[thread];
2409 /* Clear the buffer use flags that were set before */
2410 if (tData.useInterdependentTask)
2412 InterdependentTask& idTask = tData.idTask;
2414 /* To avoid an extra OpenMP barrier in spread_vsite_f,
2415 * we clear the force buffer at the next step,
2416 * so we need to do it here as well.
2418 clearTaskForceBufferUsedElements(&idTask);
2420 idTask.vsite.resize(0);
2421 for (int t = 0; t < vsite->nthreads; t++)
2423 AtomIndex& atomIndex = idTask.atomIndex[t];
2424 int natom = atomIndex.atom.size();
2425 for (int i = 0; i < natom; i++)
2427 idTask.use[atomIndex.atom[i]] = false;
2429 atomIndex.atom.resize(0);
2434 /* To avoid large f_buf allocations of #threads*vsite_atom_range
2435 * we don't use task2 with more than 200000 atoms. This doesn't
2436 * affect performance, since with such a large range relatively few
2437 * vsites will end up in the separate task.
2438 * Note that useTask2 should be the same for all threads.
2440 tData.useInterdependentTask = (vsite_atom_range <= 200000);
2441 if (tData.useInterdependentTask)
2443 size_t natoms_use_in_vsites = vsite_atom_range;
2444 InterdependentTask& idTask = tData.idTask;
2445 /* To avoid resizing and re-clearing every nstlist steps,
2446 * we never down size the force buffer.
2448 if (natoms_use_in_vsites > idTask.force.size() || natoms_use_in_vsites > idTask.use.size())
2450 idTask.force.resize(natoms_use_in_vsites, { 0, 0, 0 });
2451 idTask.use.resize(natoms_use_in_vsites, false);
2455 /* Assign all vsites that can execute independently on threads */
2456 tData.rangeStart = thread * natperthread;
2457 if (thread < vsite->nthreads - 1)
2459 tData.rangeEnd = (thread + 1) * natperthread;
2463 /* The last thread should cover up to the end of the range */
2464 tData.rangeEnd = mdatoms->nr;
2466 assignVsitesToThread(&tData, thread, vsite->nthreads, natperthread, taskIndex, ilist,
2467 ip, mdatoms->ptype);
2469 if (tData.useInterdependentTask)
2471 /* In the worst case, all tasks write to force ranges of
2472 * all other tasks, leading to #tasks^2 scaling (this is only
2473 * the overhead, the actual flops remain constant).
2474 * But in most cases there is far less coupling. To improve
2475 * scaling at high thread counts we therefore construct
2476 * an index to only loop over the actually affected tasks.
2478 InterdependentTask& idTask = tData.idTask;
2480 /* Ensure assignVsitesToThread finished on other threads */
2483 idTask.spreadTask.resize(0);
2484 idTask.reduceTask.resize(0);
2485 for (int t = 0; t < vsite->nthreads; t++)
2487 /* Do we write to the force buffer of task t? */
2488 if (!idTask.atomIndex[t].atom.empty())
2490 idTask.spreadTask.push_back(t);
2492 /* Does task t write to our force buffer? */
2493 if (!vsite->tData[t]->idTask.atomIndex[thread].atom.empty())
2495 idTask.reduceTask.push_back(t)
2500 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
2502 /* Assign all remaining vsites, that will have taskIndex[]=2*vsite->nthreads,
2503 * to a single task that will not run in parallel with other tasks.
2505 assignVsitesToSingleTask(vsite->tData[vsite->nthreads].get(), 2 * vsite->nthreads, taskIndex,
/* Debug reporting of the per-thread vsite distribution */
2508 if (debug && vsite->nthreads > 1)
2510 fprintf(debug, "virtual site useInterdependentTask %d, nuse:\n",
2511 static_cast<int>(vsite->tData[0]->useInterdependentTask));
2512 for (int th = 0; th < vsite->nthreads + 1; th++)
2514 fprintf(debug, " %4d", vsite->tData[th]->idTask.nuse);
2516 fprintf(debug, "\n");
2518 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2520 if (ilist[ftype].nr > 0)
2522 fprintf(debug, "%-20s thread dist:", interaction_function[ftype].longname);
2523 for (int th = 0; th < vsite->nthreads + 1; th++)
2525 fprintf(debug, " %4d %4d ", vsite->tData[th]->ilist[ftype].nr,
2526 vsite->tData[th]->idTask.ilist[ftype].nr);
2528 fprintf(debug, "\n");
/* Consistency check: threaded lists together must hold every vsite once */
2534 int nrOrig = vsiteIlistNrCount(ilist);
2536 for (int th = 0; th < vsite->nthreads + 1; th++)
2538 nrThreaded += vsiteIlistNrCount(vsite->tData[th]->ilist)
2539 + vsiteIlistNrCount(vsite->tData[th]->idTask.ilist);
2541 GMX_ASSERT(nrThreaded == nrOrig,
2542 "The number of virtual sites assigned to all thread task has to match the total "
2543 "number of virtual sites");
2547 void set_vsite_top(gmx_vsite_t* vsite, const gmx_localtop_t* top, const t_mdatoms* md)
2549 if (vsite->nthreads > 1)
2551 split_vsites_over_threads(top->idef.il, top->idef.iparams, md, vsite);