 * This file is part of the GROMACS molecular simulation package.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/topology/mtop_util.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxomp.h"
#include "gromacs/utility/smalloc.h"
/* The strategy used here for assigning virtual sites to (thread-)tasks.
 * We divide the atom range that vsites operate on (natoms_local with DD,
 * 0 - last atom involved in vsites without DD) equally over all threads.
 * Vsites in the local range constructed from atoms in the local range
 * and/or other vsites that are fully local are assigned to a simple,
 * Vsites that are not assigned after using the above criterion get assigned
 * to a so-called "interdependent" thread task when none of the constructing
 * atoms is a vsite. These tasks are called interdependent, because one task
 * accesses atoms assigned to a different task/thread.
 * Note that this option is turned off with large (local) atom counts
 * to avoid high memory usage.
 * Any remaining vsites are assigned to a separate master thread task.
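
/* A minimal illustration of the uniform range partition described above
 * (a sketch only, not part of the GROMACS API; natperthread and nthread
 * correspond to the quantities computed in split_vsites_over_threads):
 */
static inline int exampleTaskOfAtom(int atom, int natperthread, int nthread)
{
    /* Atoms beyond the last full range belong to the last thread, matching
     * how rangeEnd of the last task is extended to cover all atoms.
     */
    int task = atom/natperthread;
    return (task < nthread) ? task : nthread - 1;
}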
static void init_ilist(t_ilist *ilist)
    for (int i = 0; i < F_NRE; i++)
        ilist[i].iatoms = nullptr;
/*! \brief List of atom indices belonging to a task */
    //! List of atom indices
    std::vector<int> atom;
/*! \brief Data structure for thread tasks that use constructing atoms outside their own atom range */
struct InterdependentTask
    //! The interaction lists, only vsite entries are used
    t_ilist ilist[F_NRE];
    //! Thread/task-local force buffer
    std::vector<RVec> force;
    //! The atom indices of the vsites of our task
    std::vector<int> vsite;
    //! Flags whether each element in the force buffer is spread to or not
    std::vector<bool> use;
    //! The number of entries set to true in use
    //! Array of atom indices, size nthreads, covering all nuse set elements in use
    std::vector<AtomIndex> atomIndex;
    //! List of tasks (force blocks) this task spreads forces to
    std::vector<int> spreadTask;
    //! List of tasks that write to this task's force block range
    std::vector<int> reduceTask;
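
/* For example: if task 0 constructs a vsite from an atom that falls in the
 * range of task 2, task 0 writes that atom's force into its own buffer,
 * 2 appears in task 0's spreadTask list and 0 appears in task 2's
 * reduceTask list, so task 2 later reduces the contribution back into the
 * main force array after a barrier.
 */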
/*! \brief Vsite thread task data structure */
    //! Start of atom range of this task
    //! End of atom range of this task
    //! The interaction lists, only vsite entries are used
    t_ilist ilist[F_NRE];
    //! Local fshift accumulation buffer
    //! Local virial dx*df accumulation buffer
    //! Tells whether the interdependent task idTask should be used (in addition to the rest of this task); this bool has the same value on all threads
    bool useInterdependentTask;
    //! Data for vsites that involve constructing atoms in the atom range of other threads/tasks
    InterdependentTask idTask;
        clear_rvecs(SHIFTS, fshift);
        useInterdependentTask = false;
/* The start and end values of the vsite function types in the ftype enum.
 * The validity of these values is checked in init_vsite.
 * This is used to avoid loops over all ftypes just to get the vsite entries.
 * (We should replace the fixed ilist array by only the used entries.)
static const int c_ftypeVsiteStart = F_VSITE2;
static const int c_ftypeVsiteEnd   = F_VSITEN + 1;
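
/* For reference: in an interaction list ilist[ftype].iatoms each vsite entry
 * is stored as { parameter index, vsite atom, constructing atom(s) }, so an
 * F_VSITE3 entry occupies 1 + NRAL(F_VSITE3) = 5 ints: { tp, av, ai, aj, ak }.
 * F_VSITEN is the exception: it is stored as one { tp, av, ai } triplet per
 * constructing atom, which is why the F_VSITEN code paths use a stride of 3.
 */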
/* Returns the sum of the vsite ilist sizes over all vsite types */
static int vsiteIlistNrCount(const t_ilist *ilist)
    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
        nr += ilist[ftype].nr;

static int pbc_rvec_sub(const t_pbc *pbc, const rvec xi, const rvec xj, rvec dx)
        return pbc_dx_aiuc(pbc, xi, xj, dx);
        rvec_sub(xi, xj, dx);

/* Vsite construction routines */
static void constr_vsite2(const rvec xi, const rvec xj, rvec x, real a, const t_pbc *pbc)
        pbc_dx_aiuc(pbc, xj, xi, dx);
        x[XX] = xi[XX] + a*dx[XX];
        x[YY] = xi[YY] + a*dx[YY];
        x[ZZ] = xi[ZZ] + a*dx[ZZ];
        x[XX] = b*xi[XX] + a*xj[XX];
        x[YY] = b*xi[YY] + a*xj[YY];
        x[ZZ] = b*xi[ZZ] + a*xj[ZZ];
    /* TOTAL: 10 flops */
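
/* I.e. the 2-atom linear construction places the vsite at
 *   x_v = (1 - a) x_i + a x_j = x_i + a (x_j - x_i),
 * which the two branches above compute with and without PBC, respectively.
 */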
static void constr_vsite3(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b,
        pbc_dx_aiuc(pbc, xj, xi, dxj);
        pbc_dx_aiuc(pbc, xk, xi, dxk);
        x[XX] = xi[XX] + a*dxj[XX] + b*dxk[XX];
        x[YY] = xi[YY] + a*dxj[YY] + b*dxk[YY];
        x[ZZ] = xi[ZZ] + a*dxj[ZZ] + b*dxk[ZZ];
        x[XX] = c*xi[XX] + a*xj[XX] + b*xk[XX];
        x[YY] = c*xi[YY] + a*xj[YY] + b*xk[YY];
        x[ZZ] = c*xi[ZZ] + a*xj[ZZ] + b*xk[ZZ];
    /* TOTAL: 17 flops */
static void constr_vsite3FD(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b,
    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xj, xjk);
    /* temp goes from i to a point on the line jk */
    temp[XX] = xij[XX] + a*xjk[XX];
    temp[YY] = xij[YY] + a*xjk[YY];
    temp[ZZ] = xij[ZZ] + a*xjk[ZZ];
    c = b*gmx::invsqrt(iprod(temp, temp));
    x[XX] = xi[XX] + c*temp[XX];
    x[YY] = xi[YY] + c*temp[YY];
    x[ZZ] = xi[ZZ] + c*temp[ZZ];
    /* TOTAL: 34 flops */
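
/* I.e. 3fd places the vsite at a fixed distance b from x_i, along the
 * direction from x_i towards the point x_j + a*r_jk on the line j-k:
 *   x_v = x_i + b (r_ij + a r_jk) / |r_ij + a r_jk|
 */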
static void constr_vsite3FAD(const rvec xi, const rvec xj, const rvec xk, rvec x, real a, real b, const t_pbc *pbc)
    real a1, b1, c1, invdij;
    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xj, xjk);
    invdij = gmx::invsqrt(iprod(xij, xij));
    c1     = invdij * invdij * iprod(xij, xjk);
    xp[XX] = xjk[XX] - c1*xij[XX];
    xp[YY] = xjk[YY] - c1*xij[YY];
    xp[ZZ] = xjk[ZZ] - c1*xij[ZZ];
    b1 = b*gmx::invsqrt(iprod(xp, xp));
    x[XX] = xi[XX] + a1*xij[XX] + b1*xp[XX];
    x[YY] = xi[YY] + a1*xij[YY] + b1*xp[YY];
    x[ZZ] = xi[ZZ] + a1*xij[ZZ] + b1*xp[ZZ];
    /* TOTAL: 63 flops */
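
/* I.e. 3fad, written out (xp is the component of r_jk perpendicular to
 * r_ij, in the i-j-k plane, and a1 = a/|r_ij|):
 *   xp  = r_jk - (r_ij . r_jk / |r_ij|^2) r_ij
 *   x_v = x_i + a r_ij/|r_ij| + b xp/|xp|
 * so a and b fix the angle with r_ij and the distance from x_i.
 */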
static void constr_vsite3OUT(const rvec xi, const rvec xj, const rvec xk, rvec x,
                             real a, real b, real c, const t_pbc *pbc)
    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xi, xik);
    cprod(xij, xik, temp);
    x[XX] = xi[XX] + a*xij[XX] + b*xik[XX] + c*temp[XX];
    x[YY] = xi[YY] + a*xij[YY] + b*xik[YY] + c*temp[YY];
    x[ZZ] = xi[ZZ] + a*xij[ZZ] + b*xik[ZZ] + c*temp[ZZ];
    /* TOTAL: 33 flops */
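
/* I.e. the out-of-plane construction
 *   x_v = x_i + a r_ij + b r_ik + c (r_ij x r_ik)
 * is the only 3-atom construction that can place the vsite outside the
 * plane of its constructing atoms (via the cross-product term).
 */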
static void constr_vsite4FD(const rvec xi, const rvec xj, const rvec xk, const rvec xl, rvec x,
                            real a, real b, real c, const t_pbc *pbc)
    rvec xij, xjk, xjl, temp;
    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xj, xjk);
    pbc_rvec_sub(pbc, xl, xj, xjl);
    /* temp goes from i to a point on the plane jkl */
    temp[XX] = xij[XX] + a*xjk[XX] + b*xjl[XX];
    temp[YY] = xij[YY] + a*xjk[YY] + b*xjl[YY];
    temp[ZZ] = xij[ZZ] + a*xjk[ZZ] + b*xjl[ZZ];
    d = c*gmx::invsqrt(iprod(temp, temp));
    x[XX] = xi[XX] + d*temp[XX];
    x[YY] = xi[YY] + d*temp[YY];
    x[ZZ] = xi[ZZ] + d*temp[ZZ];
    /* TOTAL: 43 flops */
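
/* I.e. 4fd places the vsite at a fixed distance c from x_i, along the
 * direction from x_i towards the point x_j + a*r_jk + b*r_jl in the
 * plane j-k-l:
 *   x_v = x_i + c (r_ij + a r_jk + b r_jl) / |r_ij + a r_jk + b r_jl|
 */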
static void constr_vsite4FDN(const rvec xi, const rvec xj, const rvec xk, const rvec xl, rvec x,
                             real a, real b, real c, const t_pbc *pbc)
    rvec xij, xik, xil, ra, rb, rja, rjb, rm;
    pbc_rvec_sub(pbc, xj, xi, xij);
    pbc_rvec_sub(pbc, xk, xi, xik);
    pbc_rvec_sub(pbc, xl, xi, xil);
    rvec_sub(ra, xij, rja);
    rvec_sub(rb, xij, rjb);
    d = c*gmx::invsqrt(norm2(rm));
    x[XX] = xi[XX] + d*rm[XX];
    x[YY] = xi[YY] + d*rm[YY];
    x[ZZ] = xi[ZZ] + d*rm[ZZ];
    /* TOTAL: 47 flops */
static int constr_vsiten(const t_iatom *ia, const t_iparams ip[],
                         rvec *x, const t_pbc *pbc)
    n3 = 3*ip[ia[0]].vsiten.n;
    copy_rvec(x[ai], x1);
    for (int i = 3; i < n3; i += 3)
        a = ip[ia[i]].vsiten.a;
            pbc_dx_aiuc(pbc, x[ai], x1, dx);
            rvec_sub(x[ai], x1, dx);
        dsum[XX] += a*dx[XX];
        dsum[YY] += a*dx[YY];
        dsum[ZZ] += a*dx[ZZ];
    x[av][XX] = x1[XX] + dsum[XX];
    x[av][YY] = x1[YY] + dsum[YY];
    x[av][ZZ] = x1[ZZ] + dsum[ZZ];
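
/* I.e. vsiten places the vsite at a linear combination relative to the
 * first constructing atom x_1:
 *   x_v = x_1 + sum_{i>1} a_i (x_i - x_1)
 * which, with an implicit weight w_1 = 1 - sum_{i>1} a_i for x_1, is the
 * weighted average sum_i w_i x_i.
 */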
static void construct_vsites_thread(const gmx_vsite_t *vsite,
                                    const t_iparams ip[], const t_ilist ilist[],
                                    const t_pbc *pbc_null)
    const t_pbc *pbc_null2;
    bPBCAll = (pbc_null != nullptr && !vsite->bHaveChargeGroups);
    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
        if (ilist[ftype].nr == 0)
        int nra = interaction_function[ftype].nratoms;
        int nr  = ilist[ftype].nr;
        const t_iatom *ia = ilist[ftype].iatoms;
            pbc_null2 = pbc_null;
        else if (pbc_null != nullptr)
            vsite_pbc = vsite->vsite_pbc_loc[ftype - c_ftypeVsiteStart];
        for (int i = 0; i < nr; )
            /* The vsite and constructing atoms */
            /* Constants for constructing vsites */
            real a1 = ip[tp].vsite.a;
            /* Check what kind of pbc we need to use */
                /* No charge groups, vsite follows its own pbc */
                copy_rvec(x[avsite], xpbc);
            else if (vsite_pbc != nullptr)
                pbc_atom = vsite_pbc[i/(1 + nra)];
                    /* We need to copy the coordinates here,
                     * since for single-atom charge groups pbc_atom
                     * is the vsite itself.
                    copy_rvec(x[pbc_atom], xpbc);
                    pbc_null2 = pbc_null;
            /* Copy the old position */
            copy_rvec(x[avsite], xv);
            /* Construct the vsite depending on type */
                    constr_vsite2(x[ai], x[aj], x[avsite], a1, pbc_null2);
                    constr_vsite3(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
                    constr_vsite3FD(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
                    constr_vsite3FAD(x[ai], x[aj], x[ak], x[avsite], a1, b1, pbc_null2);
                    constr_vsite3OUT(x[ai], x[aj], x[ak], x[avsite], a1, b1, c1, pbc_null2);
                    constr_vsite4FD(x[ai], x[aj], x[ak], x[al], x[avsite], a1, b1, c1,
                    constr_vsite4FDN(x[ai], x[aj], x[ak], x[al], x[avsite], a1, b1, c1,
                    inc = constr_vsiten(ia, ip, x, pbc_null2);
                    gmx_fatal(FARGS, "No such vsite type %d in %s, line %d",
                              ftype, __FILE__, __LINE__);
            /* Match the pbc of this vsite to the rest of its charge group */
                int ishift = pbc_dx_aiuc(pbc_null, x[avsite], xpbc, dx);
                if (ishift != CENTRAL)
                    rvec_add(xpbc, dx, x[avsite]);
            /* Calculate velocity of vsite... */
                rvec_sub(x[avsite], xv, vv);
                svmul(inv_dt, vv, v[avsite]);
            /* Increment loop variables */
void construct_vsites(const gmx_vsite_t *vsite,
                      const t_iparams ip[], const t_ilist ilist[],
                      int ePBC, gmx_bool bMolPBC,
                      t_commrec *cr, matrix box)
    t_pbc    pbc, *pbc_null;
    gmx_bool bDomDec = cr && DOMAINDECOMP(cr);
    /* We only need to do pbc when we have inter-cg vsites */
    if (ePBC != epbcNONE && (bDomDec || bMolPBC) && vsite->n_intercg_vsite)
        /* This is wasting some CPU time as we now do this multiple times
        clear_ivec(null_ivec);
        pbc_null = set_pbc_dd(&pbc, ePBC,
                              DOMAINDECOMP(cr) ? cr->dd->nc : null_ivec,
        dd_move_x_vsites(cr->dd, box, x);
    if (vsite->nthreads == 1)
        construct_vsites_thread(vsite,
#pragma omp parallel num_threads(vsite->nthreads)
            int th = gmx_omp_get_thread_num();
            construct_vsites_thread(vsite,
                                    ip, vsite->tData[th]->ilist,
            if (vsite->tData[th]->useInterdependentTask)
                /* Here we don't need a barrier (unlike the spreading),
                 * since both tasks only construct vsites from particles,
                 * or local vsites, not from non-local vsites.
                construct_vsites_thread(vsite,
                                        ip, vsite->tData[th]->idTask.ilist,
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
        /* Now we can construct the vsites that might depend on other vsites */
        construct_vsites_thread(vsite,
                                ip, vsite->tData[vsite->nthreads]->ilist,
static void spread_vsite2(const t_iatom ia[], real a,
                          rvec f[], rvec fshift[],
                          const t_pbc *pbc, const t_graph *g)
    svmul(1 - a, f[av], fi);
    svmul(    a, f[av], fj);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, av), di);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), di);
        siv = pbc_dx_aiuc(pbc, x[ai], x[av], dx);
        sij = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
    if (fshift && (siv != CENTRAL || sij != CENTRAL))
        rvec_inc(fshift[siv], f[av]);
        rvec_dec(fshift[CENTRAL], fi);
        rvec_dec(fshift[sij], fj);
    /* TOTAL: 13 flops */
void construct_vsites_mtop(gmx_vsite_t *vsite,
                           gmx_mtop_t *mtop, rvec x[])
    for (int mb = 0; mb < mtop->nmolblock; mb++)
        const gmx_molblock_t *molb = &mtop->molblock[mb];
        const gmx_moltype_t  *molt = &mtop->moltype[molb->type];
        for (int mol = 0; mol < molb->nmol; mol++)
            construct_vsites(vsite, x+as, 0.0, nullptr,
                             mtop->ffparams.iparams, molt->ilist,
                             epbcNONE, TRUE, nullptr, nullptr);
            as += molt->atoms.nr;
static void spread_vsite3(const t_iatom ia[], real a, real b,
                          rvec f[], rvec fshift[],
                          const t_pbc *pbc, const t_graph *g)
    svmul(1 - a - b, f[av], fi);
    svmul(        a, f[av], fj);
    svmul(        b, f[av], fk);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, ia[1]), di);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, aj), di);
        ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, ak), di);
        siv = pbc_dx_aiuc(pbc, x[ai], x[av], dx);
        sij = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
        sik = pbc_dx_aiuc(pbc, x[ai], x[ak], dx);
    if (fshift && (siv != CENTRAL || sij != CENTRAL || sik != CENTRAL))
        rvec_inc(fshift[siv], f[av]);
        rvec_dec(fshift[CENTRAL], fi);
        rvec_dec(fshift[sij], fj);
        rvec_dec(fshift[sik], fk);
    /* TOTAL: 20 flops */
static void spread_vsite3FD(const t_iatom ia[], real a, real b,
                            rvec f[], rvec fshift[],
                            gmx_bool VirCorr, matrix dxdf,
                            const t_pbc *pbc, const t_graph *g)
    real    c, invl, fproj, a1;
    rvec    xvi, xij, xjk, xix, fv, temp;
    t_iatom av, ai, aj, ak;
    copy_rvec(f[av], fv);
    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
    /* xix goes from i to point x on the line jk */
    xix[XX] = xij[XX]+a*xjk[XX];
    xix[YY] = xij[YY]+a*xjk[YY];
    xix[ZZ] = xij[ZZ]+a*xjk[ZZ];
    invl = gmx::invsqrt(iprod(xix, xix));
    fproj = iprod(xix, fv)*invl*invl; /* = (xix . f)/(xix . xix) */
    temp[XX] = c*(fv[XX]-fproj*xix[XX]);
    temp[YY] = c*(fv[YY]-fproj*xix[YY]);
    temp[ZZ] = c*(fv[ZZ]-fproj*xix[ZZ]);
    /* c is already calculated in constr_vsite3FD;
       storing c somewhere will save 26 flops! */
    f[ai][XX] += fv[XX] - temp[XX];
    f[ai][YY] += fv[YY] - temp[YY];
    f[ai][ZZ] += fv[ZZ] - temp[ZZ];
    f[aj][XX] += a1*temp[XX];
    f[aj][YY] += a1*temp[YY];
    f[aj][ZZ] += a1*temp[ZZ];
    f[ak][XX] += a*temp[XX];
    f[ak][YY] += a*temp[YY];
    f[ak][ZZ] += a*temp[ZZ];
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    if (fshift && (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL))
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - (1 + a)*temp[XX];
        fshift[CENTRAL][YY] += fv[YY] - (1 + a)*temp[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - (1 + a)*temp[ZZ];
        fshift[    sji][XX] += temp[XX];
        fshift[    sji][YY] += temp[YY];
        fshift[    sji][ZZ] += temp[ZZ];
        fshift[    skj][XX] += a*temp[XX];
        fshift[    skj][YY] += a*temp[YY];
        fshift[    skj][ZZ] += a*temp[ZZ];
    /* When VirCorr=TRUE, the virial for the current forces is not
     * calculated from the redistributed forces. This means that
     * the effect of non-linear virtual site constructions on the virial
     * needs to be added separately. This contribution can be calculated
     * in many ways, but the simplest and cheapest way is to use
     * the first constructing atom ai as a reference position in space:
     * subtract (xv-xi)*fv and add (xj-xi)*fj + (xk-xi)*fk.
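     * The dxdf matrix accumulated here is contracted into the virial only
     * at the end of spread_vsite_f, as vir[i][j] += -0.5*dxdf[i][j].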
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (int i = 0; i < DIM; i++)
            for (int j = 0; j < DIM; j++)
                /* As xix is a linear combination of j and k, use that here */
                dxdf[i][j] += -xiv[i]*fv[j] + xix[i]*temp[j];
    /* TOTAL: 61 flops */
static void spread_vsite3FAD(const t_iatom ia[], real a, real b,
                             rvec f[], rvec fshift[],
                             gmx_bool VirCorr, matrix dxdf,
                             const t_pbc *pbc, const t_graph *g)
    rvec    xvi, xij, xjk, xperp, Fpij, Fppp, fv, f1, f2, f3;
    real    a1, b1, c1, c2, invdij, invdij2, invdp, fproj;
    t_iatom av, ai, aj, ak;
    int     svi, sji, skj, d;
    copy_rvec(f[ia[1]], fv);
    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
    invdij    = gmx::invsqrt(iprod(xij, xij));
    invdij2   = invdij * invdij;
    c1        = iprod(xij, xjk) * invdij2;
    xperp[XX] = xjk[XX] - c1*xij[XX];
    xperp[YY] = xjk[YY] - c1*xij[YY];
    xperp[ZZ] = xjk[ZZ] - c1*xij[ZZ];
    /* xperp in plane ijk, perp. to ij */
    invdp = gmx::invsqrt(iprod(xperp, xperp));
    /* a1, b1 and c1 are already calculated in constr_vsite3FAD;
       storing them somewhere will save 45 flops! */
    fproj = iprod(xij, fv)*invdij2;
    svmul(fproj, xij, Fpij);                          /* proj. f on xij */
    svmul(iprod(xperp, fv)*invdp*invdp, xperp, Fppp); /* proj. f on xperp */
    svmul(b1*fproj, xperp, f3);
    rvec_sub(fv, Fpij, f1); /* f1 = f - Fpij */
    rvec_sub(f1, Fppp, f2); /* f2 = f - Fpij - Fppp */
    for (d = 0; (d < DIM); d++)
    f[ai][XX] += fv[XX] - f1[XX] + c1*f2[XX] + f3[XX];
    f[ai][YY] += fv[YY] - f1[YY] + c1*f2[YY] + f3[YY];
    f[ai][ZZ] += fv[ZZ] - f1[ZZ] + c1*f2[ZZ] + f3[ZZ];
    f[aj][XX] += f1[XX] - c2*f2[XX] - f3[XX];
    f[aj][YY] += f1[YY] - c2*f2[YY] - f3[YY];
    f[aj][ZZ] += f1[ZZ] - c2*f2[ZZ] - f3[ZZ];
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    if (fshift && (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL))
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - f1[XX] - (1-c1)*f2[XX] + f3[XX];
        fshift[CENTRAL][YY] += fv[YY] - f1[YY] - (1-c1)*f2[YY] + f3[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - f1[ZZ] - (1-c1)*f2[ZZ] + f3[ZZ];
        fshift[    sji][XX] += f1[XX] - c1 *f2[XX] - f3[XX];
        fshift[    sji][YY] += f1[YY] - c1 *f2[YY] - f3[YY];
        fshift[    sji][ZZ] += f1[ZZ] - c1 *f2[ZZ] - f3[ZZ];
        fshift[    skj][XX] += f2[XX];
        fshift[    skj][YY] += f2[YY];
        fshift[    skj][ZZ] += f2[ZZ];
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (i = 0; i < DIM; i++)
            for (j = 0; j < DIM; j++)
                /* Note that xik=xij+xjk, so we have to add xij*f2 */
                + xij[i]*(f1[j] + (1 - c2)*f2[j] - f3[j])
    /* TOTAL: 113 flops */
static void spread_vsite3OUT(const t_iatom ia[], real a, real b, real c,
                             rvec f[], rvec fshift[],
                             gmx_bool VirCorr, matrix dxdf,
                             const t_pbc *pbc, const t_graph *g)
    rvec xvi, xij, xik, fv, fj, fk;
    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    ski = pbc_rvec_sub(pbc, x[ak], x[ai], xik);
    copy_rvec(f[av], fv);
    fj[XX] =  a*fv[XX]     - xik[ZZ]*cfy + xik[YY]*cfz;
    fj[YY] =  xik[ZZ]*cfx  + a*fv[YY]    - xik[XX]*cfz;
    fj[ZZ] = -xik[YY]*cfx  + xik[XX]*cfy + a*fv[ZZ];
    fk[XX] =  b*fv[XX]     + xij[ZZ]*cfy - xij[YY]*cfz;
    fk[YY] = -xij[ZZ]*cfx  + b*fv[YY]    + xij[XX]*cfz;
    fk[ZZ] =  xij[YY]*cfx  - xij[XX]*cfy + b*fv[ZZ];
    f[ai][XX] += fv[XX] - fj[XX] - fk[XX];
    f[ai][YY] += fv[YY] - fj[YY] - fk[YY];
    f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
    rvec_inc(f[aj], fj);
    rvec_inc(f[ak], fk);
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, ai), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    if (fshift && (svi != CENTRAL || sji != CENTRAL || ski != CENTRAL))
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX];
        fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
        rvec_inc(fshift[sji], fj);
        rvec_inc(fshift[ski], fk);
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (int i = 0; i < DIM; i++)
            for (int j = 0; j < DIM; j++)
                dxdf[i][j] += -xiv[i]*fv[j] + xij[i]*fj[j] + xik[i]*fk[j];
    /* TOTAL: 54 flops */
static void spread_vsite4FD(const t_iatom ia[], real a, real b, real c,
                            rvec f[], rvec fshift[],
                            gmx_bool VirCorr, matrix dxdf,
                            const t_pbc *pbc, const t_graph *g)
    real d, invl, fproj, a1;
    rvec xvi, xij, xjk, xjl, xix, fv, temp;
    int  av, ai, aj, ak, al;
    int  svi, sji, skj, slj, m;
    sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
    slj = pbc_rvec_sub(pbc, x[al], x[aj], xjl);
    /* xix goes from i to point x on the plane jkl */
    for (m = 0; m < DIM; m++)
        xix[m] = xij[m] + a*xjk[m] + b*xjl[m];
    invl = gmx::invsqrt(iprod(xix, xix));
    /* 4 + ?10? flops */
    copy_rvec(f[av], fv);
    fproj = iprod(xix, fv)*invl*invl; /* = (xix . f)/(xix . xix) */
    for (m = 0; m < DIM; m++)
        temp[m] = d*(fv[m] - fproj*xix[m]);
    /* c is already calculated in constr_vsite4FD;
       storing c somewhere will save 35 flops! */
    for (m = 0; m < DIM; m++)
        f[ai][m] += fv[m] - temp[m];
        f[aj][m] += a1*temp[m];
        f[ak][m] += a*temp[m];
        f[al][m] += b*temp[m];
        ivec_sub(SHIFT_IVEC(g, ia[1]), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, aj), di);
        ivec_sub(SHIFT_IVEC(g, al), SHIFT_IVEC(g, aj), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
        (svi != CENTRAL || sji != CENTRAL || skj != CENTRAL || slj != CENTRAL))
        rvec_dec(fshift[svi], fv);
        for (m = 0; m < DIM; m++)
            fshift[CENTRAL][m] += fv[m] - (1 + a + b)*temp[m];
            fshift[    sji][m] += temp[m];
            fshift[    skj][m] += a*temp[m];
            fshift[    slj][m] += b*temp[m];
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (i = 0; i < DIM; i++)
            for (j = 0; j < DIM; j++)
                dxdf[i][j] += -xiv[i]*fv[j] + xix[i]*temp[j];
    /* TOTAL: 77 flops */
static void spread_vsite4FDN(const t_iatom ia[], real a, real b, real c,
                             rvec f[], rvec fshift[],
                             gmx_bool VirCorr, matrix dxdf,
                             const t_pbc *pbc, const t_graph *g)
    rvec xvi, xij, xik, xil, ra, rb, rja, rjb, rab, rm, rt;
    rvec fv, fj, fk, fl;
    int  av, ai, aj, ak, al;
    int  svi, sij, sik, sil;
    /* DEBUG: check atom indices */
    copy_rvec(f[av], fv);
    sij = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
    sik = pbc_rvec_sub(pbc, x[ak], x[ai], xik);
    sil = pbc_rvec_sub(pbc, x[al], x[ai], xil);
    rvec_sub(ra, xij, rja);
    rvec_sub(rb, xij, rjb);
    rvec_sub(rb, ra, rab);
    cprod(rja, rjb, rm);
    invrm = gmx::invsqrt(norm2(rm));
    denom = invrm*invrm;
    cfx = c*invrm*fv[XX];
    cfy = c*invrm*fv[YY];
    cfz = c*invrm*fv[ZZ];
    fj[XX] = (        -rm[XX]*rt[XX]) * cfx + ( rab[ZZ]-rm[YY]*rt[XX]) * cfy + (-rab[YY]-rm[ZZ]*rt[XX]) * cfz;
    fj[YY] = (-rab[ZZ]-rm[XX]*rt[YY]) * cfx + (        -rm[YY]*rt[YY]) * cfy + ( rab[XX]-rm[ZZ]*rt[YY]) * cfz;
    fj[ZZ] = ( rab[YY]-rm[XX]*rt[ZZ]) * cfx + (-rab[XX]-rm[YY]*rt[ZZ]) * cfy + (        -rm[ZZ]*rt[ZZ]) * cfz;
    fk[XX] = (          -rm[XX]*rt[XX]) * cfx + (-a*rjb[ZZ]-rm[YY]*rt[XX]) * cfy + ( a*rjb[YY]-rm[ZZ]*rt[XX]) * cfz;
    fk[YY] = ( a*rjb[ZZ]-rm[XX]*rt[YY]) * cfx + (          -rm[YY]*rt[YY]) * cfy + (-a*rjb[XX]-rm[ZZ]*rt[YY]) * cfz;
    fk[ZZ] = (-a*rjb[YY]-rm[XX]*rt[ZZ]) * cfx + ( a*rjb[XX]-rm[YY]*rt[ZZ]) * cfy + (          -rm[ZZ]*rt[ZZ]) * cfz;
    fl[XX] = (          -rm[XX]*rt[XX]) * cfx + ( b*rja[ZZ]-rm[YY]*rt[XX]) * cfy + (-b*rja[YY]-rm[ZZ]*rt[XX]) * cfz;
    fl[YY] = (-b*rja[ZZ]-rm[XX]*rt[YY]) * cfx + (          -rm[YY]*rt[YY]) * cfy + ( b*rja[XX]-rm[ZZ]*rt[YY]) * cfz;
    fl[ZZ] = ( b*rja[YY]-rm[XX]*rt[ZZ]) * cfx + (-b*rja[XX]-rm[YY]*rt[ZZ]) * cfy + (          -rm[ZZ]*rt[ZZ]) * cfz;
    f[ai][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
    f[ai][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
    f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
    rvec_inc(f[aj], fj);
    rvec_inc(f[ak], fk);
    rvec_inc(f[al], fl);
        ivec_sub(SHIFT_IVEC(g, av), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, aj), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, ak), SHIFT_IVEC(g, ai), di);
        ivec_sub(SHIFT_IVEC(g, al), SHIFT_IVEC(g, ai), di);
        svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
    if (fshift && (svi != CENTRAL || sij != CENTRAL || sik != CENTRAL || sil != CENTRAL))
        rvec_dec(fshift[svi], fv);
        fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
        fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
        fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
        rvec_inc(fshift[sij], fj);
        rvec_inc(fshift[sik], fk);
        rvec_inc(fshift[sil], fl);
        pbc_rvec_sub(pbc, x[av], x[ai], xiv);
        for (i = 0; i < DIM; i++)
            for (j = 0; j < DIM; j++)
                dxdf[i][j] += -xiv[i]*fv[j] + xij[i]*fj[j] + xik[i]*fk[j] + xil[i]*fl[j];
    /* Total: 207 flops (Yuck!) */
static int spread_vsiten(const t_iatom ia[], const t_iparams ip[],
                         rvec f[], rvec fshift[],
                         const t_pbc *pbc, const t_graph *g)
    n3 = 3*ip[ia[0]].vsiten.n;
    copy_rvec(x[av], xv);
    for (i = 0; i < n3; i += 3)
            ivec_sub(SHIFT_IVEC(g, ai), SHIFT_IVEC(g, av), di);
            siv = pbc_dx_aiuc(pbc, x[ai], xv, dx);
        a = ip[ia[i]].vsiten.a;
        svmul(a, f[av], fi);
        rvec_inc(f[ai], fi);
        if (fshift && siv != CENTRAL)
            rvec_inc(fshift[siv], fi);
            rvec_dec(fshift[CENTRAL], fi);

static int vsite_count(const t_ilist *ilist, int ftype)
    if (ftype == F_VSITEN)
        return ilist[ftype].nr/3;
        return ilist[ftype].nr/(1 + interaction_function[ftype].nratoms);
static void spread_vsite_f_thread(const gmx_vsite_t *vsite,
                                  rvec f[], rvec *fshift,
                                  gmx_bool VirCorr, matrix dxdf,
                                  t_iparams ip[], const t_ilist ilist[],
                                  const t_graph *g, const t_pbc *pbc_null)
    const t_pbc *pbc_null2;
    const int   *vsite_pbc;
    bPBCAll = (pbc_null != nullptr && !vsite->bHaveChargeGroups);
    /* This loop goes backwards: higher-type vsites can be built  *
     * from lower-type vsites, so their forces must spread first. */
    pbc_null2 = nullptr;
    vsite_pbc = nullptr;
    for (int ftype = c_ftypeVsiteEnd - 1; ftype >= c_ftypeVsiteStart; ftype--)
        if (ilist[ftype].nr == 0)
        int nra = interaction_function[ftype].nratoms;
        int nr  = ilist[ftype].nr;
        const t_iatom *ia = ilist[ftype].iatoms;
            pbc_null2 = pbc_null;
        else if (pbc_null != nullptr)
            vsite_pbc = vsite->vsite_pbc_loc[ftype - c_ftypeVsiteStart];
        for (int i = 0; i < nr; )
            if (vsite_pbc != nullptr)
                if (vsite_pbc[i/(1 + nra)] > -2)
                    pbc_null2 = pbc_null;
                    pbc_null2 = nullptr;
            /* Constants for constructing */
            a1 = ip[tp].vsite.a;
            /* Spread the vsite force depending on type */
                    spread_vsite2(ia, a1, x, f, fshift, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    spread_vsite3(ia, a1, b1, x, f, fshift, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    spread_vsite3FD(ia, a1, b1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    spread_vsite3FAD(ia, a1, b1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    spread_vsite3OUT(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    spread_vsite4FD(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    b1 = ip[tp].vsite.b;
                    c1 = ip[tp].vsite.c;
                    spread_vsite4FDN(ia, a1, b1, c1, x, f, fshift, VirCorr, dxdf, pbc_null2, g);
                    inc = spread_vsiten(ia, ip, x, f, fshift, pbc_null2, g);
                    gmx_fatal(FARGS, "No such vsite type %d in %s, line %d",
                              ftype, __FILE__, __LINE__);
            clear_rvec(f[ia[1]]);
            /* Increment loop variables */
/*! \brief Clears the task force buffer elements that are written by task idTask */
static void clearTaskForceBufferUsedElements(InterdependentTask *idTask)
    int ntask = idTask->spreadTask.size();
    for (int ti = 0; ti < ntask; ti++)
        const AtomIndex *atomList = &idTask->atomIndex[idTask->spreadTask[ti]];
        int   natom = atomList->atom.size();
        RVec *force = idTask->force.data();
        for (int i = 0; i < natom; i++)
            clear_rvec(force[atomList->atom[i]]);
void spread_vsite_f(const gmx_vsite_t *vsite,
                    const rvec * gmx_restrict x,
                    rvec * gmx_restrict f, rvec * gmx_restrict fshift,
                    gmx_bool VirCorr, matrix vir,
                    t_nrnb *nrnb, const t_idef *idef,
                    int ePBC, gmx_bool bMolPBC, const t_graph *g, const matrix box,
    t_pbc pbc, *pbc_null;
    /* We only need to do pbc when we have inter-cg vsites */
    if ((DOMAINDECOMP(cr) || bMolPBC) && vsite->n_intercg_vsite)
        /* This is wasting some CPU time as we now do this multiple times
        pbc_null = set_pbc_dd(&pbc, ePBC, cr->dd ? cr->dd->nc : nullptr, FALSE, box);
    if (DOMAINDECOMP(cr))
        dd_clear_f_vsites(cr->dd, f);
    if (vsite->nthreads == 1)
            clear_mat(vsite->tData[0]->dxdf);
        spread_vsite_f_thread(vsite,
                              VirCorr, vsite->tData[0]->dxdf,
                              idef->iparams, idef->il,
        /* First spread the vsites that might depend on non-local vsites */
            clear_mat(vsite->tData[vsite->nthreads]->dxdf);
        spread_vsite_f_thread(vsite,
                              VirCorr, vsite->tData[vsite->nthreads]->dxdf,
                              vsite->tData[vsite->nthreads]->ilist,
#pragma omp parallel num_threads(vsite->nthreads)
            int          thread = gmx_omp_get_thread_num();
            VsiteThread *tData  = vsite->tData[thread];
            if (thread == 0 || fshift == nullptr)
                fshift_t = tData->fshift;
                for (int i = 0; i < SHIFTS; i++)
                    clear_rvec(fshift_t[i]);
                clear_mat(tData->dxdf);
            if (tData->useInterdependentTask)
                /* Spread the vsites that spread outside our local range.
                 * This is done using the thread-local force buffer
                 * idTask->force. First we need to copy the input vsite
                 * forces into that buffer.
                InterdependentTask *idTask = &tData->idTask;
                /* Clear the buffer elements set by our task during
                 * the last call to spread_vsite_f.
                clearTaskForceBufferUsedElements(idTask);
                int nvsite = idTask->vsite.size();
                for (int i = 0; i < nvsite; i++)
                    copy_rvec(f[idTask->vsite[i]],
                              idTask->force[idTask->vsite[i]]);
                spread_vsite_f_thread(vsite,
                                      x, as_rvec_array(idTask->force.data()), fshift_t,
                                      VirCorr, tData->dxdf,
                                      tData->idTask.ilist,
                /* We need a barrier before reducing forces below
                 * that have been produced by a different thread above.
                /* Loop over all thread tasks and reduce the forces they
                 * produced on atoms that fall in our range.
                 * Note that atomic reduction would be a simpler solution,
                 * but that might not have good support on all platforms.
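                /* A minimal sketch of that atomic alternative (hypothetical,
                 * not used here; f_foreign stands for a foreign task's buffer
                 * and atoms[] for the atoms it wrote in our range):
                 *
                 *     for (int i = 0; i < natom; i++)
                 *     {
                 *         for (int d = 0; d < DIM; d++)
                 *         {
                 * #pragma omp atomic
                 *             f[atoms[i]][d] += f_foreign[atoms[i]][d];
                 *         }
                 *     }
                 *
                 * The index built over reduceTask below instead visits only
                 * the tasks that actually touched our range, without atomics.
                 */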
                int ntask = idTask->reduceTask.size();
                for (int ti = 0; ti < ntask; ti++)
                    const InterdependentTask *idt_foreign = &vsite->tData[idTask->reduceTask[ti]]->idTask;
                    const AtomIndex          *atomList    = &idt_foreign->atomIndex[thread];
                    const RVec               *f_foreign   = idt_foreign->force.data();
                    int natom = atomList->atom.size();
                    for (int i = 0; i < natom; i++)
                        int ind = atomList->atom[i];
                        rvec_inc(f[ind], f_foreign[ind]);
                        /* Clearing of f_foreign is done at the next step */
                /* Clear the vsite forces, both in f and force */
                for (int i = 0; i < nvsite; i++)
                    int ind = tData->idTask.vsite[i];
                    clear_rvec(tData->idTask.force[ind]);
            /* Spread the vsites that spread locally only */
            spread_vsite_f_thread(vsite,
                                  VirCorr, tData->dxdf,
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    if (fshift != nullptr)
        for (int th = 1; th < vsite->nthreads; th++)
            for (int i = 0; i < SHIFTS; i++)
                rvec_inc(fshift[i], vsite->tData[th]->fshift[i]);
        for (int th = 0; th < (vsite->nthreads == 1 ? 1 : vsite->nthreads + 1); th++)
            for (int i = 0; i < DIM; i++)
                for (int j = 0; j < DIM; j++)
                    vir[i][j] += -0.5*vsite->tData[th]->dxdf[i][j];
    if (DOMAINDECOMP(cr))
        dd_move_f_vsites(cr->dd, f, fshift);
    inc_nrnb(nrnb, eNR_VSITE2,    vsite_count(idef->il, F_VSITE2));
    inc_nrnb(nrnb, eNR_VSITE3,    vsite_count(idef->il, F_VSITE3));
    inc_nrnb(nrnb, eNR_VSITE3FD,  vsite_count(idef->il, F_VSITE3FD));
    inc_nrnb(nrnb, eNR_VSITE3FAD, vsite_count(idef->il, F_VSITE3FAD));
    inc_nrnb(nrnb, eNR_VSITE3OUT, vsite_count(idef->il, F_VSITE3OUT));
    inc_nrnb(nrnb, eNR_VSITE4FD,  vsite_count(idef->il, F_VSITE4FD));
    inc_nrnb(nrnb, eNR_VSITE4FDN, vsite_count(idef->il, F_VSITE4FDN));
    inc_nrnb(nrnb, eNR_VSITEN,    vsite_count(idef->il, F_VSITEN));
static int *atom2cg(t_block *cgs)
    snew(a2cg, cgs->index[cgs->nr]);
    for (int cg = 0; cg < cgs->nr; cg++)
        for (int i = cgs->index[cg]; i < cgs->index[cg+1]; i++)

int count_intercg_vsites(const gmx_mtop_t *mtop)
    gmx_molblock_t *molb;
    gmx_moltype_t  *molt;
    int n_intercg_vsite;
    n_intercg_vsite = 0;
    for (int mb = 0; mb < mtop->nmolblock; mb++)
        molb = &mtop->molblock[mb];
        molt = &mtop->moltype[molb->type];
        a2cg = atom2cg(&molt->cgs);
        for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
            int      nral = NRAL(ftype);
            t_ilist *il   = &molt->ilist[ftype];
            const t_iatom *ia = il->iatoms;
            for (int i = 0; i < il->nr; i += 1 + nral)
                int cg = a2cg[ia[1+i]];
                for (int a = 1; a < nral; a++)
                    if (a2cg[ia[1+i+a]] != cg)
                        n_intercg_vsite += molb->nmol;
    return n_intercg_vsite;
static int **get_vsite_pbc(const t_iparams *iparams, const t_ilist *ilist,
                           const t_atom *atom, const t_mdatoms *md,
                           const t_block *cgs, const int *a2cg)
    /* Make an array that tells if the pbc of an atom is set */
    snew(pbc_set, cgs->index[cgs->nr]);
    /* PBC is set for all non-vsites */
    for (int a = 0; a < cgs->index[cgs->nr]; a++)
        if ((atom && atom[a].ptype != eptVSite) ||
            (md && md->ptype[a] != eptVSite))
    snew(vsite_pbc, F_VSITEN-F_VSITE2+1);
    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
        int nral = NRAL(ftype);
        const t_ilist *il = &ilist[ftype];
        const t_iatom *ia = il->iatoms;
        snew(vsite_pbc[ftype-F_VSITE2], il->nr/(1 + nral));
        vsite_pbc_f = vsite_pbc[ftype-F_VSITE2];
            int     vsi   = i/(1 + nral);
            t_iatom vsite = ia[i+1];
            int     cg_v  = a2cg[vsite];
            /* A value of -2 signals that this vsite and its constructing
             * atoms are all within the same cg, so no pbc is required.
            vsite_pbc_f[vsi] = -2;
            /* Check if constructing atoms are outside the vsite's cg */
            if (ftype == F_VSITEN)
                nc3 = 3*iparams[ia[i]].vsiten.n;
                for (int j = 0; j < nc3; j += 3)
                    if (a2cg[ia[i+j+2]] != cg_v)
                        vsite_pbc_f[vsi] = -1;
                for (int a = 1; a < nral; a++)
                    if (a2cg[ia[i+1+a]] != cg_v)
                        vsite_pbc_f[vsi] = -1;
            if (vsite_pbc_f[vsi] == -1)
                /* Check if this is the first processed atom of a vsite-only cg */
                gmx_bool bVsiteOnlyCG_and_FirstAtom = TRUE;
                for (int a = cgs->index[cg_v]; a < cgs->index[cg_v+1]; a++)
                    /* Non-vsites already have pbc set, so simply check for pbc_set */
                        bVsiteOnlyCG_and_FirstAtom = FALSE;
                if (bVsiteOnlyCG_and_FirstAtom)
                    /* First processed atom of a vsite-only charge group.
                     * The pbc of the input coordinates to construct_vsites
                     * should be preserved.
                    vsite_pbc_f[vsi] = vsite;
                else if (cg_v != a2cg[ia[1+i+1]])
                    /* This vsite has a different charge group index
                     * than its first constructing atom
                     * and the charge group has more than one atom,
                     * search for the first normal particle
                     * or vsite that already had its pbc defined.
                     * If nothing is found, use full pbc for this vsite.
                    for (int a = cgs->index[cg_v]; a < cgs->index[cg_v+1]; a++)
                        if (a != vsite && pbc_set[a])
                            vsite_pbc_f[vsi] = a;
                                fprintf(debug, "vsite %d match pbc with atom %d\n",
                        fprintf(debug, "vsite atom %d cg %d - %d pbc atom %d\n",
                                vsite+1, cgs->index[cg_v]+1, cgs->index[cg_v+1],
                                vsite_pbc_f[vsi]+1);
            if (ftype == F_VSITEN)
                /* The other entries in vsite_pbc_f are not used for center vsites */
            /* This vsite now has its pbc defined */
gmx_vsite_t *init_vsite(const gmx_mtop_t *mtop, t_commrec *cr,
                        gmx_bool bSerial_NoPBC)
    gmx_moltype_t *molt;
    /* check if there are vsites */
    for (int ftype = 0; ftype < F_NRE; ftype++)
        if (interaction_function[ftype].flags & IF_VSITE)
            GMX_ASSERT(ftype >= c_ftypeVsiteStart && ftype < c_ftypeVsiteEnd, "c_ftypeVsiteStart and/or c_ftypeVsiteEnd do not have correct values");
            nvsite += gmx_mtop_ftype_count(mtop, ftype);
            GMX_ASSERT(ftype < c_ftypeVsiteStart || ftype >= c_ftypeVsiteEnd, "c_ftypeVsiteStart and/or c_ftypeVsiteEnd do not have correct values");
    vsite->n_intercg_vsite   = count_intercg_vsites(mtop);
    vsite->bHaveChargeGroups = (ncg_mtop(mtop) < mtop->natoms);
    /* If we don't have charge groups, the vsite follows its own pbc */
    if (!bSerial_NoPBC &&
        vsite->bHaveChargeGroups &&
        vsite->n_intercg_vsite > 0 && DOMAINDECOMP(cr))
        vsite->nvsite_pbc_molt = mtop->nmoltype;
        snew(vsite->vsite_pbc_molt, vsite->nvsite_pbc_molt);
        for (int mt = 0; mt < mtop->nmoltype; mt++)
            molt = &mtop->moltype[mt];
            /* Make an atom to charge group index */
            int *a2cg = atom2cg(&molt->cgs);
            vsite->vsite_pbc_molt[mt] = get_vsite_pbc(mtop->ffparams.iparams,
                                                      molt->atoms.atom, nullptr,
        snew(vsite->vsite_pbc_loc_nalloc, c_ftypeVsiteEnd - c_ftypeVsiteStart);
        snew(vsite->vsite_pbc_loc, c_ftypeVsiteEnd - c_ftypeVsiteStart);
        vsite->nthreads = 1;
        vsite->nthreads = gmx_omp_nthreads_get(emntVSITE);
    /* We need one extra thread data structure for the overlap vsites */
    snew(vsite->tData, vsite->nthreads + 1);
#pragma omp parallel for num_threads(vsite->nthreads) schedule(static)
    for (int thread = 0; thread < vsite->nthreads; thread++)
        vsite->tData[thread] = new VsiteThread;
        InterdependentTask *idTask = &vsite->tData[thread]->idTask;
        idTask->atomIndex.resize(vsite->nthreads);
        GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    if (vsite->nthreads > 1)
        vsite->tData[vsite->nthreads] = new VsiteThread;
    vsite->taskIndex       = nullptr;
    vsite->taskIndexNalloc = 0;
static gmx_inline void flagAtom(InterdependentTask *idTask, int atom,
                                int thread, int nthread, int natperthread)
    if (!idTask->use[atom])
        idTask->use[atom] = true;
        thread            = atom/natperthread;
        /* Assign all non-local atom force writes to thread 0 */
        if (thread >= nthread)
        idTask->atomIndex[thread].atom.push_back(atom);
/*! \brief Here we try to assign all vsites that are in our local range.
 * Our task-local atom range is tData->rangeStart - tData->rangeEnd.
 * Vsites that depend only on local atoms, as indicated by taskIndex[]==thread,
 * are assigned to task tData->ilist. Vsites that depend on non-local atoms
 * but not on other vsites are assigned to task tData->idTask.ilist.
 * taskIndex[] is set for all vsites in our range, either to our local tasks
 * or to the single last task as taskIndex[]=2*nthreads.
static void assignVsitesToThread(VsiteThread          *tData,
                                 const t_ilist        *ilist,
                                 const t_iparams      *ip,
                                 const unsigned short *ptype)
    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
        tData->ilist[ftype].nr        = 0;
        tData->idTask.ilist[ftype].nr = 0;
        int nral1 = 1 + NRAL(ftype);
        t_iatom *iat = ilist[ftype].iatoms;
        for (int i = 0; i < ilist[ftype].nr; )
            if (ftype == F_VSITEN)
                /* The 3 below is from 1+NRAL(ftype)=3 */
                inc = ip[iat[i]].vsiten.n*3;
            if (iat[1 + i] <  tData->rangeStart ||
                iat[1 + i] >= tData->rangeEnd)
                /* This vsite belongs to a different thread */
            /* We would like to assign this vsite to task thread,
             * but it might depend on atoms outside the atom range of thread
             * or on another vsite not assigned to task thread.
            if (ftype != F_VSITEN)
                for (int j = i + 2; j < i + nral1; j++)
                    /* Do a range check to avoid a harmless race on taskIndex */
                    if (iat[j] <  tData->rangeStart ||
                        iat[j] >= tData->rangeEnd ||
                        taskIndex[iat[j]] != thread)
                        if (!tData->useInterdependentTask ||
                            ptype[iat[j]] == eptVSite)
                            /* At least one constructing atom is a vsite
                             * that is not assigned to the same thread.
                             * Put this vsite into a separate task.
                            /* There are constructing atoms outside our range,
                             * put this vsite into a second task to be executed
                             * on the same thread. During construction no barrier
                             * is needed between the two tasks on the same thread.
                             * During spreading we need to run this task with
                             * an additional thread-local intermediate force buffer
                             * (or atomic reduction) and a barrier between the two
                            task = nthread + thread;
                for (int j = i + 2; j < i + inc; j += 3)
                    /* Do a range check to avoid a harmless race on taskIndex */
                    if (iat[j] <  tData->rangeStart ||
                        iat[j] >= tData->rangeEnd ||
                        taskIndex[iat[j]] != thread)
                        GMX_ASSERT(ptype[iat[j]] != eptVSite, "A vsite to be assigned in assignVsitesToThread has a vsite as a constructing atom that does not belong to our task, such vsites should be assigned to the single 'master' task");
                        task = nthread + thread;
            /* Update this vsite's thread index entry */
            taskIndex[iat[1+i]] = task;
            if (task == thread || task == nthread + thread)
                /* Copy this vsite to the thread data struct of thread */
                    il_task = &tData->ilist[ftype];
                    il_task = &tData->idTask.ilist[ftype];
                /* Ensure we have sufficient memory allocated */
                if (il_task->nr + inc > il_task->nalloc)
                    il_task->nalloc = over_alloc_large(il_task->nr + inc);
                    srenew(il_task->iatoms, il_task->nalloc);
                /* Copy the vsite data to the thread-task local array */
                for (int j = i; j < i + inc; j++)
                    il_task->iatoms[il_task->nr++] = iat[j];
                if (task == nthread + thread)
                    /* This vsite writes outside our own task force block.
                     * Put it into the interdependent task list and flag
                     * the atoms involved for reduction.
                    tData->idTask.vsite.push_back(iat[i + 1]);
                    if (ftype != F_VSITEN)
                        for (int j = i + 2; j < i + nral1; j++)
                            flagAtom(&tData->idTask, iat[j],
                                     thread, nthread, natperthread);
                        for (int j = i + 2; j < i + inc; j += 3)
                            flagAtom(&tData->idTask, iat[j],
                                     thread, nthread, natperthread);
/*! \brief Assign all vsites with taskIndex[]==task to task tData */
static void assignVsitesToSingleTask(VsiteThread *tData,
                                     const int       *taskIndex,
                                     const t_ilist   *ilist,
                                     const t_iparams *ip)
    for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
        tData->ilist[ftype].nr        = 0;
        tData->idTask.ilist[ftype].nr = 0;
        int      nral1   = 1 + NRAL(ftype);
        t_iatom *iat     = ilist[ftype].iatoms;
        t_ilist *il_task = &tData->ilist[ftype];
        for (int i = 0; i < ilist[ftype].nr; )
            if (ftype == F_VSITEN)
                /* The 3 below is from 1+NRAL(ftype)=3 */
                inc = ip[iat[i]].vsiten.n*3;
            /* Check if the vsite is assigned to our task */
            if (taskIndex[iat[1 + i]] == task)
                /* Ensure we have sufficient memory allocated */
                if (il_task->nr + inc > il_task->nalloc)
                    il_task->nalloc = over_alloc_large(il_task->nr + inc);
                    srenew(il_task->iatoms, il_task->nalloc);
                /* Copy the vsite data to the thread-task local array */
                for (int j = i; j < i + inc; j++)
                    il_task->iatoms[il_task->nr++] = iat[j];
void split_vsites_over_threads(const t_ilist   *ilist,
                               const t_iparams *ip,
                               const t_mdatoms *mdatoms,
                               gmx_bool         bLimitRange,
    int vsite_atom_range, natperthread;
    if (vsite->nthreads == 1)
    /* The current way of distributing the vsites over threads is primitive.
     * We divide the atom range 0 - natoms_in_vsite uniformly over threads,
     * without taking into account how the vsites are distributed.
     * Without domain decomposition bLimitRange=TRUE and we at least
     * tighten the upper bound of the range (useful for common systems
     * such as a vsite-protein in 3-site water).
     * With domain decomposition, as long as the vsites are distributed
     * uniformly in each domain along the major dimension, usually x,
     * it will also perform well.
        vsite_atom_range = -1;
        for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
            if (ftype != F_VSITEN)
                int nral1 = 1 + NRAL(ftype);
                const t_iatom *iat = ilist[ftype].iatoms;
                for (int i = 0; i < ilist[ftype].nr; i += nral1)
                    for (int j = i + 1; j < i + nral1; j++)
                        vsite_atom_range = std::max(vsite_atom_range, iat[j]);
                const t_iatom *iat = ilist[ftype].iatoms;
                while (i < ilist[ftype].nr)
                    /* The 3 below is from 1+NRAL(ftype)=3 */
                    vs_ind_end = i + ip[iat[i]].vsiten.n*3;
                    vsite_atom_range = std::max(vsite_atom_range, iat[i+1]);
                    while (i < vs_ind_end)
                        vsite_atom_range = std::max(vsite_atom_range, iat[i+2]);
        natperthread = (vsite_atom_range + vsite->nthreads - 1)/vsite->nthreads;
        /* Any local or non-local atom could be involved in virtual sites.
         * But since we usually have very few non-local virtual sites
         * (only non-local vsites that depend on local vsites),
         * we distribute the local atom range equally over the threads.
         * When assigning vsites to threads, we should take care that the last
         * thread also covers the non-local range.
        vsite_atom_range = mdatoms->nr;
        natperthread     = (mdatoms->homenr + vsite->nthreads - 1)/vsite->nthreads;
        fprintf(debug, "virtual site thread dist: natoms %d, range %d, natperthread %d\n", mdatoms->nr, vsite_atom_range, natperthread);
    /* To simplify the vsite assignment, we make an index which tells us
     * to which task particles, both non-vsites and vsites, are assigned.
    if (mdatoms->nr > vsite->taskIndexNalloc)
        vsite->taskIndexNalloc = over_alloc_large(mdatoms->nr);
        srenew(vsite->taskIndex, vsite->taskIndexNalloc);
    /* Initialize the task index array. Here we assign the non-vsite
     * particles to task=thread, so we easily figure out if vsites
     * depend on local and/or non-local particles in assignVsitesToThread.
    int *taskIndex = vsite->taskIndex;
        for (int i = 0; i < mdatoms->nr; i++)
            if (mdatoms->ptype[i] == eptVSite)
                /* vsites are not assigned to a task yet */
                /* assign non-vsite particles to task thread */
                taskIndex[i] = thread;
            if (i == (thread + 1)*natperthread && thread < vsite->nthreads)
#pragma omp parallel num_threads(vsite->nthreads)
        int          thread = gmx_omp_get_thread_num();
        VsiteThread *tData  = vsite->tData[thread];
        /* Clear the buffer use flags that were set before */
        if (tData->useInterdependentTask)
            InterdependentTask *idTask = &tData->idTask;
            /* To avoid an extra OpenMP barrier in spread_vsite_f,
             * we clear the force buffer at the next step,
             * so we need to do it here as well.
            clearTaskForceBufferUsedElements(idTask);
            idTask->vsite.resize(0);
            for (int t = 0; t < vsite->nthreads; t++)
                AtomIndex *atomIndex = &idTask->atomIndex[t];
                int        natom     = atomIndex->atom.size();
                for (int i = 0; i < natom; i++)
                    idTask->use[atomIndex->atom[i]] = false;
                atomIndex->atom.resize(0);
        /* To avoid large f_buf allocations of #threads*vsite_atom_range
         * we don't use the interdependent task with more than 200000 atoms.
         * This doesn't affect performance, since with such a large range
         * relatively few vsites will end up in the separate task.
         * Note that useInterdependentTask should be the same for all threads.
        tData->useInterdependentTask = (vsite_atom_range <= 200000);
        if (tData->useInterdependentTask)
            size_t              natoms_use_in_vsites = vsite_atom_range;
            InterdependentTask *idTask               = &tData->idTask;
            /* To avoid resizing and re-clearing every nstlist steps,
             * we never downsize the force buffer.
            if (natoms_use_in_vsites > idTask->force.size() ||
                natoms_use_in_vsites > idTask->use.size())
                idTask->force.resize(natoms_use_in_vsites, { 0, 0, 0 });
                idTask->use.resize(natoms_use_in_vsites, false);
        /* Assign all vsites that can execute independently on threads */
        tData->rangeStart = thread*natperthread;
        if (thread < vsite->nthreads - 1)
            tData->rangeEnd = (thread + 1)*natperthread;
            /* The last thread should cover up to the end of the range */
            tData->rangeEnd = mdatoms->nr;
        assignVsitesToThread(tData,
                             thread, vsite->nthreads,
                             ilist, ip, mdatoms->ptype);
        if (tData->useInterdependentTask)
            /* In the worst case, all tasks write to force ranges of
             * all other tasks, leading to #tasks^2 scaling (this is only
             * the overhead, the actual flops remain constant).
             * But in most cases there is far less coupling. To improve
             * scaling at high thread counts we therefore construct
             * an index to only loop over the actually affected tasks.
            InterdependentTask *idTask = &tData->idTask;
            /* Ensure assignVsitesToThread finished on other threads */
            idTask->spreadTask.resize(0);
            idTask->reduceTask.resize(0);
            for (int t = 0; t < vsite->nthreads; t++)
                /* Do we write to the force buffer of task t? */
                if (idTask->atomIndex[t].atom.size() > 0)
                    idTask->spreadTask.push_back(t);
                /* Does task t write to our force buffer? */
                if (vsite->tData[t]->idTask.atomIndex[thread].atom.size() > 0)
                    idTask->reduceTask.push_back(t);
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
    /* Assign all remaining vsites, which will have taskIndex[]=2*vsite->nthreads,
     * to a single task that will not run in parallel with other tasks.
    assignVsitesToSingleTask(vsite->tData[vsite->nthreads],
    if (debug && vsite->nthreads > 1)
        fprintf(debug, "virtual site useInterdependentTask %d, nuse:\n",
                vsite->tData[0]->useInterdependentTask);
        for (int th = 0; th < vsite->nthreads + 1; th++)
            fprintf(debug, " %4d", vsite->tData[th]->idTask.nuse);
        fprintf(debug, "\n");
        for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
            if (ilist[ftype].nr > 0)
                fprintf(debug, "%-20s thread dist:",
                        interaction_function[ftype].longname);
                for (int th = 0; th < vsite->nthreads + 1; th++)
                    fprintf(debug, " %4d %4d ",
                            vsite->tData[th]->ilist[ftype].nr,
                            vsite->tData[th]->idTask.ilist[ftype].nr);
                fprintf(debug, "\n");
    int nrOrig = vsiteIlistNrCount(ilist);
    for (int th = 0; th < vsite->nthreads + 1; th++)
        vsiteIlistNrCount(vsite->tData[th]->ilist) +
        vsiteIlistNrCount(vsite->tData[th]->idTask.ilist);
    GMX_ASSERT(nrThreaded == nrOrig, "The number of virtual sites assigned to all thread tasks has to match the total number of virtual sites");
void set_vsite_top(gmx_vsite_t *vsite, gmx_localtop_t *top, t_mdatoms *md,
    if (vsite->n_intercg_vsite > 0)
        if (vsite->bHaveChargeGroups)
            /* Make an atom to charge group index */
            int *a2cg = atom2cg(&top->cgs);
            vsite->vsite_pbc_loc = get_vsite_pbc(top->idef.iparams,
                                                 top->idef.il, nullptr, md,
    if (vsite->nthreads > 1)
        if (vsite->bHaveChargeGroups)
            gmx_fatal(FARGS, "The combination of threading, virtual sites and charge groups is not implemented");
        split_vsites_over_threads(top->idef.il, top->idef.iparams,
                                  md, !DOMAINDECOMP(cr), vsite);