2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017 The GROMACS development team.
7 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
8 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
9 * and including many others, as listed in the AUTHORS file in the
10 * top-level source directory and at http://www.gromacs.org.
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
39 * \brief Implements the VirtualSitesHandler class and vsite standalone functions
41 * \author Berk Hess <hess@kth.se>
42 * \ingroup module_mdlib
55 #include "gromacs/domdec/domdec.h"
56 #include "gromacs/domdec/domdec_struct.h"
57 #include "gromacs/gmxlib/network.h"
58 #include "gromacs/gmxlib/nrnb.h"
59 #include "gromacs/math/functions.h"
60 #include "gromacs/math/vec.h"
61 #include "gromacs/mdlib/gmx_omp_nthreads.h"
62 #include "gromacs/mdtypes/commrec.h"
63 #include "gromacs/pbcutil/ishift.h"
64 #include "gromacs/pbcutil/pbc.h"
65 #include "gromacs/timing/wallcycle.h"
66 #include "gromacs/topology/ifunc.h"
67 #include "gromacs/topology/mtop_util.h"
68 #include "gromacs/topology/topology.h"
69 #include "gromacs/utility/exceptions.h"
70 #include "gromacs/utility/fatalerror.h"
71 #include "gromacs/utility/gmxassert.h"
72 #include "gromacs/utility/gmxomp.h"
74 /* The strategy used here for assigning virtual sites to (thread-)tasks
77 * We divide the atom range that vsites operate on (natoms_local with DD,
78 * 0 - last atom involved in vsites without DD) equally over all threads.
80 * Vsites in the local range constructed from atoms in the local range
81 * and/or other vsites that are fully local are assigned to a simple,
84 * Vsites that are not assigned after using the above criterion get assigned
85 * to a so called "interdependent" thread task when none of the constructing
86 * atoms is a vsite. These tasks are called interdependent, because one task
87 * accesses atoms assigned to a different task/thread.
88 * Note that this option is turned off with large (local) atom counts
89 * to avoid high memory usage.
91 * Any remaining vsites are assigned to a separate master thread task.
96 //! VirialHandling is often used outside VirtualSitesHandler class members
// File-scope alias: the free spreading functions below template on this enum.
97 using VirialHandling = VirtualSitesHandler::VirialHandling;
// NOTE(review): garbled extraction — the 'struct DomainInfo' header line, part of the
// constructor initializer list, and the closing brace were dropped here; only comments added.
99 /*! \brief Information on PBC and domain decomposition for virtual sites
104     //! Constructs without PBC and DD
105     DomainInfo() = default;
107     //! Constructs with PBC and DD, if !=nullptr
108     DomainInfo(PbcType pbcType, bool haveInterUpdateGroupVirtualSites, gmx_domdec_t* domdec) :
// Molecular PBC handling is only needed when PBC is on AND vsites span update groups.
110         useMolPbc_(pbcType != PbcType::No && haveInterUpdateGroupVirtualSites),
115     //! Returns whether we are using domain decomposition with more than 1 DD rank
116     bool useDomdec() const { return (domdec_ != nullptr); }
//! The PBC type of the system
119     const PbcType pbcType_ = PbcType::No;
120     //! Whether molecules are broken over PBC
121     const bool useMolPbc_ = false;
122     //! Pointer to the domain decomposition struct, nullptr without PP DD
123     const gmx_domdec_t* domdec_ = nullptr;
// NOTE(review): the 'struct AtomIndex {' header line and closing brace were dropped by the extraction.
126 /*! \brief List of atom indices belonging to a task
130     //! List of atom indices
131     std::vector<int> atom;
134 /*! \brief Data structure for thread tasks that use constructing atoms outside their own atom range
136 struct InterdependentTask
138     //! The interaction lists, only vsite entries are used
139     InteractionLists ilist;
140     //! Thread/task-local force buffer
141     std::vector<RVec> force;
142     //! The atom indices of the vsites of our task
143     std::vector<int> vsite;
144     //! Flags if elements in force are spread to or not
145     std::vector<bool> use;
146     //! The number of entries set to true in use
// NOTE(review): the counter member this comment documents (an int) was dropped by the extraction.
148     //! Array of atoms indices, size nthreads, covering all nuse set elements in use
149     std::vector<AtomIndex> atomIndex;
150     //! List of tasks (force blocks) this task spread forces to
151     std::vector<int> spreadTask;
152     //! List of tasks that write to this tasks force block range
153     std::vector<int> reduceTask;
// NOTE(review): garbled extraction — the struct header, the rangeStart/rangeEnd members
// (documented below), the dxdf virial member, and parts of the constructor were dropped.
156 /*! \brief Vsite thread task data structure
160     //! Start of atom range of this task
162     //! End of atom range of this task
164     //! The interaction lists, only vsite entries are used
165     std::array<InteractionList, F_NRE> ilist;
166     //! Local fshift accumulation buffer
167     std::array<RVec, c_numShiftVectors> fshift;
168     //! Local virial dx*df accumulation buffer
170     //! Tells if interdependent task idTask should be used (in addition to the rest of this task), this bool has the same value on all threads
171     bool useInterdependentTask;
172     //! Data for vsites that involve constructing atoms in the atom range of other threads/tasks
173     InterdependentTask idTask;
175     /*! \brief Constructor */
// Constructor body: zero the per-thread shift-force accumulation buffer.
180         for (auto& elem : fshift)
182             elem = { 0.0_real, 0.0_real, 0.0_real };
185         useInterdependentTask = false;
// NOTE(review): the class header, access specifiers, and part of setVirtualSites'
// parameter list were dropped by the extraction; only comments added below.
190 /*! \brief Information on how the virtual site work is divided over thread tasks
195     //! Constructor, retrieves the number of threads to use from gmx_omp_nthreads.h
198     //! Returns the number of threads to use for vsite operations
199     int numThreads() const { return numThreads_; }
201     //! Returns the thread data for the given thread
202     const VsiteThread& threadData(int threadIndex) const { return *tData_[threadIndex]; }
204     //! Returns the thread data for the given thread
205     VsiteThread& threadData(int threadIndex) { return *tData_[threadIndex]; }
207     //! Returns the thread data for vsites that depend on non-local vsites
// Stored one-past the per-thread entries: tData_ has numThreads_ + 1 elements.
208     const VsiteThread& threadDataNonLocalDependent() const { return *tData_[numThreads_]; }
210     //! Returns the thread data for vsites that depend on non-local vsites
211     VsiteThread& threadDataNonLocalDependent() { return *tData_[numThreads_]; }
213     //! Set VSites and distribute VSite work over threads, should be called after DD partitioning
214     void setVirtualSites(ArrayRef<const InteractionList> ilist,
215                          ArrayRef<const t_iparams> iparams,
218                          ArrayRef<const ParticleType> ptype,
222     //! Number of threads used for vsite operations
223     const int numThreads_;
224     //! Thread local vsites and work structs
225     std::vector<std::unique_ptr<VsiteThread>> tData_;
226     //! Work array for dividing vsites over threads
227     std::vector<int> taskIndex_;
// NOTE(review): access specifiers, closing brace, and several parameter-list lines of
// this class were dropped by the extraction; only comments added below.
230 /*! \brief Impl class for VirtualSitesHandler
232 class VirtualSitesHandler::Impl
235     //! Constructor, domdec should be nullptr without DD
236     Impl(const gmx_mtop_t& mtop,
237          gmx_domdec_t* domdec,
239          ArrayRef<const RangePartitioning> updateGroupingPerMoleculeType);
241     //! Returns the number of virtual sites acting over multiple update groups
242     int numInterUpdategroupVirtualSites() const { return numInterUpdategroupVirtualSites_; }
244     //! Set VSites and distribute VSite work over threads, should be called after DD partitioning
245     void setVirtualSites(ArrayRef<const InteractionList> ilist,
248                          ArrayRef<const ParticleType> ptype);
250     /*! \brief Create positions of vsite atoms based for the local system
252      * \param[in,out] x        The coordinates
253      * \param[in,out] v        The velocities, needed if operation requires it
254      * \param[in]     box      The box
255      * \param[in]     operation Whether we calculate positions, velocities, or both
257     void construct(ArrayRef<RVec> x, ArrayRef<RVec> v, const matrix box, VSiteOperation operation) const;
259     /*! \brief Spread the force operating on the vsite atoms on the surrounding atoms.
261      * vsite should point to a valid object.
262      * The virialHandling parameter determines how virial contributions are handled.
263      * If this is set to Linear, shift forces are accumulated into fshift.
264      * If this is set to NonLinear, non-linear contributions are added to virial.
265      * This non-linear correction is required when the virial is not calculated
266      * afterwards from the particle position and forces, but in a different way,
267      * as for instance for the PME mesh contribution.
269     void spreadForces(ArrayRef<const RVec> x,
271                       VirialHandling virialHandling,
272                       ArrayRef<RVec> fshift,
276                       gmx_wallcycle* wcycle);
279     //! The number of vsites that cross update groups, when =0 no PBC treatment is needed
280     const int numInterUpdategroupVirtualSites_;
281     //! PBC and DD information
282     const DomainInfo domainInfo_;
283     //! The interaction parameters
284     const ArrayRef<const t_iparams> iparams_;
285     //! The interaction lists
286     ArrayRef<const InteractionList> ilists_;
287     //! Information for handling vsite threading
288     ThreadingInfo threadingInfo_;
291 VirtualSitesHandler::~VirtualSitesHandler() = default;
293 int VirtualSitesHandler::numInterUpdategroupVirtualSites() const
295 return impl_->numInterUpdategroupVirtualSites();
298 /*! \brief Returns the sum of the vsite ilist sizes over all vsite types
300 * \param[in] ilist The interaction list
302 static int vsiteIlistNrCount(ArrayRef<const InteractionList> ilist)
305 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
307 nr += ilist[ftype].size();
313 //! Computes the distance between xi and xj, pbc is used when pbc!=nullptr
314 static int pbc_rvec_sub(const t_pbc* pbc, const rvec xi, const rvec xj, rvec dx)
318 return pbc_dx_aiuc(pbc, xi, xj, dx);
322 rvec_sub(xi, xj, dx);
323 return c_centralShiftIndex;
327 //! Returns the 1/norm(x)
328 static inline real inverseNorm(const rvec x)
330 return gmx::invsqrt(iprod(x, x));
//! Whether we're calculating the virtual site position
enum class VSiteCalculatePosition
{
    Yes,
    No
};
//! Whether we're calculating the virtual site velocity
enum class VSiteCalculateVelocity
{
    Yes,
    No
};
347 /* Vsite construction routines */
349 // GCC 8 falsely flags unused variables if constexpr prunes a code path, fixed in GCC 9
350 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85827
352 GCC_DIAGNOSTIC_IGNORE(-Wunused-but-set-parameter)
355 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
356 static void constr_vsite1(const rvec xi, rvec x, const rvec vi, rvec v)
358 if (calculatePosition == VSiteCalculatePosition::Yes)
363 if (calculateVelocity == VSiteCalculateVelocity::Yes)
369 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
371 constr_vsite2(const rvec xi, const rvec xj, rvec x, real a, const t_pbc* pbc, const rvec vi, const rvec vj, rvec v)
373 const real b = 1 - a;
376 if (calculatePosition == VSiteCalculatePosition::Yes)
381 pbc_dx_aiuc(pbc, xj, xi, dx);
382 x[XX] = xi[XX] + a * dx[XX];
383 x[YY] = xi[YY] + a * dx[YY];
384 x[ZZ] = xi[ZZ] + a * dx[ZZ];
388 x[XX] = b * xi[XX] + a * xj[XX];
389 x[YY] = b * xi[YY] + a * xj[YY];
390 x[ZZ] = b * xi[ZZ] + a * xj[ZZ];
393 /* TOTAL: 10 flops */
395 if (calculateVelocity == VSiteCalculateVelocity::Yes)
397 v[XX] = b * vi[XX] + a * vj[XX];
398 v[YY] = b * vi[YY] + a * vj[YY];
399 v[ZZ] = b * vi[ZZ] + a * vj[ZZ];
403 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
405 constr_vsite2FD(const rvec xi, const rvec xj, rvec x, real a, const t_pbc* pbc, const rvec vi, const rvec vj, rvec v)
408 pbc_rvec_sub(pbc, xj, xi, xij);
411 const real invNormXij = inverseNorm(xij);
412 const real b = a * invNormXij;
415 if (calculatePosition == VSiteCalculatePosition::Yes)
417 x[XX] = xi[XX] + b * xij[XX];
418 x[YY] = xi[YY] + b * xij[YY];
419 x[ZZ] = xi[ZZ] + b * xij[ZZ];
421 /* TOTAL: 25 flops */
423 if (calculateVelocity == VSiteCalculateVelocity::Yes)
426 rvec_sub(vj, vi, vij);
427 const real vijDotXij = iprod(vij, xij);
429 v[XX] = vi[XX] + b * (vij[XX] - xij[XX] * vijDotXij * invNormXij * invNormXij);
430 v[YY] = vi[YY] + b * (vij[YY] - xij[YY] * vijDotXij * invNormXij * invNormXij);
431 v[ZZ] = vi[ZZ] + b * (vij[ZZ] - xij[ZZ] * vijDotXij * invNormXij * invNormXij);
435 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
436 static void constr_vsite3(const rvec xi,
448 const real c = 1 - a - b;
451 if (calculatePosition == VSiteCalculatePosition::Yes)
457 pbc_dx_aiuc(pbc, xj, xi, dxj);
458 pbc_dx_aiuc(pbc, xk, xi, dxk);
459 x[XX] = xi[XX] + a * dxj[XX] + b * dxk[XX];
460 x[YY] = xi[YY] + a * dxj[YY] + b * dxk[YY];
461 x[ZZ] = xi[ZZ] + a * dxj[ZZ] + b * dxk[ZZ];
465 x[XX] = c * xi[XX] + a * xj[XX] + b * xk[XX];
466 x[YY] = c * xi[YY] + a * xj[YY] + b * xk[YY];
467 x[ZZ] = c * xi[ZZ] + a * xj[ZZ] + b * xk[ZZ];
470 /* TOTAL: 17 flops */
472 if (calculateVelocity == VSiteCalculateVelocity::Yes)
474 v[XX] = c * vi[XX] + a * vj[XX] + b * vk[XX];
475 v[YY] = c * vi[YY] + a * vj[YY] + b * vk[YY];
476 v[ZZ] = c * vi[ZZ] + a * vj[ZZ] + b * vk[ZZ];
480 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
481 static void constr_vsite3FD(const rvec xi,
495 pbc_rvec_sub(pbc, xj, xi, xij);
496 pbc_rvec_sub(pbc, xk, xj, xjk);
499 /* temp goes from i to a point on the line jk */
500 temp[XX] = xij[XX] + a * xjk[XX];
501 temp[YY] = xij[YY] + a * xjk[YY];
502 temp[ZZ] = xij[ZZ] + a * xjk[ZZ];
505 const real invNormTemp = inverseNorm(temp);
506 const real c = b * invNormTemp;
509 if (calculatePosition == VSiteCalculatePosition::Yes)
511 x[XX] = xi[XX] + c * temp[XX];
512 x[YY] = xi[YY] + c * temp[YY];
513 x[ZZ] = xi[ZZ] + c * temp[ZZ];
515 /* TOTAL: 34 flops */
517 if (calculateVelocity == VSiteCalculateVelocity::Yes)
521 rvec_sub(vj, vi, vij);
522 rvec_sub(vk, vj, vjk);
523 const rvec tempV = { vij[XX] + a * vjk[XX], vij[YY] + a * vjk[YY], vij[ZZ] + a * vjk[ZZ] };
524 const real tempDotTempV = iprod(temp, tempV);
526 v[XX] = vi[XX] + c * (tempV[XX] - temp[XX] * tempDotTempV * invNormTemp * invNormTemp);
527 v[YY] = vi[YY] + c * (tempV[YY] - temp[YY] * tempDotTempV * invNormTemp * invNormTemp);
528 v[ZZ] = vi[ZZ] + c * (tempV[ZZ] - temp[ZZ] * tempDotTempV * invNormTemp * invNormTemp);
532 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
533 static void constr_vsite3FAD(const rvec xi,
544 { // Note: a = d * cos(theta)
545 // b = d * sin(theta)
548 pbc_rvec_sub(pbc, xj, xi, xij);
549 pbc_rvec_sub(pbc, xk, xj, xjk);
552 const real invdij = inverseNorm(xij);
553 const real xijDotXjk = iprod(xij, xjk);
554 const real c1 = invdij * invdij * xijDotXjk;
555 xp[XX] = xjk[XX] - c1 * xij[XX];
556 xp[YY] = xjk[YY] - c1 * xij[YY];
557 xp[ZZ] = xjk[ZZ] - c1 * xij[ZZ];
558 const real a1 = a * invdij;
559 const real invNormXp = inverseNorm(xp);
560 const real b1 = b * invNormXp;
563 if (calculatePosition == VSiteCalculatePosition::Yes)
565 x[XX] = xi[XX] + a1 * xij[XX] + b1 * xp[XX];
566 x[YY] = xi[YY] + a1 * xij[YY] + b1 * xp[YY];
567 x[ZZ] = xi[ZZ] + a1 * xij[ZZ] + b1 * xp[ZZ];
569 /* TOTAL: 63 flops */
572 if (calculateVelocity == VSiteCalculateVelocity::Yes)
576 rvec_sub(vj, vi, vij);
577 rvec_sub(vk, vj, vjk);
579 const real vijDotXjkPlusXijDotVjk = iprod(vij, xjk) + iprod(xij, vjk);
580 const real xijDotVij = iprod(xij, vij);
581 const real invNormXij2 = invdij * invdij;
585 - xij[XX] * invNormXij2
586 * (vijDotXjkPlusXijDotVjk - invNormXij2 * xijDotXjk * xijDotVij * 2)
587 - vij[XX] * xijDotXjk * invNormXij2;
589 - xij[YY] * invNormXij2
590 * (vijDotXjkPlusXijDotVjk - invNormXij2 * xijDotXjk * xijDotVij * 2)
591 - vij[YY] * xijDotXjk * invNormXij2;
593 - xij[ZZ] * invNormXij2
594 * (vijDotXjkPlusXijDotVjk - invNormXij2 * xijDotXjk * xijDotVij * 2)
595 - vij[ZZ] * xijDotXjk * invNormXij2;
597 const real xpDotVp = iprod(xp, vp);
599 v[XX] = vi[XX] + a1 * (vij[XX] - xij[XX] * xijDotVij * invdij * invdij)
600 + b1 * (vp[XX] - xp[XX] * xpDotVp * invNormXp * invNormXp);
601 v[YY] = vi[YY] + a1 * (vij[YY] - xij[YY] * xijDotVij * invdij * invdij)
602 + b1 * (vp[YY] - xp[YY] * xpDotVp * invNormXp * invNormXp);
603 v[ZZ] = vi[ZZ] + a1 * (vij[ZZ] - xij[ZZ] * xijDotVij * invdij * invdij)
604 + b1 * (vp[ZZ] - xp[ZZ] * xpDotVp * invNormXp * invNormXp);
608 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
609 static void constr_vsite3OUT(const rvec xi,
624 pbc_rvec_sub(pbc, xj, xi, xij);
625 pbc_rvec_sub(pbc, xk, xi, xik);
626 cprod(xij, xik, temp);
629 if (calculatePosition == VSiteCalculatePosition::Yes)
631 x[XX] = xi[XX] + a * xij[XX] + b * xik[XX] + c * temp[XX];
632 x[YY] = xi[YY] + a * xij[YY] + b * xik[YY] + c * temp[YY];
633 x[ZZ] = xi[ZZ] + a * xij[ZZ] + b * xik[ZZ] + c * temp[ZZ];
635 /* TOTAL: 33 flops */
638 if (calculateVelocity == VSiteCalculateVelocity::Yes)
642 rvec_sub(vj, vi, vij);
643 rvec_sub(vk, vi, vik);
647 cprod(vij, xik, temp1);
648 cprod(xij, vik, temp2);
650 v[XX] = vi[XX] + a * vij[XX] + b * vik[XX] + c * (temp1[XX] + temp2[XX]);
651 v[YY] = vi[YY] + a * vij[YY] + b * vik[YY] + c * (temp1[YY] + temp2[YY]);
652 v[ZZ] = vi[ZZ] + a * vij[ZZ] + b * vik[ZZ] + c * (temp1[ZZ] + temp2[ZZ]);
656 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
657 static void constr_vsite4FD(const rvec xi,
672 rvec xij, xjk, xjl, temp;
675 pbc_rvec_sub(pbc, xj, xi, xij);
676 pbc_rvec_sub(pbc, xk, xj, xjk);
677 pbc_rvec_sub(pbc, xl, xj, xjl);
680 /* temp goes from i to a point on the plane jkl */
681 temp[XX] = xij[XX] + a * xjk[XX] + b * xjl[XX];
682 temp[YY] = xij[YY] + a * xjk[YY] + b * xjl[YY];
683 temp[ZZ] = xij[ZZ] + a * xjk[ZZ] + b * xjl[ZZ];
686 const real invRm = inverseNorm(temp);
690 if (calculatePosition == VSiteCalculatePosition::Yes)
692 x[XX] = xi[XX] + d * temp[XX];
693 x[YY] = xi[YY] + d * temp[YY];
694 x[ZZ] = xi[ZZ] + d * temp[ZZ];
696 /* TOTAL: 43 flops */
698 if (calculateVelocity == VSiteCalculateVelocity::Yes)
704 rvec_sub(vj, vi, vij);
705 rvec_sub(vk, vj, vjk);
706 rvec_sub(vl, vj, vjl);
709 vm[XX] = vij[XX] + a * vjk[XX] + b * vjl[XX];
710 vm[YY] = vij[YY] + a * vjk[YY] + b * vjl[YY];
711 vm[ZZ] = vij[ZZ] + a * vjk[ZZ] + b * vjl[ZZ];
713 const real vmDotRm = iprod(vm, temp);
714 v[XX] = vi[XX] + d * (vm[XX] - temp[XX] * vmDotRm * invRm * invRm);
715 v[YY] = vi[YY] + d * (vm[YY] - temp[YY] * vmDotRm * invRm * invRm);
716 v[ZZ] = vi[ZZ] + d * (vm[ZZ] - temp[ZZ] * vmDotRm * invRm * invRm);
720 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
721 static void constr_vsite4FDN(const rvec xi,
736 rvec xij, xik, xil, ra, rb, rja, rjb, rm;
739 pbc_rvec_sub(pbc, xj, xi, xij);
740 pbc_rvec_sub(pbc, xk, xi, xik);
741 pbc_rvec_sub(pbc, xl, xi, xil);
744 ra[XX] = a * xik[XX];
745 ra[YY] = a * xik[YY];
746 ra[ZZ] = a * xik[ZZ];
748 rb[XX] = b * xil[XX];
749 rb[YY] = b * xil[YY];
750 rb[ZZ] = b * xil[ZZ];
754 rvec_sub(ra, xij, rja);
755 rvec_sub(rb, xij, rjb);
761 const real invNormRm = inverseNorm(rm);
765 if (calculatePosition == VSiteCalculatePosition::Yes)
767 x[XX] = xi[XX] + d * rm[XX];
768 x[YY] = xi[YY] + d * rm[YY];
769 x[ZZ] = xi[ZZ] + d * rm[ZZ];
771 /* TOTAL: 47 flops */
774 if (calculateVelocity == VSiteCalculateVelocity::Yes)
779 rvec_sub(vj, vi, vij);
780 rvec_sub(vk, vi, vik);
781 rvec_sub(vl, vi, vil);
786 vja[XX] = a * vik[XX] - vij[XX];
787 vja[YY] = a * vik[YY] - vij[YY];
788 vja[ZZ] = a * vik[ZZ] - vij[ZZ];
789 vjb[XX] = b * vil[XX] - vij[XX];
790 vjb[YY] = b * vil[YY] - vij[YY];
791 vjb[ZZ] = b * vil[ZZ] - vij[ZZ];
795 cprod(vja, rjb, temp1);
796 cprod(rja, vjb, temp2);
799 vm[XX] = temp1[XX] + temp2[XX];
800 vm[YY] = temp1[YY] + temp2[YY];
801 vm[ZZ] = temp1[ZZ] + temp2[ZZ];
803 const real rmDotVm = iprod(rm, vm);
804 v[XX] = vi[XX] + d * (vm[XX] - rm[XX] * rmDotVm * invNormRm * invNormRm);
805 v[YY] = vi[YY] + d * (vm[YY] - rm[YY] * rmDotVm * invNormRm * invNormRm);
806 v[ZZ] = vi[ZZ] + d * (vm[ZZ] - rm[ZZ] * rmDotVm * invNormRm * invNormRm);
// NOTE(review): garbled extraction — part of the parameter list, the local
// declarations (accumulators dsum/dvsum, x1/v1, dx, a, ai), the per-iteration
// constructing-atom index update, and the final return were dropped; only
// comments added. Constructs an N-atom weighted-average vsite: it accumulates
// weighted offsets of each constructing atom relative to the first one.
810 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
811 static int constr_vsiten(const t_iatom* ia,
812                          ArrayRef<const t_iparams> ip,
// Three iatoms per constructing atom; the count n is stored in the first parameter entry
823     const int n3 = 3 * ip[ia[0]].vsiten.n;
824     const int av = ia[1];
// Reference position/velocity: the first constructing atom
826     copy_rvec(x[ai], x1);
827     copy_rvec(v[ai], v1);
829     for (int i = 3; i < n3; i += 3)
// Weight of this constructing atom
832         a = ip[ia[i]].vsiten.a;
833         if (calculatePosition == VSiteCalculatePosition::Yes)
837                 pbc_dx_aiuc(pbc, x[ai], x1, dx);
841                 rvec_sub(x[ai], x1, dx);
843             dsum[XX] += a * dx[XX];
844             dsum[YY] += a * dx[YY];
845             dsum[ZZ] += a * dx[ZZ];
848         if (calculateVelocity == VSiteCalculateVelocity::Yes)
850             rvec_sub(v[ai], v1, dx);
851             dvsum[XX] += a * dx[XX];
852             dvsum[YY] += a * dx[YY];
853             dvsum[ZZ] += a * dx[ZZ];
858     if (calculatePosition == VSiteCalculatePosition::Yes)
860         x[av][XX] = x1[XX] + dsum[XX];
861         x[av][YY] = x1[YY] + dsum[YY];
862         x[av][ZZ] = x1[ZZ] + dsum[ZZ];
865     if (calculateVelocity == VSiteCalculateVelocity::Yes)
867         v[av][XX] = v1[XX] + dvsum[XX];
868         v[av][YY] = v1[YY] + dvsum[YY];
869         v[av][ZZ] = v1[ZZ] + dvsum[ZZ];
//! PBC modes for vsite construction and spreading
enum class PbcMode
{
    all, //!< Apply normal, simple PBC for all vsites
    none //!< No PBC treatment needed
};
886 /*! \brief Returns the PBC mode based on the system PBC and vsite properties
888 * \param[in] pbcPtr A pointer to a PBC struct or nullptr when no PBC treatment is required
890 static PbcMode getPbcMode(const t_pbc* pbcPtr)
892 if (pbcPtr == nullptr)
894 return PbcMode::none;
// NOTE(review): garbled extraction — the switch's case labels, most of the
// per-type argument lists, the loop-variable increments (i += inc etc.), and
// several local declarations (avsite, ai, tp, xv, dx, b1, c1, inc) were
// dropped from this function; only comments were added below.
902 /*! \brief Executes the vsite construction task for a single thread
904 * \tparam operation Whether we are calculating positions, velocities, or both
905 * \param[in,out] x   Coordinates to construct vsites for
906 * \param[in,out] v   Velocities are generated for virtual sites if `operation` requires it
907 * \param[in] ip      Interaction parameters for all interaction, only vsite parameters are used
908 * \param[in] ilist   The interaction lists, only vsites are usesd
909 * \param[in] pbc_null PBC struct, used for PBC distance calculations when !=nullptr
911 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
912 static void construct_vsites_thread(ArrayRef<RVec> x,
914                                     ArrayRef<const t_iparams> ip,
915                                     ArrayRef<const InteractionList> ilist,
916                                     const t_pbc* pbc_null)
918     if (calculateVelocity == VSiteCalculateVelocity::Yes)
920         GMX_RELEASE_ASSERT(!v.empty(),
921                            "Can't calculate velocities without access to velocity vector.");
924     // Work around clang bug (unfixed as of Feb 2021)
925     // https://bugs.llvm.org/show_bug.cgi?id=35450
927     CLANG_DIAGNOSTIC_IGNORE(-Wunused-lambda-capture)
929     // GCC 8 falsely flags unused variables if constexpr prunes a code path, fixed in GCC 9
930     // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85827
932     GCC_DIAGNOSTIC_IGNORE(-Wunused-but-set-parameter)
934     // getVOrNull returns a velocity rvec if we need it, nullptr otherwise.
935     auto getVOrNull = [v](int idx) -> real* {
936         if (calculateVelocity == VSiteCalculateVelocity::Yes)
938             return v[idx].as_vec();
946     CLANG_DIAGNOSTIC_RESET
948     const PbcMode pbcMode = getPbcMode(pbc_null);
949     /* We need another pbc pointer, as with charge groups we switch per vsite */
950     const t_pbc* pbc_null2 = pbc_null;
// Loop over all vsite interaction types, dispatching to the per-type constructor
952     for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
954         if (ilist[ftype].empty())
960         int nra = interaction_function[ftype].nratoms;
962         int nr = ilist[ftype].size();
964         const t_iatom* ia = ilist[ftype].iatoms.data();
966         for (int i = 0; i < nr;)
969             /* The vsite and constructing atoms */
972             /* Constants for constructing vsites */
973             real a1 = ip[tp].vsite.a;
974             /* Copy the old position */
976             copy_rvec(x[avsite], xv);
977             /* Construct the vsite depending on type */
984                     constr_vsite1<calculatePosition, calculateVelocity>(
985                             x[ai], x[avsite], getVOrNull(ai), getVOrNull(avsite));
989                     constr_vsite2<calculatePosition, calculateVelocity>(x[ai],
1000                    constr_vsite2FD<calculatePosition, calculateVelocity>(x[ai],
1007                            getVOrNull(avsite));
1012                    b1 = ip[tp].vsite.b;
1013                    constr_vsite3<calculatePosition, calculateVelocity>(x[ai],
1023                            getVOrNull(avsite));
1028                    b1 = ip[tp].vsite.b;
1029                    constr_vsite3FD<calculatePosition, calculateVelocity>(x[ai],
1039                            getVOrNull(avsite));
1044                    b1 = ip[tp].vsite.b;
1045                    constr_vsite3FAD<calculatePosition, calculateVelocity>(x[ai],
1055                            getVOrNull(avsite));
1060                    b1 = ip[tp].vsite.b;
1061                    c1 = ip[tp].vsite.c;
1062                    constr_vsite3OUT<calculatePosition, calculateVelocity>(x[ai],
1073                            getVOrNull(avsite));
1079                    b1 = ip[tp].vsite.b;
1080                    c1 = ip[tp].vsite.c;
1081                    constr_vsite4FD<calculatePosition, calculateVelocity>(x[ai],
1094                            getVOrNull(avsite));
1100                    b1 = ip[tp].vsite.b;
1101                    c1 = ip[tp].vsite.c;
1102                    constr_vsite4FDN<calculatePosition, calculateVelocity>(x[ai],
1115                            getVOrNull(avsite));
1118                    inc = constr_vsiten<calculatePosition, calculateVelocity>(ia, ip, x, pbc_null2, v);
1121                    gmx_fatal(FARGS, "No such vsite type %d in %s, line %d", ftype, __FILE__, __LINE__);
1124             if (pbcMode == PbcMode::all)
1126                 /* Keep the vsite in the same periodic image as before */
1128                 int ishift = pbc_dx_aiuc(pbc_null, x[avsite], xv, dx);
1129                 if (ishift != c_centralShiftIndex)
1131                     rvec_add(xv, dx, x[avsite]);
1135             /* Increment loop variables */
// NOTE(review): garbled extraction — the pbc_null = nullptr else-branch, the
// DD-communication condition, braces, and part of the parameter list were
// dropped from this function; only comments were added below.
1143 /*! \brief Dispatch the vsite construction tasks for all threads
1145 * \param[in]     threadingInfo  Used to divide work over threads when != nullptr
1146 * \param[in,out] x   Coordinates to construct vsites for
1147 * \param[in,out] v   When not empty, velocities are generated for virtual sites
1148 * \param[in] ip      Interaction parameters for all interaction, only vsite parameters are used
1149 * \param[in] ilist   The interaction lists, only vsites are usesd
1150 * \param[in] domainInfo  Information about PBC and DD
1151 * \param[in] box     Used for PBC when PBC is set in domainInfo
1153 template<VSiteCalculatePosition calculatePosition, VSiteCalculateVelocity calculateVelocity>
1154 static void construct_vsites(const ThreadingInfo* threadingInfo,
1157                              ArrayRef<const t_iparams> ip,
1158                              ArrayRef<const InteractionList> ilist,
1159                              const DomainInfo& domainInfo,
1162     const bool useDomdec = domainInfo.useDomdec();
1164     t_pbc pbc, *pbc_null;
1166     /* We only need to do pbc when we have inter update-group vsites.
1167      * Note that with domain decomposition we do not need to apply PBC here
1168      * when we have at least 3 domains along each dimension. Currently we
1169      * do not optimize this case.
1171     if (domainInfo.pbcType_ != PbcType::No && domainInfo.useMolPbc_)
1173         /* This is wasting some CPU time as we now do this multiple times
1177         clear_ivec(null_ivec);
1178         pbc_null = set_pbc_dd(
1179                 &pbc, domainInfo.pbcType_, useDomdec ? domainInfo.domdec_->numCells : null_ivec, FALSE, box);
// With DD, communicate coordinates (and velocities when needed) of non-local construction atoms
1188         if (calculateVelocity == VSiteCalculateVelocity::Yes)
1190             dd_move_x_and_v_vsites(
1191                     *domainInfo.domdec_, box, as_rvec_array(x.data()), as_rvec_array(v.data()));
1195             dd_move_x_vsites(*domainInfo.domdec_, box, as_rvec_array(x.data()));
// Serial path: no threading info or a single thread
1199     if (threadingInfo == nullptr || threadingInfo->numThreads() == 1)
1201         construct_vsites_thread<calculatePosition, calculateVelocity>(x, v, ip, ilist, pbc_null);
1205 #pragma omp parallel num_threads(threadingInfo->numThreads())
1209                 const int          th    = gmx_omp_get_thread_num();
1210                 const VsiteThread& tData = threadingInfo->threadData(th);
1211                 GMX_ASSERT(tData.rangeStart >= 0,
1212                            "The thread data should be initialized before calling construct_vsites");
1214                 construct_vsites_thread<calculatePosition, calculateVelocity>(
1215                         x, v, ip, tData.ilist, pbc_null);
1216                 if (tData.useInterdependentTask)
1218                     /* Here we don't need a barrier (unlike the spreading),
1219                      * since both tasks only construct vsites from particles,
1220                      * or local vsites, not from non-local vsites.
1222                     construct_vsites_thread<calculatePosition, calculateVelocity>(
1223                             x, v, ip, tData.idTask.ilist, pbc_null);
1226             GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
1228         /* Now we can construct the vsites that might depend on other vsites */
1229         construct_vsites_thread<calculatePosition, calculateVelocity>(
1230                 x, v, ip, threadingInfo->threadDataNonLocalDependent().ilist, pbc_null);
1234 void VirtualSitesHandler::Impl::construct(ArrayRef<RVec> x,
1237 VSiteOperation operation) const
1241 case VSiteOperation::Positions:
1242 construct_vsites<VSiteCalculatePosition::Yes, VSiteCalculateVelocity::No>(
1243 &threadingInfo_, x, v, iparams_, ilists_, domainInfo_, box);
1245 case VSiteOperation::Velocities:
1246 construct_vsites<VSiteCalculatePosition::No, VSiteCalculateVelocity::Yes>(
1247 &threadingInfo_, x, v, iparams_, ilists_, domainInfo_, box);
1249 case VSiteOperation::PositionsAndVelocities:
1250 construct_vsites<VSiteCalculatePosition::Yes, VSiteCalculateVelocity::Yes>(
1251 &threadingInfo_, x, v, iparams_, ilists_, domainInfo_, box);
1253 default: gmx_fatal(FARGS, "Unknown virtual site operation");
1257 void VirtualSitesHandler::construct(ArrayRef<RVec> x, ArrayRef<RVec> v, const matrix box, VSiteOperation operation) const
1259 impl_->construct(x, v, box, operation);
1262 void constructVirtualSites(ArrayRef<RVec> x, ArrayRef<const t_iparams> ip, ArrayRef<const InteractionList> ilist)
1266 const DomainInfo domainInfo;
1267 construct_vsites<VSiteCalculatePosition::Yes, VSiteCalculateVelocity::No>(
1268 nullptr, x, {}, ip, ilist, domainInfo, nullptr);
1272 /* Force spreading routines */
1274 static void spread_vsite1(const t_iatom ia[], ArrayRef<RVec> f)
1276 const int av = ia[1];
1277 const int ai = ia[2];
1282 template<VirialHandling virialHandling>
1283 static void spread_vsite2(const t_iatom ia[],
1285 ArrayRef<const RVec> x,
1287 ArrayRef<RVec> fshift,
1297 svmul(1 - a, f[av], fi);
1298 svmul(a, f[av], fj);
1301 rvec_inc(f[ai], fi);
1302 rvec_inc(f[aj], fj);
1305 if (virialHandling == VirialHandling::Pbc)
1311 siv = pbc_dx_aiuc(pbc, x[ai], x[av], dx);
1312 sij = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
1316 siv = c_centralShiftIndex;
1317 sij = c_centralShiftIndex;
1320 if (siv != c_centralShiftIndex || sij != c_centralShiftIndex)
1322 rvec_inc(fshift[siv], f[av]);
1323 rvec_dec(fshift[c_centralShiftIndex], fi);
1324 rvec_dec(fshift[sij], fj);
1328 /* TOTAL: 13 flops */
1331 void constructVirtualSitesGlobal(const gmx_mtop_t& mtop, gmx::ArrayRef<gmx::RVec> x)
1333 GMX_ASSERT(x.ssize() >= mtop.natoms, "x should contain the whole system");
1334 GMX_ASSERT(!mtop.moleculeBlockIndices.empty(),
1335 "molblock indices are needed in constructVsitesGlobal");
1337 for (size_t mb = 0; mb < mtop.molblock.size(); mb++)
1339 const gmx_molblock_t& molb = mtop.molblock[mb];
1340 const gmx_moltype_t& molt = mtop.moltype[molb.type];
1341 if (vsiteIlistNrCount(molt.ilist) > 0)
1343 int atomOffset = mtop.moleculeBlockIndices[mb].globalAtomStart;
1344 for (int mol = 0; mol < molb.nmol; mol++)
1346 constructVirtualSites(
1347 x.subArray(atomOffset, molt.atoms.nr), mtop.ffparams.iparams, molt.ilist);
1348 atomOffset += molt.atoms.nr;
/* Spreads the force on a 2-atom fixed-distance (2FD) virtual site.
 * The vsite lies on the i->j axis at a fixed distance from i, so the
 * spreading is non-linear: the component of the vsite force along xij
 * is projected out before distributing to atom j.
 * For virialHandling == NonLinear the missing non-linear virial
 * contribution is accumulated in dxdf. */
1354 template<VirialHandling virialHandling>
1355 static void spread_vsite2FD(const t_iatom ia[],
1357 ArrayRef<const RVec> x,
1359 ArrayRef<RVec> fshift,
1363 const int av = ia[1]; /* virtual site */
1364 const int ai = ia[2]; /* first constructing atom */
1365 const int aj = ia[3]; /* second constructing atom */
1367 copy_rvec(f[av], fv);
1370 int sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
1373 const real invDistance = inverseNorm(xij);
1374 const real b = a * invDistance;
1375 /* 4 + ?10? flops */
1377 const real fproj = iprod(xij, fv) * invDistance * invDistance; /* projection of fv on xij */
1380 fj[XX] = b * (fv[XX] - fproj * xij[XX]);
1381 fj[YY] = b * (fv[YY] - fproj * xij[YY]);
1382 fj[ZZ] = b * (fv[ZZ] - fproj * xij[ZZ]);
1385 /* b is already calculated in constr_vsite2FD
1386 storing b somewhere will save flops. */
1388 f[ai][XX] += fv[XX] - fj[XX];
1389 f[ai][YY] += fv[YY] - fj[YY];
1390 f[ai][ZZ] += fv[ZZ] - fj[ZZ];
1391 f[aj][XX] += fj[XX];
1392 f[aj][YY] += fj[YY];
1393 f[aj][ZZ] += fj[ZZ];
1396 if (virialHandling == VirialHandling::Pbc)
1402 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
1406 svi = c_centralShiftIndex;
1409 if (svi != c_centralShiftIndex || sji != c_centralShiftIndex)
1411 rvec_dec(fshift[svi], fv);
1412 fshift[c_centralShiftIndex][XX] += fv[XX] - fj[XX];
1413 fshift[c_centralShiftIndex][YY] += fv[YY] - fj[YY];
1414 fshift[c_centralShiftIndex][ZZ] += fv[ZZ] - fj[ZZ];
1415 fshift[sji][XX] += fj[XX];
1416 fshift[sji][YY] += fj[YY];
1417 fshift[sji][ZZ] += fj[ZZ];
1421 if (virialHandling == VirialHandling::NonLinear)
1423 /* Under this condition, the virial for the current forces is not
1424 * calculated from the redistributed forces. This means that
1425 * the effect of non-linear virtual site constructions on the virial
1426 * needs to be added separately. This contribution can be calculated
1427 * in many ways, but the simplest and cheapest way is to use
1428 * the first constructing atom ai as a reference position in space:
1429 * subtract (xv-xi)*fv and add (xj-xi)*fj.
1433 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
1435 for (int i = 0; i < DIM; i++)
1437 for (int j = 0; j < DIM; j++)
1439 /* As xix is a linear combination of j and k, use that here */
1440 dxdf[i][j] += -xiv[i] * fv[j] + xij[i] * fj[j];
1445 /* TOTAL: 38 flops */
/* Spreads the force on a linear 3-atom virtual site: the vsite force f[av]
 * is distributed over the three constructing atoms with weights (1-a-b),
 * a and b. With virialHandling == Pbc shift forces are updated too. */
1448 template<VirialHandling virialHandling>
1449 static void spread_vsite3(const t_iatom ia[],
1452 ArrayRef<const RVec> x,
1454 ArrayRef<RVec> fshift,
1457 rvec fi, fj, fk, dx;
1465 svmul(1 - a - b, f[av], fi); /* share for atom i */
1466 svmul(a, f[av], fj);         /* share for atom j */
1467 svmul(b, f[av], fk);         /* share for atom k */
1470 rvec_inc(f[ai], fi);
1471 rvec_inc(f[aj], fj);
1472 rvec_inc(f[ak], fk);
1475 if (virialHandling == VirialHandling::Pbc)
1482 siv = pbc_dx_aiuc(pbc, x[ai], x[av], dx);
1483 sij = pbc_dx_aiuc(pbc, x[ai], x[aj], dx);
1484 sik = pbc_dx_aiuc(pbc, x[ai], x[ak], dx);
1488 siv = c_centralShiftIndex;
1489 sij = c_centralShiftIndex;
1490 sik = c_centralShiftIndex;
1493 if (siv != c_centralShiftIndex || sij != c_centralShiftIndex || sik != c_centralShiftIndex)
1495 rvec_inc(fshift[siv], f[av]);
1496 rvec_dec(fshift[c_centralShiftIndex], fi);
1497 rvec_dec(fshift[sij], fj);
1498 rvec_dec(fshift[sik], fk);
1502 /* TOTAL: 20 flops */
/* Spreads the force on a 3-atom fixed-distance (3FD) virtual site.
 * The vsite lies at a fixed distance from atom i along the direction to a
 * point on the line j-k, so spreading is non-linear: the force component
 * along xix is projected out before distributing over j and k.
 * For virialHandling == NonLinear the missing non-linear virial
 * contribution is accumulated in dxdf. */
1505 template<VirialHandling virialHandling>
1506 static void spread_vsite3FD(const t_iatom ia[],
1509 ArrayRef<const RVec> x,
1511 ArrayRef<RVec> fshift,
1516 rvec xvi, xij, xjk, xix, fv, temp;
1517 t_iatom av, ai, aj, ak;
1524 copy_rvec(f[av], fv);
1526 sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
1527 skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
1530 /* xix goes from i to point x on the line jk */
1531 xix[XX] = xij[XX] + a * xjk[XX];
1532 xix[YY] = xij[YY] + a * xjk[YY];
1533 xix[ZZ] = xij[ZZ] + a * xjk[ZZ];
1536 const real invDistance = inverseNorm(xix);
1537 const real c = b * invDistance;
1538 /* 4 + ?10? flops */
1540 fproj = iprod(xix, fv) * invDistance * invDistance; /* = (xix . f)/(xix . xix) */
1542 temp[XX] = c * (fv[XX] - fproj * xix[XX]);
1543 temp[YY] = c * (fv[YY] - fproj * xix[YY]);
1544 temp[ZZ] = c * (fv[ZZ] - fproj * xix[ZZ]);
1547 /* c is already calculated in constr_vsite3FD
1548 storing c somewhere will save 26 flops! */
1551 f[ai][XX] += fv[XX] - temp[XX];
1552 f[ai][YY] += fv[YY] - temp[YY];
1553 f[ai][ZZ] += fv[ZZ] - temp[ZZ];
1554 f[aj][XX] += a1 * temp[XX];
1555 f[aj][YY] += a1 * temp[YY];
1556 f[aj][ZZ] += a1 * temp[ZZ];
1557 f[ak][XX] += a * temp[XX];
1558 f[ak][YY] += a * temp[YY];
1559 f[ak][ZZ] += a * temp[ZZ];
1562 if (virialHandling == VirialHandling::Pbc)
1567 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
1571 svi = c_centralShiftIndex;
1574 if (svi != c_centralShiftIndex || sji != c_centralShiftIndex || skj != c_centralShiftIndex)
1576 rvec_dec(fshift[svi], fv);
1577 fshift[c_centralShiftIndex][XX] += fv[XX] - (1 + a) * temp[XX];
1578 fshift[c_centralShiftIndex][YY] += fv[YY] - (1 + a) * temp[YY];
1579 fshift[c_centralShiftIndex][ZZ] += fv[ZZ] - (1 + a) * temp[ZZ];
1580 fshift[sji][XX] += temp[XX];
1581 fshift[sji][YY] += temp[YY];
1582 fshift[sji][ZZ] += temp[ZZ];
1583 fshift[skj][XX] += a * temp[XX];
1584 fshift[skj][YY] += a * temp[YY];
1585 fshift[skj][ZZ] += a * temp[ZZ];
1589 if (virialHandling == VirialHandling::NonLinear)
1591 /* Under this condition, the virial for the current forces is not
1592 * calculated from the redistributed forces. This means that
1593 * the effect of non-linear virtual site constructions on the virial
1594 * needs to be added separately. This contribution can be calculated
1595 * in many ways, but the simplest and cheapest way is to use
1596 * the first constructing atom ai as a reference position in space:
1597 * subtract (xv-xi)*fv and add (xj-xi)*fj + (xk-xi)*fk.
1601 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
1603 for (int i = 0; i < DIM; i++)
1605 for (int j = 0; j < DIM; j++)
1607 /* As xix is a linear combination of j and k, use that here */
1608 dxdf[i][j] += -xiv[i] * fv[j] + xix[i] * temp[j];
1613 /* TOTAL: 61 flops */
/* Spreads the force on a 3-atom fixed-angle-distance (3FAD) virtual site.
 * The construction uses the ij direction and the in-plane component xperp
 * of jk perpendicular to ij; the force is decomposed correspondingly
 * (Fpij = projection on ij, Fppp = projection on xperp).
 * For virialHandling == NonLinear the missing non-linear virial
 * contribution is accumulated in dxdf. */
1616 template<VirialHandling virialHandling>
1617 static void spread_vsite3FAD(const t_iatom ia[],
1620 ArrayRef<const RVec> x,
1622 ArrayRef<RVec> fshift,
1626 rvec xvi, xij, xjk, xperp, Fpij, Fppp, fv, f1, f2, f3;
1627 real a1, b1, c1, c2, invdij, invdij2, invdp, fproj;
1628 t_iatom av, ai, aj, ak;
1635 copy_rvec(f[ia[1]], fv);
1637 sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
1638 skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
1641 invdij = inverseNorm(xij);
1642 invdij2 = invdij * invdij;
1643 c1 = iprod(xij, xjk) * invdij2; /* component of xjk along xij */
1644 xperp[XX] = xjk[XX] - c1 * xij[XX];
1645 xperp[YY] = xjk[YY] - c1 * xij[YY];
1646 xperp[ZZ] = xjk[ZZ] - c1 * xij[ZZ];
1647 /* xperp in plane ijk, perp. to ij */
1648 invdp = inverseNorm(xperp);
1653 /* a1, b1 and c1 are already calculated in constr_vsite3FAD
1654 storing them somewhere will save 45 flops! */
1656 fproj = iprod(xij, fv) * invdij2;
1657 svmul(fproj, xij, Fpij); /* proj. f on xij */
1658 svmul(iprod(xperp, fv) * invdp * invdp, xperp, Fppp); /* proj. f on xperp */
1659 svmul(b1 * fproj, xperp, f3);
1662 rvec_sub(fv, Fpij, f1); /* f1 = f - Fpij */
1663 rvec_sub(f1, Fppp, f2); /* f2 = f - Fpij - Fppp */
1664 for (int d = 0; d < DIM; d++)
1672 f[ai][XX] += fv[XX] - f1[XX] + c1 * f2[XX] + f3[XX];
1673 f[ai][YY] += fv[YY] - f1[YY] + c1 * f2[YY] + f3[YY];
1674 f[ai][ZZ] += fv[ZZ] - f1[ZZ] + c1 * f2[ZZ] + f3[ZZ];
1675 f[aj][XX] += f1[XX] - c2 * f2[XX] - f3[XX];
1676 f[aj][YY] += f1[YY] - c2 * f2[YY] - f3[YY];
1677 f[aj][ZZ] += f1[ZZ] - c2 * f2[ZZ] - f3[ZZ];
1678 f[ak][XX] += f2[XX];
1679 f[ak][YY] += f2[YY];
1680 f[ak][ZZ] += f2[ZZ];
1683 if (virialHandling == VirialHandling::Pbc)
1689 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
1693 svi = c_centralShiftIndex;
1696 if (svi != c_centralShiftIndex || sji != c_centralShiftIndex || skj != c_centralShiftIndex)
1698 rvec_dec(fshift[svi], fv);
1699 fshift[c_centralShiftIndex][XX] += fv[XX] - f1[XX] - (1 - c1) * f2[XX] + f3[XX];
1700 fshift[c_centralShiftIndex][YY] += fv[YY] - f1[YY] - (1 - c1) * f2[YY] + f3[YY];
1701 fshift[c_centralShiftIndex][ZZ] += fv[ZZ] - f1[ZZ] - (1 - c1) * f2[ZZ] + f3[ZZ];
1702 fshift[sji][XX] += f1[XX] - c1 * f2[XX] - f3[XX];
1703 fshift[sji][YY] += f1[YY] - c1 * f2[YY] - f3[YY];
1704 fshift[sji][ZZ] += f1[ZZ] - c1 * f2[ZZ] - f3[ZZ];
1705 fshift[skj][XX] += f2[XX];
1706 fshift[skj][YY] += f2[YY];
1707 fshift[skj][ZZ] += f2[ZZ];
1711 if (virialHandling == VirialHandling::NonLinear)
1714 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
1716 for (int i = 0; i < DIM; i++)
1718 for (int j = 0; j < DIM; j++)
1720 /* Note that xik=xij+xjk, so we have to add xij*f2 */
1721 dxdf[i][j] += -xiv[i] * fv[j] + xij[i] * (f1[j] + (1 - c2) * f2[j] - f3[j])
1727 /* TOTAL: 113 flops */
/* Spreads the force on a 3-atom out-of-plane (3OUT) virtual site.
 * The vsite depends linearly on xij and xik plus a cross-product term,
 * giving the cfx/cfy/cfz cross-term contributions in fj and fk below.
 * For virialHandling == NonLinear the missing non-linear virial
 * contribution is accumulated in dxdf. */
1730 template<VirialHandling virialHandling>
1731 static void spread_vsite3OUT(const t_iatom ia[],
1735 ArrayRef<const RVec> x,
1737 ArrayRef<RVec> fshift,
1741 rvec xvi, xij, xik, fv, fj, fk;
1751 sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
1752 ski = pbc_rvec_sub(pbc, x[ak], x[ai], xik);
1755 copy_rvec(f[av], fv);
1762 fj[XX] = a * fv[XX] - xik[ZZ] * cfy + xik[YY] * cfz;
1763 fj[YY] = xik[ZZ] * cfx + a * fv[YY] - xik[XX] * cfz;
1764 fj[ZZ] = -xik[YY] * cfx + xik[XX] * cfy + a * fv[ZZ];
1766 fk[XX] = b * fv[XX] + xij[ZZ] * cfy - xij[YY] * cfz;
1767 fk[YY] = -xij[ZZ] * cfx + b * fv[YY] + xij[XX] * cfz;
1768 fk[ZZ] = xij[YY] * cfx - xij[XX] * cfy + b * fv[ZZ];
1771 f[ai][XX] += fv[XX] - fj[XX] - fk[XX]; /* atom i gets the remainder */
1772 f[ai][YY] += fv[YY] - fj[YY] - fk[YY];
1773 f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
1774 rvec_inc(f[aj], fj);
1775 rvec_inc(f[ak], fk);
1778 if (virialHandling == VirialHandling::Pbc)
1783 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
1787 svi = c_centralShiftIndex;
1790 if (svi != c_centralShiftIndex || sji != c_centralShiftIndex || ski != c_centralShiftIndex)
1792 rvec_dec(fshift[svi], fv);
1793 fshift[c_centralShiftIndex][XX] += fv[XX] - fj[XX] - fk[XX];
1794 fshift[c_centralShiftIndex][YY] += fv[YY] - fj[YY] - fk[YY];
1795 fshift[c_centralShiftIndex][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
1796 rvec_inc(fshift[sji], fj);
1797 rvec_inc(fshift[ski], fk);
1801 if (virialHandling == VirialHandling::NonLinear)
1805 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
1807 for (int i = 0; i < DIM; i++)
1809 for (int j = 0; j < DIM; j++)
1811 dxdf[i][j] += -xiv[i] * fv[j] + xij[i] * fj[j] + xik[i] * fk[j];
1816 /* TOTAL: 54 flops */
/* Spreads the force on a 4-atom fixed-distance (4FD) virtual site.
 * The vsite lies at a fixed distance from atom i along the direction to a
 * point x on the plane j-k-l; the force component along xix is projected
 * out before distributing over j, k and l.
 * For virialHandling == NonLinear the missing non-linear virial
 * contribution is accumulated in dxdf. */
1819 template<VirialHandling virialHandling>
1820 static void spread_vsite4FD(const t_iatom ia[],
1824 ArrayRef<const RVec> x,
1826 ArrayRef<RVec> fshift,
1831 rvec xvi, xij, xjk, xjl, xix, fv, temp;
1832 int av, ai, aj, ak, al;
1833 int sji, skj, slj, m;
1841 sji = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
1842 skj = pbc_rvec_sub(pbc, x[ak], x[aj], xjk);
1843 slj = pbc_rvec_sub(pbc, x[al], x[aj], xjl);
1846 /* xix goes from i to point x on the plane jkl */
1847 for (m = 0; m < DIM; m++)
1849 xix[m] = xij[m] + a * xjk[m] + b * xjl[m];
1853 const real invDistance = inverseNorm(xix);
1854 const real d = c * invDistance;
1855 /* 4 + ?10? flops */
1857 copy_rvec(f[av], fv);
1859 fproj = iprod(xix, fv) * invDistance * invDistance; /* = (xix . f)/(xix . xix) */
1861 for (m = 0; m < DIM; m++)
1863 temp[m] = d * (fv[m] - fproj * xix[m]);
1867 /* c is already calculated in constr_vsite3FD
1868 storing c somewhere will save 35 flops! */
1871 for (m = 0; m < DIM; m++)
1873 f[ai][m] += fv[m] - temp[m];
1874 f[aj][m] += a1 * temp[m];
1875 f[ak][m] += a * temp[m];
1876 f[al][m] += b * temp[m];
1880 if (virialHandling == VirialHandling::Pbc)
1885 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
1889 svi = c_centralShiftIndex;
1892 if (svi != c_centralShiftIndex || sji != c_centralShiftIndex || skj != c_centralShiftIndex
1893 || slj != c_centralShiftIndex)
1895 rvec_dec(fshift[svi], fv);
1896 for (m = 0; m < DIM; m++)
1898 fshift[c_centralShiftIndex][m] += fv[m] - (1 + a + b) * temp[m];
1899 fshift[sji][m] += temp[m];
1900 fshift[skj][m] += a * temp[m];
1901 fshift[slj][m] += b * temp[m];
1906 if (virialHandling == VirialHandling::NonLinear)
1911 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
1913 for (i = 0; i < DIM; i++)
1915 for (j = 0; j < DIM; j++)
1917 dxdf[i][j] += -xiv[i] * fv[j] + xix[i] * temp[j];
1922 /* TOTAL: 77 flops */
/* Spreads the force on a 4-atom fixed-distance-normal (4FDN) virtual site.
 * The construction involves the normal rm = rja x rjb of the plane spanned
 * by scaled difference vectors, which makes the force derivatives (fj, fk,
 * fl) the lengthy cross-term expressions below.
 * For virialHandling == NonLinear the missing non-linear virial
 * contribution is accumulated in dxdf. */
1925 template<VirialHandling virialHandling>
1926 static void spread_vsite4FDN(const t_iatom ia[],
1930 ArrayRef<const RVec> x,
1932 ArrayRef<RVec> fshift,
1936 rvec xvi, xij, xik, xil, ra, rb, rja, rjb, rab, rm, rt;
1937 rvec fv, fj, fk, fl;
1940 int av, ai, aj, ak, al;
1943 /* DEBUG: check atom indices */
1950 copy_rvec(f[av], fv);
1952 sij = pbc_rvec_sub(pbc, x[aj], x[ai], xij);
1953 sik = pbc_rvec_sub(pbc, x[ak], x[ai], xik);
1954 sil = pbc_rvec_sub(pbc, x[al], x[ai], xil);
1957 ra[XX] = a * xik[XX]; /* ra = a*xik */
1958 ra[YY] = a * xik[YY];
1959 ra[ZZ] = a * xik[ZZ];
1961 rb[XX] = b * xil[XX]; /* rb = b*xil */
1962 rb[YY] = b * xil[YY];
1963 rb[ZZ] = b * xil[ZZ];
1967 rvec_sub(ra, xij, rja);
1968 rvec_sub(rb, xij, rjb);
1969 rvec_sub(rb, ra, rab);
1972 cprod(rja, rjb, rm); /* rm is normal to the rja/rjb plane */
1975 invrm = inverseNorm(rm);
1976 denom = invrm * invrm;
1979 cfx = c * invrm * fv[XX];
1980 cfy = c * invrm * fv[YY];
1981 cfz = c * invrm * fv[ZZ];
1992 fj[XX] = (-rm[XX] * rt[XX]) * cfx + (rab[ZZ] - rm[YY] * rt[XX]) * cfy
1993 + (-rab[YY] - rm[ZZ] * rt[XX]) * cfz;
1994 fj[YY] = (-rab[ZZ] - rm[XX] * rt[YY]) * cfx + (-rm[YY] * rt[YY]) * cfy
1995 + (rab[XX] - rm[ZZ] * rt[YY]) * cfz;
1996 fj[ZZ] = (rab[YY] - rm[XX] * rt[ZZ]) * cfx + (-rab[XX] - rm[YY] * rt[ZZ]) * cfy
1997 + (-rm[ZZ] * rt[ZZ]) * cfz;
2003 rt[XX] *= denom * a;
2004 rt[YY] *= denom * a;
2005 rt[ZZ] *= denom * a;
2008 fk[XX] = (-rm[XX] * rt[XX]) * cfx + (-a * rjb[ZZ] - rm[YY] * rt[XX]) * cfy
2009 + (a * rjb[YY] - rm[ZZ] * rt[XX]) * cfz;
2010 fk[YY] = (a * rjb[ZZ] - rm[XX] * rt[YY]) * cfx + (-rm[YY] * rt[YY]) * cfy
2011 + (-a * rjb[XX] - rm[ZZ] * rt[YY]) * cfz;
2012 fk[ZZ] = (-a * rjb[YY] - rm[XX] * rt[ZZ]) * cfx + (a * rjb[XX] - rm[YY] * rt[ZZ]) * cfy
2013 + (-rm[ZZ] * rt[ZZ]) * cfz;
2019 rt[XX] *= denom * b;
2020 rt[YY] *= denom * b;
2021 rt[ZZ] *= denom * b;
2024 fl[XX] = (-rm[XX] * rt[XX]) * cfx + (b * rja[ZZ] - rm[YY] * rt[XX]) * cfy
2025 + (-b * rja[YY] - rm[ZZ] * rt[XX]) * cfz;
2026 fl[YY] = (-b * rja[ZZ] - rm[XX] * rt[YY]) * cfx + (-rm[YY] * rt[YY]) * cfy
2027 + (b * rja[XX] - rm[ZZ] * rt[YY]) * cfz;
2028 fl[ZZ] = (b * rja[YY] - rm[XX] * rt[ZZ]) * cfx + (-b * rja[XX] - rm[YY] * rt[ZZ]) * cfy
2029 + (-rm[ZZ] * rt[ZZ]) * cfz;
2032 f[ai][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX]; /* atom i gets the remainder */
2033 f[ai][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
2034 f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
2035 rvec_inc(f[aj], fj);
2036 rvec_inc(f[ak], fk);
2037 rvec_inc(f[al], fl);
2040 if (virialHandling == VirialHandling::Pbc)
2045 svi = pbc_rvec_sub(pbc, x[av], x[ai], xvi);
2049 svi = c_centralShiftIndex;
2052 if (svi != c_centralShiftIndex || sij != c_centralShiftIndex || sik != c_centralShiftIndex
2053 || sil != c_centralShiftIndex)
2055 rvec_dec(fshift[svi], fv);
2056 fshift[c_centralShiftIndex][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
2057 fshift[c_centralShiftIndex][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
2058 fshift[c_centralShiftIndex][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
2059 rvec_inc(fshift[sij], fj);
2060 rvec_inc(fshift[sik], fk);
2061 rvec_inc(fshift[sil], fl);
2065 if (virialHandling == VirialHandling::NonLinear)
2070 pbc_rvec_sub(pbc, x[av], x[ai], xiv);
2072 for (i = 0; i < DIM; i++)
2074 for (j = 0; j < DIM; j++)
2076 dxdf[i][j] += -xiv[i] * fv[j] + xij[i] * fj[j] + xik[i] * fk[j] + xil[i] * fl[j];
2081 /* Total: 207 flops (Yuck!) */
/* Spreads the force on an N-atom (VSITEN) virtual site: each of the n
 * constructing atoms receives the fraction a (from its iparams entry) of
 * the vsite force. Returns the number of t_iatom entries consumed, so the
 * caller can advance the interaction list. */
2084 template<VirialHandling virialHandling>
2085 static int spread_vsiten(const t_iatom ia[],
2086 ArrayRef<const t_iparams> ip,
2087 ArrayRef<const RVec> x,
2089 ArrayRef<RVec> fshift,
2097 n3 = 3 * ip[ia[0]].vsiten.n; /* 3 iatom entries per constructing atom */
2099 copy_rvec(x[av], xv);
2101 for (i = 0; i < n3; i += 3)
2106 siv = pbc_dx_aiuc(pbc, x[ai], xv, dx);
2110 siv = c_centralShiftIndex;
2112 a = ip[ia[i]].vsiten.a; /* weight of this constructing atom */
2113 svmul(a, f[av], fi);
2114 rvec_inc(f[ai], fi);
2116 if (virialHandling == VirialHandling::Pbc && siv != c_centralShiftIndex)
2118 rvec_inc(fshift[siv], fi);
2119 rvec_dec(fshift[c_centralShiftIndex], fi);
2129 //! Returns the number of virtual sites in the interaction list, for VSITEN the number of atoms
2130 static int vsite_count(ArrayRef<const InteractionList> ilist, int ftype)
2132 if (ftype == F_VSITEN)
2134 return ilist[ftype].size() / 3; /* VSITEN stores 3 iatom entries per constructing atom */
2138 return ilist[ftype].size() / (1 + interaction_function[ftype].nratoms); /* entries per vsite: type + atoms */
2142 //! Executes the force spreading task for a single thread
2143 template<VirialHandling virialHandling>
2144 static void spreadForceForThread(ArrayRef<const RVec> x,
2146 ArrayRef<RVec> fshift,
2148 ArrayRef<const t_iparams> ip,
2149 ArrayRef<const InteractionList> ilist,
2150 const t_pbc* pbc_null)
2152 const PbcMode pbcMode = getPbcMode(pbc_null);
2153 /* We need another pbc pointer, as with charge groups we switch per vsite */
2154 const t_pbc* pbc_null2 = pbc_null;
2155 gmx::ArrayRef<const int> vsite_pbc;
2157 /* This loop goes backwards (opposite to construction order) so that
2158 * vsites built from other vsites are spread before their constructing vsites */
2159 for (int ftype = c_ftypeVsiteEnd - 1; ftype >= c_ftypeVsiteStart; ftype--)
2161 if (ilist[ftype].empty())
2167 int nra = interaction_function[ftype].nratoms;
2169 int nr = ilist[ftype].size();
2171 const t_iatom* ia = ilist[ftype].iatoms.data();
2173 if (pbcMode == PbcMode::all)
2175 pbc_null2 = pbc_null;
2178 for (int i = 0; i < nr;)
2182 /* Constants for spreading */
2184 a1 = ip[tp].vsite.a;
2185 /* Spread the vsite force depending on the vsite type */
2188 case F_VSITE1: spread_vsite1(ia, f); break;
2190 spread_vsite2<virialHandling>(ia, a1, x, f, fshift, pbc_null2);
2193 spread_vsite2FD<virialHandling>(ia, a1, x, f, fshift, dxdf, pbc_null2);
2196 b1 = ip[tp].vsite.b;
2197 spread_vsite3<virialHandling>(ia, a1, b1, x, f, fshift, pbc_null2);
2200 b1 = ip[tp].vsite.b;
2201 spread_vsite3FD<virialHandling>(ia, a1, b1, x, f, fshift, dxdf, pbc_null2);
2204 b1 = ip[tp].vsite.b;
2205 spread_vsite3FAD<virialHandling>(ia, a1, b1, x, f, fshift, dxdf, pbc_null2);
2208 b1 = ip[tp].vsite.b;
2209 c1 = ip[tp].vsite.c;
2210 spread_vsite3OUT<virialHandling>(ia, a1, b1, c1, x, f, fshift, dxdf, pbc_null2);
2213 b1 = ip[tp].vsite.b;
2214 c1 = ip[tp].vsite.c;
2215 spread_vsite4FD<virialHandling>(ia, a1, b1, c1, x, f, fshift, dxdf, pbc_null2);
2218 b1 = ip[tp].vsite.b;
2219 c1 = ip[tp].vsite.c;
2220 spread_vsite4FDN<virialHandling>(ia, a1, b1, c1, x, f, fshift, dxdf, pbc_null2);
2223 inc = spread_vsiten<virialHandling>(ia, ip, x, f, fshift, pbc_null2);
2226 gmx_fatal(FARGS, "No such vsite type %d in %s, line %d", ftype, __FILE__, __LINE__);
2228 clear_rvec(f[ia[1]]); /* the vsite force has been fully redistributed */
2230 /* Increment loop variables */
2238 //! Wrapper function for calling the templated thread-local spread function
2239 static void spreadForceWrapper(ArrayRef<const RVec> x,
2241 const VirialHandling virialHandling,
2242 ArrayRef<RVec> fshift,
2244 const bool clearDxdf,
2245 ArrayRef<const t_iparams> ip,
2246 ArrayRef<const InteractionList> ilist,
2247 const t_pbc* pbc_null)
2249 if (virialHandling == VirialHandling::NonLinear && clearDxdf)
2254 /* Dispatch the runtime enum to the compile-time template parameter */
2256 case VirialHandling::None:
2257 spreadForceForThread<VirialHandling::None>(x, f, fshift, dxdf, ip, ilist, pbc_null);
2259 case VirialHandling::Pbc:
2260 spreadForceForThread<VirialHandling::Pbc>(x, f, fshift, dxdf, ip, ilist, pbc_null);
2262 case VirialHandling::NonLinear:
2263 spreadForceForThread<VirialHandling::NonLinear>(x, f, fshift, dxdf, ip, ilist, pbc_null);
2268 //! Clears the task force buffer elements that are written by task idTask
2269 static void clearTaskForceBufferUsedElements(InterdependentTask* idTask)
2271 int ntask = idTask->spreadTask.size();
2272 for (int ti = 0; ti < ntask; ti++)
2274 const AtomIndex* atomList = &idTask->atomIndex[idTask->spreadTask[ti]];
2275 int natom = atomList->atom.size();
2276 RVec* force = idTask->force.data();
2277 for (int i = 0; i < natom; i++)
2279 clear_rvec(force[atomList->atom[i]]); /* only clear the entries this task touched */
/* Spreads the forces acting on virtual sites onto their constructing atoms.
 * Serial path: single spreadForceWrapper call. Threaded path: first spread
 * the vsites depending on non-local vsites, then run per-thread tasks in an
 * OpenMP parallel region with interdependent-task force buffers and a
 * reduction of fshift/dxdf afterwards. Also updates flop accounting (nrnb)
 * and the wallcycle counters. */
2284 void VirtualSitesHandler::Impl::spreadForces(ArrayRef<const RVec> x,
2286 const VirialHandling virialHandling,
2287 ArrayRef<RVec> fshift,
2291 gmx_wallcycle* wcycle)
2293 wallcycle_start(wcycle, WallCycleCounter::VsiteSpread);
2295 const bool useDomdec = domainInfo_.useDomdec();
2297 t_pbc pbc, *pbc_null;
2299 if (domainInfo_.useMolPbc_)
2301 /* This is wasting some CPU time as we now do this multiple times
2304 pbc_null = set_pbc_dd(
2305 &pbc, domainInfo_.pbcType_, useDomdec ? domainInfo_.domdec_->numCells : nullptr, FALSE, box);
2314 dd_clear_f_vsites(*domainInfo_.domdec_, f);
2317 const int numThreads = threadingInfo_.numThreads();
2319 if (numThreads == 1)
2322 spreadForceWrapper(x, f, virialHandling, fshift, dxdf, true, iparams_, ilists_, pbc_null);
2324 if (virialHandling == VirialHandling::NonLinear)
2326 for (int i = 0; i < DIM; i++)
2328 for (int j = 0; j < DIM; j++)
2330 virial[i][j] += -0.5 * dxdf[i][j]; /* virial contribution from dxdf */
2337 /* First spread the vsites that might depend on non-local vsites */
2338 auto& nlDependentVSites = threadingInfo_.threadDataNonLocalDependent();
2339 spreadForceWrapper(x,
2343 nlDependentVSites.dxdf,
2346 nlDependentVSites.ilist,
2349 #pragma omp parallel num_threads(numThreads)
2353 int thread = gmx_omp_get_thread_num();
2354 VsiteThread& tData = threadingInfo_.threadData(thread);
2356 ArrayRef<RVec> fshift_t;
2357 if (virialHandling == VirialHandling::Pbc)
2365 fshift_t = tData.fshift; /* thread-local shift-force buffer, reduced below */
2367 for (int i = 0; i < c_numShiftVectors; i++)
2369 clear_rvec(fshift_t[i]);
2374 if (tData.useInterdependentTask)
2376 /* Spread the vsites that spread outside our local range.
2377 * This is done using a thread-local force buffer force.
2378 * First we need to copy the input vsite forces to force.
2380 InterdependentTask* idTask = &tData.idTask;
2382 /* Clear the buffer elements set by our task during
2383 * the last call to spread_vsite_f.
2385 clearTaskForceBufferUsedElements(idTask);
2387 int nvsite = idTask->vsite.size();
2388 for (int i = 0; i < nvsite; i++)
2390 copy_rvec(f[idTask->vsite[i]], idTask->force[idTask->vsite[i]]);
2392 spreadForceWrapper(x,
2402 /* We need a barrier before reducing forces below
2403 * that have been produced by a different thread above.
2407 /* Loop over all thread task and reduce forces they
2408 * produced on atoms that fall in our range.
2409 * Note that atomic reduction would be a simpler solution,
2410 * but that might not have good support on all platforms.
2412 int ntask = idTask->reduceTask.size();
2413 for (int ti = 0; ti < ntask; ti++)
2415 const InterdependentTask& idt_foreign =
2416 threadingInfo_.threadData(idTask->reduceTask[ti]).idTask;
2417 const AtomIndex& atomList = idt_foreign.atomIndex[thread];
2418 const RVec* f_foreign = idt_foreign.force.data();
2420 for (int ind : atomList.atom)
2422 rvec_inc(f[ind], f_foreign[ind]);
2423 /* Clearing of f_foreign is done at the next step */
2426 /* Clear the vsite forces, both in f and force */
2427 for (int i = 0; i < nvsite; i++)
2429 int ind = tData.idTask.vsite[i];
2431 clear_rvec(tData.idTask.force[ind]);
2435 /* Spread the vsites that spread locally only */
2437 x, f, virialHandling, fshift_t, tData.dxdf, false, iparams_, tData.ilist, pbc_null);
2439 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
2442 if (virialHandling == VirialHandling::Pbc)
2444 for (int th = 1; th < numThreads; th++)
2446 for (int i = 0; i < c_numShiftVectors; i++)
2448 rvec_inc(fshift[i], threadingInfo_.threadData(th).fshift[i]); /* reduce per-thread shift forces */
2453 if (virialHandling == VirialHandling::NonLinear)
2455 for (int th = 0; th < numThreads + 1; th++) /* +1: the non-local-dependent task's data */
2457 /* MSVC doesn't like matrix references, so we use a pointer */
2458 const matrix& dxdf = threadingInfo_.threadData(th).dxdf;
2460 for (int i = 0; i < DIM; i++)
2462 for (int j = 0; j < DIM; j++)
2464 virial[i][j] += -0.5 * dxdf[i][j];
2473 dd_move_f_vsites(*domainInfo_.domdec_, f, fshift);
2476 inc_nrnb(nrnb, eNR_VSITE1, vsite_count(ilists_, F_VSITE1));
2477 inc_nrnb(nrnb, eNR_VSITE2, vsite_count(ilists_, F_VSITE2));
2478 inc_nrnb(nrnb, eNR_VSITE2FD, vsite_count(ilists_, F_VSITE2FD));
2479 inc_nrnb(nrnb, eNR_VSITE3, vsite_count(ilists_, F_VSITE3));
2480 inc_nrnb(nrnb, eNR_VSITE3FD, vsite_count(ilists_, F_VSITE3FD));
2481 inc_nrnb(nrnb, eNR_VSITE3FAD, vsite_count(ilists_, F_VSITE3FAD));
2482 inc_nrnb(nrnb, eNR_VSITE3OUT, vsite_count(ilists_, F_VSITE3OUT));
2483 inc_nrnb(nrnb, eNR_VSITE4FD, vsite_count(ilists_, F_VSITE4FD));
2484 inc_nrnb(nrnb, eNR_VSITE4FDN, vsite_count(ilists_, F_VSITE4FDN));
2485 inc_nrnb(nrnb, eNR_VSITEN, vsite_count(ilists_, F_VSITEN));
2487 wallcycle_stop(wcycle, WallCycleCounter::VsiteSpread);
2490 /*! \brief Returns an array with the group index for each atom
2492 * \param[in] grouping The partitioning of the atom range into atom groups
2494 static std::vector<int> makeAtomToGroupMapping(const gmx::RangePartitioning& grouping)
2496 std::vector<int> atomToGroup(grouping.fullRange().end(), 0);
2498 for (int group = 0; group < grouping.numBlocks(); group++)
2500 auto block = grouping.block(group);
2501 std::fill(atomToGroup.begin() + block.begin(), atomToGroup.begin() + block.end(), group);
/* Counts the vsites with a non-linear construction in the whole topology.
 * F_VSITE2, F_VSITE3 and F_VSITEN are the linear types; every other vsite
 * interaction counts as non-linear, weighted by the number of molecules. */
2507 int countNonlinearVsites(const gmx_mtop_t& mtop)
2509 int numNonlinearVsites = 0;
2510 for (const gmx_molblock_t& molb : mtop.molblock)
2512 const gmx_moltype_t& molt = mtop.moltype[molb.type];
2514 for (const auto& ilist : extractILists(molt.ilist, IF_VSITE))
2516 if (ilist.functionType != F_VSITE2 && ilist.functionType != F_VSITE3
2517 && ilist.functionType != F_VSITEN)
2519 numNonlinearVsites += molb.nmol * ilist.iatoms.size() / (1 + NRAL(ilist.functionType));
2524 return numNonlinearVsites;
/* Public entry point for force spreading; forwards to the Impl. */
2527 void VirtualSitesHandler::spreadForces(ArrayRef<const RVec> x,
2529 const VirialHandling virialHandling,
2530 ArrayRef<RVec> fshift,
2534 gmx_wallcycle* wcycle)
2536 impl_->spreadForces(x, f, virialHandling, fshift, virial, nrnb, box, wcycle);
/* Counts vsites whose constructing atoms span more than one update group.
 * With an empty update-group list every vsite counts as inter-group.
 * The per-molecule count is multiplied by the number of molecules in the
 * block. */
2539 int countInterUpdategroupVsites(const gmx_mtop_t& mtop,
2540 gmx::ArrayRef<const gmx::RangePartitioning> updateGroupingsPerMoleculeType)
2542 int n_intercg_vsite = 0;
2543 for (const gmx_molblock_t& molb : mtop.molblock)
2545 const gmx_moltype_t& molt = mtop.moltype[molb.type];
2547 std::vector<int> atomToGroup;
2548 if (!updateGroupingsPerMoleculeType.empty())
2550 atomToGroup = makeAtomToGroupMapping(updateGroupingsPerMoleculeType[molb.type]);
2552 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2554 const int nral = NRAL(ftype);
2555 const InteractionList& il = molt.ilist[ftype];
2556 for (int i = 0; i < il.size(); i += 1 + nral)
2558 bool isInterGroup = atomToGroup.empty(); /* no grouping: treat as inter-group */
2561 const int group = atomToGroup[il.iatoms[1 + i]];
2562 for (int a = 1; a < nral; a++)
2564 if (atomToGroup[il.iatoms[1 + a]] != group)
2566 isInterGroup = true;
2573 n_intercg_vsite += molb.nmol;
2579 return n_intercg_vsite;
/* Factory for a VirtualSitesHandler. Scans all function types for vsite
 * interactions (also validating the c_ftypeVsiteStart/End range via asserts)
 * and constructs the handler. Requires a valid commrec. */
2582 std::unique_ptr<VirtualSitesHandler> makeVirtualSitesHandler(const gmx_mtop_t& mtop,
2583 const t_commrec* cr,
2585 ArrayRef<const RangePartitioning> updateGroupingPerMoleculeType)
2587 GMX_RELEASE_ASSERT(cr != nullptr, "We need a valid commrec");
2589 std::unique_ptr<VirtualSitesHandler> vsite;
2591 /* check if there are vsites */
2593 for (int ftype = 0; ftype < F_NRE; ftype++)
2595 if (interaction_function[ftype].flags & IF_VSITE)
2597 GMX_ASSERT(ftype >= c_ftypeVsiteStart && ftype < c_ftypeVsiteEnd,
2598 "c_ftypeVsiteStart and/or c_ftypeVsiteEnd do not have correct values");
2600 nvsite += gmx_mtop_ftype_count(mtop, ftype);
2604 GMX_ASSERT(ftype < c_ftypeVsiteStart || ftype >= c_ftypeVsiteEnd,
2605 "c_ftypeVsiteStart and/or c_ftypeVsiteEnd do not have correct values");
2614 return std::make_unique<VirtualSitesHandler>(mtop, cr->dd, pbcType, updateGroupingPerMoleculeType);
/* Sets up per-thread vsite data. With more than one thread, numThreads_+1
 * VsiteThread structures are allocated: one per thread (first-touch
 * initialized inside the parallel-for for NUMA locality) plus one extra for
 * the vsites that depend on non-local vsites. */
2617 ThreadingInfo::ThreadingInfo() : numThreads_(gmx_omp_nthreads_get(ModuleMultiThread::VirtualSite))
2619 if (numThreads_ > 1)
2621 /* We need one extra thread data structure for the overlap vsites */
2622 tData_.resize(numThreads_ + 1);
2623 #pragma omp parallel for num_threads(numThreads_) schedule(static)
2624 for (int thread = 0; thread < numThreads_; thread++)
2628 tData_[thread] = std::make_unique<VsiteThread>();
2630 InterdependentTask& idTask = tData_[thread]->idTask;
2632 idTask.atomIndex.resize(numThreads_);
2634 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
2636 if (numThreads_ > 1)
2638 tData_[numThreads_] = std::make_unique<VsiteThread>(); /* the extra overlap task */
/* Impl constructor: counts inter-update-group vsites and derives whether
 * molecular PBC handling is needed (PBC in use and at least one vsite
 * crossing update groups). */
2643 VirtualSitesHandler::Impl::Impl(const gmx_mtop_t& mtop,
2644 gmx_domdec_t* domdec,
2645 const PbcType pbcType,
2646 const ArrayRef<const RangePartitioning> updateGroupingPerMoleculeType) :
2647 numInterUpdategroupVirtualSites_(countInterUpdategroupVsites(mtop, updateGroupingPerMoleculeType)),
2648 domainInfo_({ pbcType, pbcType != PbcType::No && numInterUpdategroupVirtualSites_ > 0, domdec }),
2649 iparams_(mtop.ffparams.iparams)
/* Public constructor: forwards all arguments to the Impl. */
2653 VirtualSitesHandler::VirtualSitesHandler(const gmx_mtop_t& mtop,
2654 gmx_domdec_t* domdec,
2655 const PbcType pbcType,
2656 const ArrayRef<const RangePartitioning> updateGroupingPerMoleculeType) :
2657 impl_(new Impl(mtop, domdec, pbcType, updateGroupingPerMoleculeType))
2661 //! Flag that atom \p atom which is home in another task, if it has not already been added before
2662 static inline void flagAtom(InterdependentTask* idTask, const int atom, const int numThreads, const int numAtomsPerThread)
2664 if (!idTask->use[atom]) /* only flag each atom once */
2666 idTask->use[atom] = true;
2667 int thread = atom / numAtomsPerThread; /* owning thread by static atom partition */
2668 /* Assign all non-local atom force writes to thread 0 */
2669 if (thread >= numThreads)
2673 idTask->atomIndex[thread].atom.push_back(atom);
2677 /*! \brief Here we try to assign all vsites that are in our local range.
2679 * Our task local atom range is tData->rangeStart - tData->rangeEnd.
2680 * Vsites that depend only on local atoms, as indicated by taskIndex[]==thread,
2681 * are assigned to task tData->ilist. Vsites that depend on non-local atoms
2682 * but not on other vsites are assigned to task tData->id_task.ilist.
2683 * taskIndex[] is set for all vsites in our range, either to our local tasks
2684 * or to the single last task as taskIndex[]=2*nthreads.
2686 static void assignVsitesToThread(VsiteThread* tData,
2690 gmx::ArrayRef<int> taskIndex,
2691 ArrayRef<const InteractionList> ilist,
2692 ArrayRef<const t_iparams> ip,
2693 ArrayRef<const ParticleType> ptype)
2695 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2697 tData->ilist[ftype].clear();
2698 tData->idTask.ilist[ftype].clear();
2700 const int nral1 = 1 + NRAL(ftype);
2701 const int* iat = ilist[ftype].iatoms.data();
2702 for (int i = 0; i < ilist[ftype].size();)
2704 /* Get the number of iatom entries in this virtual site.
2705 * The 3 below for F_VSITEN is from 1+NRAL(ftype)=3
2707 const int numIAtoms = (ftype == F_VSITEN ? ip[iat[i]].vsiten.n * 3 : nral1);
2709 if (iat[1 + i] < tData->rangeStart || iat[1 + i] >= tData->rangeEnd)
2711 /* This vsite belongs to a different thread */
2716 /* We would like to assign this vsite to task thread,
2717 * but it might depend on atoms outside the atom range of thread
2718 * or on another vsite not assigned to task thread.
2721 if (ftype != F_VSITEN)
2723 for (int j = i + 2; j < i + nral1; j++)
2725 /* Do a range check to avoid a harmless race on taskIndex */
2726 if (iat[j] < tData->rangeStart || iat[j] >= tData->rangeEnd || taskIndex[iat[j]] != thread)
2728 if (!tData->useInterdependentTask || ptype[iat[j]] == ParticleType::VSite)
2730 /* At least one constructing atom is a vsite
2731 * that is not assigned to the same thread.
2732 * Put this vsite into a separate task.
2738 /* There are constructing atoms outside our range,
2739 * put this vsite into a second task to be executed
2740 * on the same thread. During construction no barrier
2741 * is needed between the two tasks on the same thread.
2742 * During spreading we need to run this task with
2743 * an additional thread-local intermediate force buffer
2744 * (or atomic reduction) and a barrier between the two
2747 task = nthread + thread;
2753 for (int j = i + 2; j < i + numIAtoms; j += 3) /* F_VSITEN: every third entry is an atom */
2755 /* Do a range check to avoid a harmless race on taskIndex */
2756 if (iat[j] < tData->rangeStart || iat[j] >= tData->rangeEnd || taskIndex[iat[j]] != thread)
2758 GMX_ASSERT(ptype[iat[j]] != ParticleType::VSite,
2759 "A vsite to be assigned in assignVsitesToThread has a vsite as "
2760 "a constructing atom that does not belong to our task, such "
2761 "vsites should be assigned to the single 'master' task");
2763 if (tData->useInterdependentTask)
2765 // Assign to the interdependent task
2766 task = nthread + thread;
2770 // Assign to the separate, non-parallel task
2777 /* Update this vsite's thread index entry */
2778 taskIndex[iat[1 + i]] = task;
2780 if (task == thread || task == nthread + thread)
2782 /* Copy this vsite to the thread data struct of thread */
2783 InteractionList* il_task;
2786 il_task = &tData->ilist[ftype];
2790 il_task = &tData->idTask.ilist[ftype];
2792 /* Copy the vsite data to the thread-task local array */
2793 il_task->push_back(iat[i], numIAtoms - 1, iat + i + 1);
2794 if (task == nthread + thread)
2796 /* This vsite writes outside our own task force block.
2797 * Put it into the interdependent task list and flag
2798 * the atoms involved for reduction.
2800 tData->idTask.vsite.push_back(iat[i + 1]);
2801 if (ftype != F_VSITEN)
2803 for (int j = i + 2; j < i + nral1; j++)
2805 flagAtom(&tData->idTask, iat[j], nthread, natperthread);
2810 for (int j = i + 2; j < i + numIAtoms; j += 3)
2812 flagAtom(&tData->idTask, iat[j], nthread, natperthread);
2823 /*! \brief Assign all vsites with taskIndex[]==task to task tData */
2824 static void assignVsitesToSingleTask(VsiteThread* tData,
2826 gmx::ArrayRef<const int> taskIndex,
2827 ArrayRef<const InteractionList> ilist,
2828 ArrayRef<const t_iparams> ip)
2830 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2832 tData->ilist[ftype].clear();
2833 tData->idTask.ilist[ftype].clear();
2835 int nral1 = 1 + NRAL(ftype);
2837 const int* iat = ilist[ftype].iatoms.data();
2838 InteractionList* il_task = &tData->ilist[ftype];
2840 for (int i = 0; i < ilist[ftype].size();)
2842 if (ftype == F_VSITEN)
2844 /* The 3 below is from 1+NRAL(ftype)=3 */
2845 inc = ip[iat[i]].vsiten.n * 3;
2847 /* Check if the vsite is assigned to our task */
2848 if (taskIndex[iat[1 + i]] == task)
2850 /* Copy the vsite data to the thread-task local array */
2851 il_task->push_back(iat[i], inc - 1, iat + i + 1);
2859 void ThreadingInfo::setVirtualSites(ArrayRef<const InteractionList> ilists,
2860 ArrayRef<const t_iparams> iparams,
2863 ArrayRef<const ParticleType> ptype,
2864 const bool useDomdec)
2866 if (numThreads_ <= 1)
2872 /* The current way of distributing the vsites over threads in primitive.
2873 * We divide the atom range 0 - natoms_in_vsite uniformly over threads,
2874 * without taking into account how the vsites are distributed.
2875 * Without domain decomposition we at least tighten the upper bound
2876 * of the range (useful for common systems such as a vsite-protein
2878 * With domain decomposition, as long as the vsites are distributed
2879 * uniformly in each domain along the major dimension, usually x,
2880 * it will also perform well.
2882 int vsite_atom_range;
2886 vsite_atom_range = -1;
2887 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
2890 if (ftype != F_VSITEN)
2892 int nral1 = 1 + NRAL(ftype);
2893 ArrayRef<const int> iat = ilists[ftype].iatoms;
2894 for (int i = 0; i < ilists[ftype].size(); i += nral1)
2896 for (int j = i + 1; j < i + nral1; j++)
2898 vsite_atom_range = std::max(vsite_atom_range, iat[j]);
2906 ArrayRef<const int> iat = ilists[ftype].iatoms;
2909 while (i < ilists[ftype].size())
2911 /* The 3 below is from 1+NRAL(ftype)=3 */
2912 vs_ind_end = i + iparams[iat[i]].vsiten.n * 3;
2914 vsite_atom_range = std::max(vsite_atom_range, iat[i + 1]);
2915 while (i < vs_ind_end)
2917 vsite_atom_range = std::max(vsite_atom_range, iat[i + 2]);
2925 natperthread = (vsite_atom_range + numThreads_ - 1) / numThreads_;
2929 /* Any local or not local atom could be involved in virtual sites.
2930 * But since we usually have very few non-local virtual sites
2931 * (only non-local vsites that depend on local vsites),
2932 * we distribute the local atom range equally over the threads.
2933 * When assigning vsites to threads, we should take care that the last
2934 * threads also covers the non-local range.
2936 vsite_atom_range = numAtoms;
2937 natperthread = (homenr + numThreads_ - 1) / numThreads_;
2943 "virtual site thread dist: natoms %d, range %d, natperthread %d\n",
2949 /* To simplify the vsite assignment, we make an index which tells us
2950 * to which task particles, both non-vsites and vsites, are assigned.
2952 taskIndex_.resize(numAtoms);
2954 /* Initialize the task index array. Here we assign the non-vsite
2955 * particles to task=thread, so we easily figure out if vsites
2956 * depend on local and/or non-local particles in assignVsitesToThread.
2960 for (int i = 0; i < numAtoms; i++)
2962 if (ptype[i] == ParticleType::VSite)
2964 /* vsites are not assigned to a task yet */
2969 /* assign non-vsite particles to task thread */
2970 taskIndex_[i] = thread;
2972 if (i == (thread + 1) * natperthread && thread < numThreads_)
2979 #pragma omp parallel num_threads(numThreads_)
2983 int thread = gmx_omp_get_thread_num();
2984 VsiteThread& tData = *tData_[thread];
2986 /* Clear the buffer use flags that were set before */
2987 if (tData.useInterdependentTask)
2989 InterdependentTask& idTask = tData.idTask;
2991 /* To avoid an extra OpenMP barrier in spread_vsite_f,
2992 * we clear the force buffer at the next step,
2993 * so we need to do it here as well.
2995 clearTaskForceBufferUsedElements(&idTask);
2997 idTask.vsite.resize(0);
2998 for (int t = 0; t < numThreads_; t++)
3000 AtomIndex& atomIndex = idTask.atomIndex[t];
3001 int natom = atomIndex.atom.size();
3002 for (int i = 0; i < natom; i++)
3004 idTask.use[atomIndex.atom[i]] = false;
3006 atomIndex.atom.resize(0);
3011 /* To avoid large f_buf allocations of #threads*vsite_atom_range
3012 * we don't use the interdependent tasks with more than 200000 atoms.
3013 * This doesn't affect performance, since with such a large range
3014 * relatively few vsites will end up in the separate task.
3015 * Note that useInterdependentTask should be the same for all threads.
3017 const int c_maxNumLocalAtomsForInterdependentTask = 200000;
3018 tData.useInterdependentTask = (vsite_atom_range <= c_maxNumLocalAtomsForInterdependentTask);
3019 if (tData.useInterdependentTask)
3021 size_t natoms_use_in_vsites = vsite_atom_range;
3022 InterdependentTask& idTask = tData.idTask;
3023 /* To avoid resizing and re-clearing every nstlist steps,
3024 * we never down size the force buffer.
3026 if (natoms_use_in_vsites > idTask.force.size() || natoms_use_in_vsites > idTask.use.size())
3028 idTask.force.resize(natoms_use_in_vsites, { 0, 0, 0 });
3029 idTask.use.resize(natoms_use_in_vsites, false);
3033 /* Assign all vsites that can execute independently on threads */
3034 tData.rangeStart = thread * natperthread;
3035 if (thread < numThreads_ - 1)
3037 tData.rangeEnd = (thread + 1) * natperthread;
3041 /* The last thread should cover up to the end of the range */
3042 tData.rangeEnd = numAtoms;
3044 assignVsitesToThread(
3045 &tData, thread, numThreads_, natperthread, taskIndex_, ilists, iparams, ptype);
3047 if (tData.useInterdependentTask)
3049 /* In the worst case, all tasks write to force ranges of
3050 * all other tasks, leading to #tasks^2 scaling (this is only
3051 * the overhead, the actual flops remain constant).
3052 * But in most cases there is far less coupling. To improve
3053 * scaling at high thread counts we therefore construct
3054 * an index to only loop over the actually affected tasks.
3056 InterdependentTask& idTask = tData.idTask;
3058 /* Ensure assignVsitesToThread finished on other threads */
3061 idTask.spreadTask.resize(0);
3062 idTask.reduceTask.resize(0);
3063 for (int t = 0; t < numThreads_; t++)
3065 /* Do we write to the force buffer of task t? */
3066 if (!idTask.atomIndex[t].atom.empty())
3068 idTask.spreadTask.push_back(t);
3070 /* Does task t write to our force buffer? */
3071 if (!tData_[t]->idTask.atomIndex[thread].atom.empty())
3073 idTask.reduceTask.push_back(t);
3078 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
3080 /* Assign all remaining vsites, that will have taskIndex[]=2*vsite->nthreads,
3081 * to a single task that will not run in parallel with other tasks.
3083 assignVsitesToSingleTask(tData_[numThreads_].get(), 2 * numThreads_, taskIndex_, ilists, iparams);
3085 if (debug && numThreads_ > 1)
3088 "virtual site useInterdependentTask %d, nuse:\n",
3089 static_cast<int>(tData_[0]->useInterdependentTask));
3090 for (int th = 0; th < numThreads_ + 1; th++)
3092 fprintf(debug, " %4d", tData_[th]->idTask.nuse);
3094 fprintf(debug, "\n");
3096 for (int ftype = c_ftypeVsiteStart; ftype < c_ftypeVsiteEnd; ftype++)
3098 if (!ilists[ftype].empty())
3100 fprintf(debug, "%-20s thread dist:", interaction_function[ftype].longname);
3101 for (int th = 0; th < numThreads_ + 1; th++)
3105 tData_[th]->ilist[ftype].size(),
3106 tData_[th]->idTask.ilist[ftype].size());
3108 fprintf(debug, "\n");
3114 int nrOrig = vsiteIlistNrCount(ilists);
3116 for (int th = 0; th < numThreads_ + 1; th++)
3118 nrThreaded += vsiteIlistNrCount(tData_[th]->ilist) + vsiteIlistNrCount(tData_[th]->idTask.ilist);
3120 GMX_ASSERT(nrThreaded == nrOrig,
3121 "The number of virtual sites assigned to all thread task has to match the total "
3122 "number of virtual sites");
3126 void VirtualSitesHandler::Impl::setVirtualSites(ArrayRef<const InteractionList> ilists,
3129 ArrayRef<const ParticleType> ptype)
3133 threadingInfo_.setVirtualSites(ilists, iparams_, numAtoms, homenr, ptype, domainInfo_.useDomdec());
3136 void VirtualSitesHandler::setVirtualSites(ArrayRef<const InteractionList> ilists,
3139 ArrayRef<const ParticleType> ptype)
3141 impl_->setVirtualSites(ilists, numAtoms, homenr, ptype);