2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
7 * Copyright (c) 2018,2019,2020,2021, by the GROMACS development team, led by
8 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
9 * and including many others, as listed in the AUTHORS file in the
10 * top-level source directory and at http://www.gromacs.org.
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
45 #include "gromacs/fileio/confio.h"
46 #include "gromacs/gmxlib/network.h"
47 #include "gromacs/math/functions.h"
48 #include "gromacs/math/units.h"
49 #include "gromacs/math/utilities.h"
50 #include "gromacs/math/vec.h"
51 #include "gromacs/math/vectypes.h"
52 #include "gromacs/mdtypes/commrec.h"
53 #include "gromacs/mdtypes/inputrec.h"
54 #include "gromacs/mdtypes/md_enums.h"
55 #include "gromacs/mdtypes/state.h"
56 #include "gromacs/pbcutil/pbc.h"
57 #include "gromacs/pulling/pull.h"
58 #include "gromacs/utility/arrayref.h"
59 #include "gromacs/utility/fatalerror.h"
60 #include "gromacs/utility/futil.h"
61 #include "gromacs/utility/gmxassert.h"
62 #include "gromacs/utility/real.h"
63 #include "gromacs/utility/smalloc.h"
65 #include "pull_internal.h"
// Helper function to deduce MPI datatype from the type of data.
// This float overload selects the single-precision MPI type; the pointer is
// used for overload resolution only, it is never dereferenced.
gmx_unused static MPI_Datatype mpiDatatype(const float gmx_unused* data)
// Helper function to deduce MPI datatype from the type of data.
// This double overload selects the double-precision MPI type; the pointer is
// used for overload resolution only, it is never dereferenced.
gmx_unused static MPI_Datatype mpiDatatype(const double gmx_unused* data)
// Helper function that sums n values of type real in place over the ranks
// of cr; note that gmx_sum(d) should actually be templated
gmx_unused static void gmxAllReduce(int n, real* data, const t_commrec* cr)
// Helper function that sums n double values in place over the ranks of cr;
// note that gmx_sum(d) should actually be templated
gmx_unused static void gmxAllReduce(int n, double* data, const t_commrec* cr)
    /* Delegate to the double-precision global sum over cr's group communicator */
    gmx_sumd(n, data, cr);
// Reduce data of n elements over all ranks currently participating in pull.
// T is real or float/double (template header not visible in this view); the
// reduction is an in-place element-wise sum.
static void pullAllReduce(const t_commrec* cr, pull_comm_t* comm, int n, T* data)
    /* Only reduce when running in parallel; serial runs need no communication */
    if (cr != nullptr && PAR(cr))
        if (comm->bParticipateAll)
            /* Sum the contributions over all DD ranks */
            gmxAllReduce(n, data, cr);
        /* Separate branch because gmx_sum uses cr->mpi_comm_mygroup */
        /* Only a subset of ranks participates: reduce in place over the
         * dedicated pull communicator instead of the group communicator. */
        MPI_Allreduce(MPI_IN_PLACE, data, n, mpiDatatype(data), MPI_SUM, comm->mpi_comm_com);
        /* Reaching here without MPI support is an internal inconsistency */
        gmx_incons("comm->bParticipateAll=FALSE without GMX_MPI");
/* Copies the coordinates of the PBC atom of pgrp to x_pbc.
 * When those coordinates are not available on this rank, clears x_pbc
 * (so that a later MPI_Allreduce sum yields the value from the home rank).
 */
static void setPbcAtomCoords(const pull_group_work_t& pgrp, ArrayRef<const RVec> x, rvec x_pbc)
    /* A non-null pbcAtomSet means the PBC atom is managed with domain
     * decomposition and may live on another rank */
    if (pgrp.pbcAtomSet != nullptr)
        if (pgrp.pbcAtomSet->numAtomsLocal() > 0)
            /* We have the atom locally, copy its coordinates */
            copy_rvec(x[pgrp.pbcAtomSet->localIndex()[0]], x_pbc);
        /* Another rank has it, clear the coordinates for MPI_Allreduce */
        /* No atom set: index into x directly with the global PBC-atom index */
        copy_rvec(x[pgrp.params.pbcatom], x_pbc);
/* Gathers the PBC reference-atom coordinates of all pull groups into x_pbc,
 * one RVec per group, reducing over ranks when running in parallel. */
static void pull_set_pbcatoms(const t_commrec* cr, struct pull_t* pull, ArrayRef<const RVec> x, ArrayRef<RVec> x_pbc)
    for (size_t g = 0; g < pull->group.size(); g++)
        const pull_group_work_t& group = pull->group[g];
        /* Only groups whose COM is computed relative to a reference atom or
         * to the previous-step COM need a PBC reference coordinate */
        if (group.needToCalcCom && (group.epgrppbc == epgrppbcREFAT || group.epgrppbc == epgrppbcPREVSTEPCOM))
            setPbcAtomCoords(pull->group[g], x, x_pbc[g]);
            /* Other groups get zeroed entries so the global sum is unaffected */
            clear_rvec(x_pbc[g]);
    /* numPbcAtoms is computed in elided code above — presumably the count of
     * groups needing a PBC atom; verify against the full source */
    if (cr && PAR(cr) && numPbcAtoms > 0)
        /* Sum over participating ranks to get x_pbc from the home ranks.
         * This can be very expensive at high parallelization, so we only
         * do this after each DD repartitioning.
         */
        pullAllReduce(cr, &pull->comm, pull->group.size() * DIM, static_cast<real*>(x_pbc[0]));
/* Builds the dynamic (cylinder-weighted) reference group for each pull
 * coordinate with cylinder geometry: computes per-atom radial weights, the
 * weighted COM of the cylinder group, and the radial force factor ffrad.
 * Results are accumulated locally, reduced over ranks, then finalized.
 * Additional parameters (pull, pbc, t) are elided from this view.
 */
static void make_cyl_refgrps(const t_commrec* cr,
                             ArrayRef<const real> masses,
                             ArrayRef<const RVec> x)
    pull_comm_t* comm = &pull->comm;

    GMX_ASSERT(comm->cylinderBuffer.size() == pull->coord.size() * c_cylinderBufferStride,
               "cylinderBuffer should have the correct size");

    /* Precompute 1/r_cyl^2, used to normalize radial distances below */
    double inv_cyl_r2 = 1.0 / gmx::square(pull->params.cylinder_r);

    /* loop over all groups to make a reference group for each */
    int bufferOffset = 0;
    for (pull_coord_work_t& pcrd : pull->coord)
        double sum_a, wmass, wwmass;
        dvec   radf_fac0, radf_fac1;
        clear_dvec(radf_fac0);
        clear_dvec(radf_fac1);
        if (pcrd.params.eGeom == PullGroupGeometry::Cylinder)
            /* pref will be the same group for all pull coordinates */
            const pull_group_work_t& pref  = pull->group[pcrd.params.group[0]];
            const pull_group_work_t& pgrp  = pull->group[pcrd.params.group[1]];
            pull_group_work_t&       pdyna = *pcrd.dynamicGroup0;
            copy_dvec_to_rvec(pcrd.spatialData.vec, direction);

            /* Since we have not calculated the COM of the cylinder group yet,
             * we calculate distances with respect to location of the pull
             * group minus the reference position along the vector.
             * here we already have the COM of the pull group. This resolves
             * any PBC issues and we don't need to use a PBC-atom here.
             */
            if (pcrd.params.rate != 0)
                /* With rate=0, value_ref is set initially */
                pcrd.value_ref = pcrd.params.init + pcrd.params.rate * t;
            /* Reference point: pull-group COM shifted back along the pull vector */
            for (int m = 0; m < DIM; m++)
                reference[m] = pgrp.x[m] - pcrd.spatialData.vec[m] * pcrd.value_ref;

            auto localAtomIndices = pref.atomSet.localIndex();

            /* This actually only needs to be done at init or DD time,
             * but resizing with the same size does not cause much overhead.
             */
            pdyna.localWeights.resize(localAtomIndices.size());
            pdyna.mdw.resize(localAtomIndices.size());
            pdyna.dv.resize(localAtomIndices.size());

            /* loop over all atoms in the main ref group */
            for (gmx::index indexInSet = 0; indexInSet < localAtomIndices.ssize(); indexInSet++)
                int atomIndex = localAtomIndices[indexInSet];
                /* Minimum-image vector from the reference point to the atom */
                pbc_dx_aiuc(&pbc, x[atomIndex], reference, dx);
                /* Projection of dx onto the pull direction (axial component) */
                double axialLocation = iprod(direction, dx);
                for (int m = 0; m < DIM; m++)
                    /* Determine the radial components */
                    radialLocation[m] = dx[m] - axialLocation * direction[m];
                    dr2 += gmx::square(radialLocation[m]);
                /* Radial distance squared in units of cylinder_r^2 */
                double dr2_rel = dr2 * inv_cyl_r2;
                /* add atom to sum of COM and to weight array */
                double mass = masses[atomIndex];
                /* The radial weight function is 1-2x^2+x^4,
                 * where x=r/cylinder_r. Since this function depends
                 * on the radial component, we also get radial forces
                 */
                double weight    = 1 + (-2 + dr2_rel) * dr2_rel;
                double dweight_r = (-4 + 4 * dr2_rel) * inv_cyl_r2;
                pdyna.localWeights[indexInSet] = weight;
                sum_a += mass * weight * axialLocation;
                wmass += mass * weight;
                wwmass += mass * weight * weight;
                /* mdw = m * dw/dr * radial vector; needed for radial forces */
                dsvmul(mass * dweight_r, radialLocation, mdw);
                copy_dvec(mdw, pdyna.mdw[indexInSet]);
                /* Currently we only have the axial component of the
                 * offset from the cylinder COM up to an unknown offset.
                 * We add this offset after the reduction needed
                 * for determining the COM of the cylinder group.
                 */
                pdyna.dv[indexInSet] = axialLocation;
                for (int m = 0; m < DIM; m++)
                    radf_fac0[m] += mdw[m];
                    radf_fac1[m] += mdw[m] * axialLocation;
                /* Atom outside the cylinder (elided branch): zero weight */
                pdyna.localWeights[indexInSet] = 0;
        /* Pack this coordinate's partial sums into its slice of the
         * shared reduction buffer */
        auto buffer = gmx::arrayRefFromArray(comm->cylinderBuffer.data() + bufferOffset,
                                             c_cylinderBufferStride);
        bufferOffset += c_cylinderBufferStride;
        buffer[3] = radf_fac0[XX];
        buffer[4] = radf_fac0[YY];
        buffer[5] = radf_fac0[ZZ];
        buffer[6] = radf_fac1[XX];
        buffer[7] = radf_fac1[YY];
        buffer[8] = radf_fac1[ZZ];
    if (cr != nullptr && PAR(cr))
        /* Sum the contributions over the ranks */
        pullAllReduce(cr, comm, pull->coord.size() * c_cylinderBufferStride, comm->cylinderBuffer.data());
    /* Second pass: finalize the cylinder-group COM and force factors from
     * the globally reduced buffer */
    for (pull_coord_work_t& pcrd : pull->coord)
        if (pcrd.params.eGeom == PullGroupGeometry::Cylinder)
            pull_group_work_t&    dynamicGroup0 = *pcrd.dynamicGroup0;
            pull_group_work_t&    group1        = pull->group[pcrd.params.group[1]];
            PullCoordSpatialData& spatialData   = pcrd.spatialData;

            auto buffer = gmx::constArrayRefFromArray(comm->cylinderBuffer.data() + bufferOffset,
                                                      c_cylinderBufferStride);
            bufferOffset += c_cylinderBufferStride;
            /* buffer layout: [0]=wmass, [1]=wwmass, [2]=sum_a (axial COM sum),
             * [3..5]=radf_fac0, [6..8]=radf_fac1 */
            double wmass  = buffer[0];
            double wwmass = buffer[1];
            dynamicGroup0.mwscale = 1.0 / wmass;
            /* Cylinder pulling can't be used with constraints, but we set
             * wscale and invtm anyhow, in case someone would like to use them.
             */
            dynamicGroup0.wscale = wmass / wwmass;
            dynamicGroup0.invtm  = wwmass / (wmass * wmass);

            /* We store the deviation of the COM from the reference location
             * used above, since we need it when we apply the radial forces
             * to the atoms in the cylinder group.
             */
            spatialData.cyl_dev = 0;
            for (int m = 0; m < DIM; m++)
                double reference = group1.x[m] - spatialData.vec[m] * pcrd.value_ref;
                double dist      = -spatialData.vec[m] * buffer[2] * dynamicGroup0.mwscale;
                dynamicGroup0.x[m] = reference - dist;
                spatialData.cyl_dev += dist;
            /* Now we know the exact COM of the cylinder reference group,
             * we can determine the radial force factor (ffrad) that when
             * multiplied with the axial pull force will give the radial
             * force on the pulled (non-cylinder) group.
             */
            for (int m = 0; m < DIM; m++)
                spatialData.ffrad[m] = (buffer[6 + m] + buffer[3 + m] * spatialData.cyl_dev) / wmass;
                /* Debug output (fprintf call partially elided in this view) */
                    "Pull cylinder group %d:%8.3f%8.3f%8.3f m:%8.3f\n",
                    pcrd.params.coordIndex,
                    1.0 / dynamicGroup0.invtm);
                    "ffrad %8.3f %8.3f %8.3f\n",
                    spatialData.ffrad[XX],
                    spatialData.ffrad[YY],
                    spatialData.ffrad[ZZ]);
376 static double atan2_0_2pi(double y, double x)
/* Accumulates the weighted-mass COM sums (sum_wm, sum_wwm, sum_wmx, sum_wmxp)
 * for the local atoms of pgrp in the index range [ind_start, ind_end),
 * writing the partial results into *sum_com. Used per-thread and then
 * reduced by the callers. Some parameters (ind_start, ind_end, pbc, x_pbc,
 * sum_com) are elided from this view.
 */
static void sum_com_part(const pull_group_work_t* pgrp,
                         ArrayRef<const RVec> x,
                         ArrayRef<const RVec> xp,
                         ArrayRef<const real> mass,
    dvec sum_wmx  = { 0, 0, 0 };
    dvec sum_wmxp = { 0, 0, 0 };

    auto localAtomIndices = pgrp->atomSet.localIndex();
    for (int i = ind_start; i < ind_end; i++)
        int ii = localAtomIndices[i];
        /* Empty localWeights means all weights are 1 (elided branch);
         * otherwise use the per-atom weight */
        if (pgrp->localWeights.empty())
            w = pgrp->localWeights[i];
        if (pgrp->epgrppbc == epgrppbcNONE)
            /* Plain COM: sum the coordinates */
            for (int d = 0; d < DIM; d++)
                sum_wmx[d] += wm * x[ii][d];
            /* Also accumulate the (constraint) coordinates xp when present */
            for (int d = 0; d < DIM; d++)
                sum_wmxp[d] += wm * xp[ii][d];
            /* Sum the difference with the reference atom */
            pbc_dx(&pbc, x[ii], x_pbc, dx);
            for (int d = 0; d < DIM; d++)
                sum_wmx[d] += wm * dx[d];
            /* For xp add the difference between xp and x to dx,
             * such that we use the same periodic image,
             * also when xp has a large displacement.
             */
            for (int d = 0; d < DIM; d++)
                sum_wmxp[d] += wm * (dx[d] + xp[ii][d] - x[ii][d]);
    /* Export the partial sums for later reduction */
    sum_com->sum_wm  = sum_wm;
    sum_com->sum_wwm = sum_wwm;
    copy_dvec(sum_wmx, sum_com->sum_wmx);
    copy_dvec(sum_wmxp, sum_com->sum_wmxp);
/* Accumulates the sine/cosine sums needed for cosine-weighted COM
 * calculation over the local atoms of pgrp in [ind_start, ind_end),
 * writing partial results into *sum_com. Some parameters (ind_start,
 * ind_end, cosdim, twopi_box, sum_com) are elided from this view.
 */
static void sum_com_part_cosweight(const pull_group_work_t* pgrp,
                                   ArrayRef<const RVec> x,
                                   ArrayRef<const RVec> xp,
                                   ArrayRef<const real> mass,
    /* Cosine weighting geometry */

    auto localAtomIndices = pgrp->atomSet.localIndex();

    for (int i = ind_start; i < ind_end; i++)
        int ii = localAtomIndices[i];
        /* Determine cos and sin sums */
        real cw = std::cos(x[ii][cosdim] * twopi_box);
        real sw = std::sin(x[ii][cosdim] * twopi_box);
        sum_cm += static_cast<double>(cw * m);
        sum_sm += static_cast<double>(sw * m);
        sum_ccm += static_cast<double>(cw * cw * m);
        sum_csm += static_cast<double>(cw * sw * m);
        sum_ssm += static_cast<double>(sw * sw * m);
        /* Same sums for the (constraint) coordinates xp when present */
        real cw = std::cos(xp[ii][cosdim] * twopi_box);
        real sw = std::sin(xp[ii][cosdim] * twopi_box);
        sum_cmp += static_cast<double>(cw * m);
        sum_smp += static_cast<double>(sw * m);
    /* Export the partial sums for later reduction */
    sum_com->sum_cm  = sum_cm;
    sum_com->sum_sm  = sum_sm;
    sum_com->sum_ccm = sum_ccm;
    sum_com->sum_csm = sum_csm;
    sum_com->sum_ssm = sum_ssm;
    sum_com->sum_cmp = sum_cmp;
    sum_com->sum_smp = sum_smp;
/* calculates center of mass of selection index from all coordinates x.
 * Accumulates per-group weighted sums (threaded for large groups), reduces
 * them over ranks, then converts the sums into per-group COMs (pgrp->x and,
 * when xp is provided, pgrp->xp). Finally handles cylinder reference groups.
 * Several parameters (pull, pbc, t, xp) are elided from this view.
 */
void pull_calc_coms(const t_commrec* cr,
                    ArrayRef<const real> masses,
                    ArrayRef<const RVec> x,
    GMX_ASSERT(comm->pbcAtomBuffer.size() == pull->group.size(),
               "pbcAtomBuffer should have size number of groups");
    GMX_ASSERT(comm->comBuffer.size() == pull->group.size() * c_comBufferStride,
               "comBuffer should have size #group*c_comBufferStride");

    /* Refresh the PBC reference-atom coordinates when flagged */
    if (pull->bRefAt && pull->bSetPBCatoms)
        pull_set_pbcatoms(cr, pull, x, comm->pbcAtomBuffer);

        if (cr != nullptr && DOMAINDECOMP(cr))
            /* We can keep these PBC reference coordinates fixed for nstlist
             * steps, since atoms won't jump over PBC.
             * This avoids a global reduction at the next nstlist-1 steps.
             * Note that the exact values of the pbc reference coordinates
             * are irrelevant, as long as all atoms in the group are within
             * half a box distance of the reference coordinate.
             */
            pull->bSetPBCatoms = FALSE;

    /* cosdim >= 0 means at least one group uses cosine weighting */
    if (pull->cosdim >= 0)
        assert(pull->npbcdim <= DIM);

        /* Cosine weighting requires the box to be rectangular along cosdim */
        for (m = pull->cosdim + 1; m < pull->npbcdim; m++)
            if (pbc.box[m][pull->cosdim] != 0)
                gmx_fatal(FARGS, "Can not do cosine weighting for trilinic dimensions");
        twopi_box = 2.0 * M_PI / pbc.box[pull->cosdim][pull->cosdim];

    for (size_t g = 0; g < pull->group.size(); g++)
        pull_group_work_t* pgrp = &pull->group[g];

        /* Cosine-weighted COMs behave different from all other weighted COMs
         * in the sense that the weights depend on instantaneous coordinates,
         * not on pre-set weights. Thus we resize the local weight buffer here.
         */
        if (pgrp->epgrppbc == epgrppbcCOS)
            pgrp->localWeights.resize(pgrp->atomSet.localIndex().size());

        /* This group's slice of the global reduction buffer */
        auto comBuffer = gmx::arrayRefFromArray(comm->comBuffer.data() + g * c_comBufferStride,

        if (pgrp->needToCalcCom)
            if (pgrp->epgrppbc != epgrppbcCOS)
                rvec x_pbc = { 0, 0, 0 };

                /* Select the PBC reference coordinate for this group */
                switch (pgrp->epgrppbc)
                        /* Set the pbc atom */
                        copy_rvec(comm->pbcAtomBuffer[g], x_pbc);
                    case epgrppbcPREVSTEPCOM:
                        /* Set the pbc reference to the COM of the group of the last step */
                        copy_dvec_to_rvec(pgrp->x_prev_step, comm->pbcAtomBuffer[g]);
                        copy_dvec_to_rvec(pgrp->x_prev_step, x_pbc);

                /* The final sums should end up in comSums[0] */
                ComSums& comSumsTotal = pull->comSums[0];

                /* If we have a single-atom group the mass is irrelevant, so
                 * we can remove the mass factor to avoid division by zero.
                 * Note that with constraint pulling the mass does matter, but
                 * in that case a check group mass != 0 has been done before.
                 */
                if (pgrp->params.ind.size() == 1 && pgrp->atomSet.numAtomsLocal() == 1
                    && masses[pgrp->atomSet.localIndex()[0]] == 0)
                    GMX_ASSERT(xp.empty(),
                               "We should not have groups with zero mass with constraints, i.e. "
                    /* Copy the single atom coordinate */
                    for (int d = 0; d < DIM; d++)
                        comSumsTotal.sum_wmx[d] = x[pgrp->atomSet.localIndex()[0]][d];
                    /* Set all mass factors to 1 to get the correct COM */
                    comSumsTotal.sum_wm  = 1;
                    comSumsTotal.sum_wwm = 1;
                else if (pgrp->atomSet.numAtomsLocal() <= c_pullMaxNumLocalAtomsSingleThreaded)
                    /* Small group: avoid OpenMP overhead, sum single-threaded */
                    sum_com_part(pgrp, 0, pgrp->atomSet.numAtomsLocal(), x, xp, masses, pbc, x_pbc, &comSumsTotal);
                    /* Large group: split the local atom range over threads */
                    const int numThreads = pgrp->numThreads();
#pragma omp parallel for num_threads(numThreads) schedule(static)
                    for (int t = 0; t < numThreads; t++)
                        int ind_start = (pgrp->atomSet.numAtomsLocal() * (t + 0)) / numThreads;
                        int ind_end   = (pgrp->atomSet.numAtomsLocal() * (t + 1)) / numThreads;
                                pgrp, ind_start, ind_end, x, xp, masses, pbc, x_pbc, &pull->comSums[t]);

                    /* Reduce the thread contributions to sum_com[0] */
                    for (int t = 1; t < numThreads; t++)
                        comSumsTotal.sum_wm += pull->comSums[t].sum_wm;
                        comSumsTotal.sum_wwm += pull->comSums[t].sum_wwm;
                        dvec_inc(comSumsTotal.sum_wmx, pull->comSums[t].sum_wmx);
                        dvec_inc(comSumsTotal.sum_wmxp, pull->comSums[t].sum_wmxp);

                /* Without per-atom weights, sum_wwm equals sum_wm */
                if (pgrp->localWeights.empty())
                    comSumsTotal.sum_wwm = comSumsTotal.sum_wm;

                /* Copy local sums to a buffer for global summing */
                copy_dvec(comSumsTotal.sum_wmx, comBuffer[0]);
                copy_dvec(comSumsTotal.sum_wmxp, comBuffer[1]);
                comBuffer[2][0] = comSumsTotal.sum_wm;
                comBuffer[2][1] = comSumsTotal.sum_wwm;
                /* Cosine weighting geometry.
                 * This uses a slab of the system, thus we always have many
                 * atoms in the pull groups. Therefore, always use threads.
                 */
                const int numThreads = pgrp->numThreads();
#pragma omp parallel for num_threads(numThreads) schedule(static)
                for (int t = 0; t < numThreads; t++)
                    int ind_start = (pgrp->atomSet.numAtomsLocal() * (t + 0)) / numThreads;
                    int ind_end   = (pgrp->atomSet.numAtomsLocal() * (t + 1)) / numThreads;
                    sum_com_part_cosweight(
                            pgrp, ind_start, ind_end, pull->cosdim, twopi_box, x, xp, masses, &pull->comSums[t]);

                /* Reduce the thread contributions to comSums[0] */
                ComSums& comSumsTotal = pull->comSums[0];
                for (int t = 1; t < numThreads; t++)
                    comSumsTotal.sum_cm += pull->comSums[t].sum_cm;
                    comSumsTotal.sum_sm += pull->comSums[t].sum_sm;
                    comSumsTotal.sum_ccm += pull->comSums[t].sum_ccm;
                    comSumsTotal.sum_csm += pull->comSums[t].sum_csm;
                    comSumsTotal.sum_ssm += pull->comSums[t].sum_ssm;
                    comSumsTotal.sum_cmp += pull->comSums[t].sum_cmp;
                    comSumsTotal.sum_smp += pull->comSums[t].sum_smp;

                /* Copy local sums to a buffer for global summing */
                comBuffer[0][0] = comSumsTotal.sum_cm;
                comBuffer[0][1] = comSumsTotal.sum_sm;
                comBuffer[1][0] = comSumsTotal.sum_ccm;
                comBuffer[1][1] = comSumsTotal.sum_csm;
                comBuffer[1][2] = comSumsTotal.sum_ssm;
                comBuffer[2][0] = comSumsTotal.sum_cmp;
                comBuffer[2][1] = comSumsTotal.sum_smp;
            /* Group does not need its COM: zero its buffer slice so the
             * global reduction is unaffected */
            clear_dvec(comBuffer[0]);
            clear_dvec(comBuffer[1]);
            clear_dvec(comBuffer[2]);

    /* Global reduction of all groups' sums (call partially elided) */
                  pull->group.size() * c_comBufferStride * DIM,
                  static_cast<double*>(comm->comBuffer[0]));

    /* Second pass: convert reduced sums into per-group COMs */
    for (size_t g = 0; g < pull->group.size(); g++)
        pull_group_work_t* pgrp;

        pgrp = &pull->group[g];
        if (pgrp->needToCalcCom)
            GMX_ASSERT(!pgrp->params.ind.empty(),
                       "Normal pull groups should have atoms, only group 0, which should have "
                       "bCalcCom=FALSE has nat=0");

            const auto comBuffer = gmx::constArrayRefFromArray(
                    comm->comBuffer.data() + g * c_comBufferStride, c_comBufferStride);

            if (pgrp->epgrppbc != epgrppbcCOS)
                double wmass, wwmass;

                /* Determine the inverse mass */
                wmass         = comBuffer[2][0];
                wwmass        = comBuffer[2][1];
                pgrp->mwscale = 1.0 / wmass;
                /* invtm==0 signals a frozen group, so then we should keep it zero */
                if (pgrp->invtm != 0)
                    pgrp->wscale = wmass / wwmass;
                    pgrp->invtm  = wwmass / (wmass * wmass);
                /* Divide by the total mass */
                for (m = 0; m < DIM; m++)
                    pgrp->x[m] = comBuffer[0][m] * pgrp->mwscale;
                        pgrp->xp[m] = comBuffer[1][m] * pgrp->mwscale;
                    /* Relative sums: shift back by the PBC reference coordinate */
                    if (pgrp->epgrppbc == epgrppbcREFAT || pgrp->epgrppbc == epgrppbcPREVSTEPCOM)
                        pgrp->x[m] += comm->pbcAtomBuffer[g][m];
                            pgrp->xp[m] += comm->pbcAtomBuffer[g][m];
                /* Cosine weighting geometry */
                double csw, snw, wmass, wwmass;

                /* Determine the optimal location of the cosine weight */
                csw                    = comBuffer[0][0];
                snw                    = comBuffer[0][1];
                pgrp->x[pull->cosdim]  = atan2_0_2pi(snw, csw) / twopi_box;
                /* Set the weights for the local atoms */
                wmass  = sqrt(csw * csw + snw * snw);
                wwmass = (comBuffer[1][0] * csw * csw + comBuffer[1][1] * csw * snw
                          + comBuffer[1][2] * snw * snw)
                pgrp->mwscale = 1.0 / wmass;
                pgrp->wscale  = wmass / wwmass;
                pgrp->invtm   = wwmass / (wmass * wmass);
                /* Set the weights for the local atoms */
                for (size_t i = 0; i < pgrp->atomSet.numAtomsLocal(); i++)
                    int ii = pgrp->atomSet.localIndex()[i];
                    pgrp->localWeights[i] = csw * std::cos(twopi_box * x[ii][pull->cosdim])
                                            + snw * std::sin(twopi_box * x[ii][pull->cosdim]);
                /* Same procedure for the (constraint) coordinates xp */
                csw                    = comBuffer[2][0];
                snw                    = comBuffer[2][1];
                pgrp->xp[pull->cosdim] = atan2_0_2pi(snw, csw) / twopi_box;
            fprintf(debug, "Pull group %zu wmass %f invtm %f\n", g, 1.0 / pgrp->mwscale, pgrp->invtm);

    /* Calculate the COMs for the cylinder reference groups */
    make_cyl_refgrps(cr, pull, masses, pbc, t, x);
821 using BoolVec = gmx::BasicVector<bool>;
/* Returns whether the pull group obeys the PBC restrictions, i.e. whether
 * every local atom lies within the allowed margin of the group's PBC
 * reference coordinate along the PBC-relevant dimensions. Some parameters
 * (pbc, x_pbc) are elided from this view.
 */
static bool pullGroupObeysPbcRestrictions(const pull_group_work_t& group,
                                          const BoolVec&           dimUsed,
                                          ArrayRef<const RVec>     x,
                                          const real               pbcMargin)
    /* Determine which dimensions are relevant for PBC */
    BoolVec dimUsesPbc       = { false, false, false };
    bool    pbcIsRectangular = true;
    for (int d = 0; d < pbc.ndim_ePBC; d++)
            dimUsesPbc[d] = true;
            /* All non-zero dimensions of vector v are involved in PBC */
            for (int d2 = d + 1; d2 < pbc.ndim_ePBC; d2++)
                /* A triclinic box couples dimensions, so per-dimension
                 * margins are no longer sufficient */
                if (pbc.box[d2][d] != 0)
                    dimUsesPbc[d2]   = true;
                    pbcIsRectangular = false;

    rvec marginPerDim    = {};
    real marginDistance2 = 0;
    if (pbcIsRectangular)
        /* Use margins for dimensions independently */
        for (int d = 0; d < pbc.ndim_ePBC; d++)
            marginPerDim[d] = pbcMargin * pbc.hbox_diag[d];
        /* Check the total distance along the relevant dimensions */
        for (int d = 0; d < pbc.ndim_ePBC; d++)
            marginDistance2 += pbcMargin * gmx::square(0.5) * norm2(pbc.box[d]);

    auto localAtomIndices = group.atomSet.localIndex();
    for (gmx::index indexInSet = 0; indexInSet < localAtomIndices.ssize(); indexInSet++)
        /* Minimum-image vector from the reference coordinate to the atom */
        pbc_dx(&pbc, x[localAtomIndices[indexInSet]], x_pbc, dx);
        bool atomIsTooFar = false;
        if (pbcIsRectangular)
            for (int d = 0; d < pbc.ndim_ePBC; d++)
                if (dimUsesPbc[d] && (dx[d] < -marginPerDim[d] || dx[d] > marginPerDim[d]))
            /* Triclinic: compare the total squared distance to the margin */
            real pbcDistance2 = 0;
            for (int d = 0; d < pbc.ndim_ePBC; d++)
                    pbcDistance2 += gmx::square(dx[d]);
            atomIsTooFar = (pbcDistance2 > marginDistance2);
/* Checks all pull groups that use a PBC reference-atom treatment against the
 * PBC margin; the (elided) return value reports which group, if any, fails.
 */
int pullCheckPbcWithinGroups(const pull_t& pull, ArrayRef<const RVec> x, const t_pbc& pbc, real pbcMargin)
    /* Without PBC there is nothing to violate */
    if (pbc.pbcType == PbcType::No)

    /* Determine what dimensions are used for each group by pull coordinates */
    std::vector<BoolVec> dimUsed(pull.group.size(), { false, false, false });
    for (const pull_coord_work_t& pcrd : pull.coord)
        const t_pull_coord& coordParams = pcrd.params;
        for (int groupIndex = 0; groupIndex < coordParams.ngroup; groupIndex++)
            for (int d = 0; d < DIM; d++)
                /* The cylinder reference group (index 0) handles PBC itself */
                if (coordParams.dim[d]
                    && !(coordParams.eGeom == PullGroupGeometry::Cylinder && groupIndex == 0))
                    dimUsed[coordParams.group[groupIndex]][d] = true;

    /* Check PBC for every group that uses a PBC reference atom treatment */
    for (size_t g = 0; g < pull.group.size(); g++)
        const pull_group_work_t& group = pull.group[g];
        if ((group.epgrppbc == epgrppbcREFAT || group.epgrppbc == epgrppbcPREVSTEPCOM)
            && !pullGroupObeysPbcRestrictions(group, dimUsed[g], x, pbc, pull.comm.pbcAtomBuffer[g], pbcMargin))
/* Single-group variant of the PBC check above: returns whether pull group
 * groupNr obeys the PBC restrictions within the given margin. */
bool pullCheckPbcWithinGroup(const pull_t& pull, ArrayRef<const RVec> x, const t_pbc& pbc, int groupNr, real pbcMargin)
    /* Without PBC there is nothing to violate */
    if (pbc.pbcType == PbcType::No)
    GMX_ASSERT(groupNr < gmx::ssize(pull.group), "groupNr is out of range");

    /* Check PBC if the group uses a PBC reference atom treatment. */
    const pull_group_work_t& group = pull.group[groupNr];
    if (group.epgrppbc != epgrppbcREFAT && group.epgrppbc != epgrppbcPREVSTEPCOM)

    /* Determine what dimensions are used for each group by pull coordinates */
    BoolVec dimUsed = { false, false, false };
    for (const pull_coord_work_t& pcrd : pull.coord)
        const t_pull_coord& coordParams = pcrd.params;
        for (int groupIndex = 0; groupIndex < coordParams.ngroup; groupIndex++)
            if (coordParams.group[groupIndex] == groupNr)
                for (int d = 0; d < DIM; d++)
                    /* The cylinder reference group (index 0) handles PBC itself */
                    if (coordParams.dim[d]
                        && !(coordParams.eGeom == PullGroupGeometry::Cylinder && groupIndex == 0))

    return (pullGroupObeysPbcRestrictions(
            group, dimUsed, x, pbc, pull.comm.pbcAtomBuffer[groupNr], pbcMargin));
/* Restores each pull group's previous-step COM from the checkpointed
 * state->pull_com_prev_step array (DIM values per group). */
void setPrevStepPullComFromState(struct pull_t* pull, const t_state* state)
    for (size_t g = 0; g < pull->group.size(); g++)
        for (int j = 0; j < DIM; j++)
            pull->group[g].x_prev_step[j] = state->pull_com_prev_step[g * DIM + j];
/* Stores the current COM of each active pull group as the previous-step COM,
 * both in the pull work data and in the checkpointable state. */
void updatePrevStepPullCom(struct pull_t* pull, t_state* state)
    for (size_t g = 0; g < pull->group.size(); g++)
        if (pull->group[g].needToCalcCom)
            for (int j = 0; j < DIM; j++)
                pull->group[g].x_prev_step[j]          = pull->group[g].x[j];
                state->pull_com_prev_step[g * DIM + j] = pull->group[g].x[j];
/* Ensures state->pull_com_prev_step has room for DIM values per pull group,
 * clearing it when not needed (condition elided) and initializing new
 * entries to NAN so uninitialized use is detectable. */
void allocStatePrevStepPullCom(t_state* state, const pull_t* pull)
        state->pull_com_prev_step.clear();
    size_t ngroup = pull->group.size();
    if (state->pull_com_prev_step.size() / DIM != ngroup)
        state->pull_com_prev_step.resize(ngroup * DIM, NAN);
/* Initializes the previous-step COM (x_prev_step) for all pull groups that
 * use the previous-step COM as PBC reference, by computing the COM from the
 * current coordinates relative to the PBC reference atom. The structure
 * mirrors pull_calc_coms(). Some parameters (pull, pbc) are elided from
 * this view.
 */
void initPullComFromPrevStep(const t_commrec* cr,
                             ArrayRef<const real> masses,
                             ArrayRef<const RVec> x)
    pull_comm_t* comm   = &pull->comm;
    size_t       ngroup = pull->group.size();

    /* Ranks not participating in pull have nothing to do */
    if (!comm->bParticipate)

    GMX_ASSERT(comm->pbcAtomBuffer.size() == pull->group.size(),
               "pbcAtomBuffer should have size number of groups");
    GMX_ASSERT(comm->comBuffer.size() == pull->group.size() * c_comBufferStride,
               "comBuffer should have size #group*c_comBufferStride");

    pull_set_pbcatoms(cr, pull, x, comm->pbcAtomBuffer);

    for (size_t g = 0; g < ngroup; g++)
        pull_group_work_t* pgrp;

        pgrp = &pull->group[g];

        if (pgrp->needToCalcCom && pgrp->epgrppbc == epgrppbcPREVSTEPCOM)
            GMX_ASSERT(pgrp->params.ind.size() > 1,
                       "Groups with no atoms, or only one atom, should not "
                       "use the COM from the previous step as reference.");

            RVec x_pbc = { 0, 0, 0 };
            copy_rvec(comm->pbcAtomBuffer[g], x_pbc);

                fprintf(debug, "Initialising prev step COM of pull group %zu. x_pbc =", g);
                for (int m = 0; m < DIM; m++)
                    fprintf(debug, " %f", x_pbc[m]);
                fprintf(debug, "\n");

            /* The following is to a large extent similar to pull_calc_coms() */

            /* The final sums should end up in sum_com[0] */
            ComSums& comSumsTotal = pull->comSums[0];

            if (pgrp->atomSet.numAtomsLocal() <= c_pullMaxNumLocalAtomsSingleThreaded)
                /* Small group: single-threaded summation is cheaper */
                sum_com_part(pgrp, 0, pgrp->atomSet.numAtomsLocal(), x, {}, masses, pbc, x_pbc, &comSumsTotal);
                /* Large group: split the local atom range over threads */
                const int numThreads = pgrp->numThreads();
#pragma omp parallel for num_threads(numThreads) schedule(static)
                for (int t = 0; t < numThreads; t++)
                    int ind_start = (pgrp->atomSet.numAtomsLocal() * (t + 0)) / numThreads;
                    int ind_end   = (pgrp->atomSet.numAtomsLocal() * (t + 1)) / numThreads;
                    sum_com_part(pgrp, ind_start, ind_end, x, {}, masses, pbc, x_pbc, &pull->comSums[t]);

                /* Reduce the thread contributions to sum_com[0] */
                for (int t = 1; t < numThreads; t++)
                    comSumsTotal.sum_wm += pull->comSums[t].sum_wm;
                    comSumsTotal.sum_wwm += pull->comSums[t].sum_wwm;
                    dvec_inc(comSumsTotal.sum_wmx, pull->comSums[t].sum_wmx);
                    dvec_inc(comSumsTotal.sum_wmxp, pull->comSums[t].sum_wmxp);

            /* Without per-atom weights, sum_wwm equals sum_wm */
            if (pgrp->localWeights.empty())
                comSumsTotal.sum_wwm = comSumsTotal.sum_wm;

            /* Copy local sums to a buffer for global summing */
            auto localSums = gmx::arrayRefFromArray(comm->comBuffer.data() + g * c_comBufferStride,

            localSums[0]    = comSumsTotal.sum_wmx;
            localSums[1]    = comSumsTotal.sum_wmxp;
            localSums[2][0] = comSumsTotal.sum_wm;
            localSums[2][1] = comSumsTotal.sum_wwm;
            localSums[2][2] = 0;

    /* Sum all groups' buffers over the participating ranks */
    pullAllReduce(cr, comm, ngroup * c_comBufferStride * DIM, static_cast<double*>(comm->comBuffer[0]));

    /* Second pass: convert reduced sums into COMs and store them as the
     * previous-step COM */
    for (size_t g = 0; g < ngroup; g++)
        pull_group_work_t* pgrp;

        pgrp = &pull->group[g];
        if (pgrp->needToCalcCom)
            if (pgrp->epgrppbc == epgrppbcPREVSTEPCOM)
                auto localSums = gmx::arrayRefFromArray(
                        comm->comBuffer.data() + g * c_comBufferStride, c_comBufferStride);
                double wmass, wwmass;

                /* Determine the inverse mass */
                wmass         = localSums[2][0];
                wwmass        = localSums[2][1];
                pgrp->mwscale = 1.0 / wmass;
                /* invtm==0 signals a frozen group, so then we should keep it zero */
                if (pgrp->invtm != 0)
                    pgrp->wscale = wmass / wwmass;
                    pgrp->invtm  = wwmass / (wmass * wmass);

                /* Divide by the total mass */
                for (int m = 0; m < DIM; m++)
                    pgrp->x[m] = localSums[0][m] * pgrp->mwscale;
                    /* Shift back by the PBC reference coordinate */
                    pgrp->x[m] += comm->pbcAtomBuffer[g][m];

                    fprintf(debug, "Pull group %zu wmass %f invtm %f\n", g, 1.0 / pgrp->mwscale, pgrp->invtm);
                    fprintf(debug, "Initialising prev step COM of pull group %zu to", g);
                    for (int m = 0; m < DIM; m++)
                        fprintf(debug, " %f", pgrp->x[m]);
                    fprintf(debug, "\n");
                /* Record the computed COM as the previous-step COM */
                copy_dvec(pgrp->x, pgrp->x_prev_step);