2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
44 #include "gromacs/fileio/confio.h"
45 #include "gromacs/gmxlib/network.h"
46 #include "gromacs/math/functions.h"
47 #include "gromacs/math/utilities.h"
48 #include "gromacs/math/vec.h"
49 #include "gromacs/mdtypes/commrec.h"
50 #include "gromacs/mdtypes/md_enums.h"
51 #include "gromacs/mdtypes/mdatom.h"
52 #include "gromacs/pbcutil/pbc.h"
53 #include "gromacs/pulling/pull.h"
54 #include "gromacs/utility/fatalerror.h"
55 #include "gromacs/utility/futil.h"
56 #include "gromacs/utility/gmxassert.h"
57 #include "gromacs/utility/real.h"
58 #include "gromacs/utility/smalloc.h"
60 #include "pull_internal.h"
64 // Helper function to deduce MPI datatype from the type of data
65 gmx_unused static MPI_Datatype mpiDatatype(const float gmx_unused *data)
/* NOTE(review): body elided in this chunk; presumably returns MPI_FLOAT — confirm */
70 // Helper function to deduce MPI datatype from the type of data
71 gmx_unused static MPI_Datatype mpiDatatype(const double gmx_unused *data)
/* NOTE(review): body elided in this chunk; presumably returns MPI_DOUBLE — confirm */
79 // Helper function; note that gmx_sum(d) should actually be templated
/* Overload for real: sums n values over all ranks (body elided here;
 * presumably forwards to gmx_sum — confirm against full source). */
80 gmx_unused static void gmxAllReduce(int n, real *data, const t_commrec *cr)
86 // Helper function; note that gmx_sum(d) should actually be templated
/* Overload for double: sums n values in-place over all ranks via gmx_sumd. */
87 gmx_unused static void gmxAllReduce(int n, double *data, const t_commrec *cr)
89 gmx_sumd(n, data, cr);
92 // Reduce data of n elements over all ranks currently participating in pull
/* Serial runs (cr == nullptr or not PAR) need no reduction and fall through.
 * When all DD ranks participate, the generic gmx_sum path is used; otherwise
 * a dedicated communicator is needed, which requires an MPI build. */
94 static void pullAllReduce(const t_commrec *cr,
99 if (cr != nullptr && PAR(cr))
101 if (comm->bParticipateAll)
103 /* Sum the contributions over all DD ranks */
104 gmxAllReduce(n, data, cr);
108 /* Separate branch because gmx_sum uses cr->mpi_comm_mygroup */
110 #if MPI_IN_PLACE_EXISTS
111 MPI_Allreduce(MPI_IN_PLACE, data, n, mpiDatatype(data), MPI_SUM,
/* Fallback when MPI_IN_PLACE is unavailable: reduce into a temporary buffer */
114 std::vector<T> buf(n);
/* NOTE(review): MPI_Allreduce takes void* buffers; passing the std::vector
 * `buf` directly looks wrong — this should presumably be buf.data(). Confirm
 * against the full source / upstream GROMACS. */
116 MPI_Allreduce(data, buf, n, mpiDatatype(data), MPI_SUM,
119 /* Copy the result from the buffer to the input/output data */
120 for (int i = 0; i < n; i++)
126 gmx_incons("comm->bParticipateAll=FALSE without GMX_MPI");
132 /* Copies the coordinates of the PBC atom of pgrp to x_pbc.
133 * When those coordinates are not available on this rank, clears x_pbc.
/* With domain decomposition (pbcAtomSet set) only the rank owning the PBC
 * atom contributes its coordinates; all other ranks contribute zeros so a
 * later sum-reduction yields the coordinates on every rank. Without DD the
 * atom is addressed directly by its global index. */
135 static void setPbcAtomCoords(const pull_group_work_t &pgrp,
139 if (pgrp.pbcAtomSet != nullptr)
141 if (pgrp.pbcAtomSet->numAtomsLocal() > 0)
143 /* We have the atom locally, copy its coordinates */
144 copy_rvec(x[pgrp.pbcAtomSet->localIndex()[0]], x_pbc);
148 /* Another rank has it, clear the coordinates for MPI_Allreduce */
/* No parallel atom set: read the PBC atom straight from the global array */
154 copy_rvec(x[pgrp.params.pbcatom], x_pbc);
/* Gathers the PBC reference-atom coordinates for every pull group that uses
 * the reference-atom PBC treatment into x_pbc (one entry per group), then
 * sums over ranks so all participating ranks have the coordinates. */
158 static void pull_set_pbcatoms(const t_commrec *cr, struct pull_t *pull,
160 gmx::ArrayRef<gmx::RVec> x_pbc)
163 for (size_t g = 0; g < pull->group.size(); g++)
165 const pull_group_work_t &group = pull->group[g];
166 if (group.needToCalcCom && group.epgrppbc == epgrppbcREFAT)
168 setPbcAtomCoords(pull->group[g], x, x_pbc[g]);
/* Groups without a PBC reference atom contribute zeros to the reduction */
173 clear_rvec(x_pbc[g]);
/* NOTE(review): numPbcAtoms is presumably counted in the loop above (its
 * declaration/increment is not visible in this chunk) — confirm. */
177 if (cr && PAR(cr) && numPbcAtoms > 0)
179 /* Sum over participating ranks to get x_pbc from the home ranks.
180 * This can be very expensive at high parallelization, so we only
181 * do this after each DD repartitioning.
183 pullAllReduce(cr, &pull->comm, pull->group.size()*DIM,
184 static_cast<real *>(x_pbc[0]));
/* Builds the dynamic cylinder reference group for every pull coordinate with
 * cylinder geometry: selects reference-group atoms within cylinder_r of the
 * pull axis, weights them radially, reduces the weighted sums over ranks and
 * stores the resulting COM, inverse mass and radial force factors in
 * pull->dyna[c] / the coordinate's spatial data. */
188 static void make_cyl_refgrps(const t_commrec *cr,
195 pull_comm_t *comm = &pull->comm;
197 GMX_ASSERT(comm->cylinderBuffer.size() == pull->coord.size()*c_cylinderBufferStride, "cylinderBuffer should have the correct size");
199 double inv_cyl_r2 = 1.0/gmx::square(pull->params.cylinder_r);
201 /* loop over all groups to make a reference group for each*/
202 for (size_t c = 0; c < pull->coord.size(); c++)
204 pull_coord_work_t *pcrd;
205 double sum_a, wmass, wwmass;
206 dvec radf_fac0, radf_fac1;
208 pcrd = &pull->coord[c];
213 clear_dvec(radf_fac0);
214 clear_dvec(radf_fac1);
216 if (pcrd->params.eGeom == epullgCYL)
218 /* pref will be the same group for all pull coordinates */
219 const pull_group_work_t &pref = pull->group[pcrd->params.group[0]];
220 const pull_group_work_t &pgrp = pull->group[pcrd->params.group[1]];
221 pull_group_work_t &pdyna = pull->dyna[c];
223 copy_dvec_to_rvec(pcrd->spatialData.vec, direction);
225 /* Since we have not calculated the COM of the cylinder group yet,
226 * we calculate distances with respect to location of the pull
227 * group minus the reference position along the vector.
228 * here we already have the COM of the pull group. This resolves
229 * any PBC issues and we don't need to use a PBC-atom here.
231 if (pcrd->params.rate != 0)
233 /* With rate=0, value_ref is set initially */
234 pcrd->value_ref = pcrd->params.init + pcrd->params.rate*t;
/* Reference point = pull-group COM shifted back along the pull vector */
237 for (int m = 0; m < DIM; m++)
239 reference[m] = pgrp.x[m] - pcrd->spatialData.vec[m]*pcrd->value_ref;
242 auto localAtomIndices = pref.atomSet.localIndex();
244 /* This actually only needs to be done at init or DD time,
245 * but resizing with the same size does not cause much overhead.
247 pdyna.localWeights.resize(localAtomIndices.size());
248 pdyna.mdw.resize(localAtomIndices.size());
249 pdyna.dv.resize(localAtomIndices.size());
251 /* loop over all atoms in the main ref group */
252 for (gmx::index indexInSet = 0; indexInSet < localAtomIndices.size(); indexInSet++)
254 int atomIndex = localAtomIndices[indexInSet];
/* Split the PBC-corrected displacement into axial and radial parts */
256 pbc_dx_aiuc(pbc, x[atomIndex], reference, dx);
257 double axialLocation = iprod(direction, dx);
260 for (int m = 0; m < DIM; m++)
262 /* Determine the radial components */
263 radialLocation[m] = dx[m] - axialLocation*direction[m];
264 dr2 += gmx::square(radialLocation[m]);
266 double dr2_rel = dr2*inv_cyl_r2;
/* Atoms with dr2_rel < 1 lie inside the cylinder (branch lines elided) */
270 /* add atom to sum of COM and to weight array */
272 double mass = md->massT[atomIndex];
273 /* The radial weight function is 1-2x^2+x^4,
274 * where x=r/cylinder_r. Since this function depends
275 * on the radial component, we also get radial forces
/* weight = 1 - 2*dr2_rel + dr2_rel^2; dweight_r = (1/r)*dweight/dr */
278 double weight = 1 + (-2 + dr2_rel)*dr2_rel;
279 double dweight_r = (-4 + 4*dr2_rel)*inv_cyl_r2;
280 pdyna.localWeights[indexInSet] = weight;
281 sum_a += mass*weight*axialLocation;
282 wmass += mass*weight;
283 wwmass += mass*weight*weight;
285 dsvmul(mass*dweight_r, radialLocation, mdw);
286 copy_dvec(mdw, pdyna.mdw[indexInSet]);
287 /* Currently we only have the axial component of the
288 * offset from the cylinder COM up to an unkown offset.
289 * We add this offset after the reduction needed
290 * for determining the COM of the cylinder group.
292 pdyna.dv[indexInSet] = axialLocation;
293 for (int m = 0; m < DIM; m++)
295 radf_fac0[m] += mdw[m];
296 radf_fac1[m] += mdw[m]*axialLocation;
/* Atom outside the cylinder: zero weight so it does not contribute */
301 pdyna.localWeights[indexInSet] = 0;
/* Pack this coordinate's partial sums for the cross-rank reduction;
 * slots 0-2 presumably hold wmass/wwmass/sum_a (assignments elided here) */
306 auto buffer = gmx::arrayRefFromArray(comm->cylinderBuffer.data() + c*c_cylinderBufferStride, c_cylinderBufferStride);
312 buffer[3] = radf_fac0[XX];
313 buffer[4] = radf_fac0[YY];
314 buffer[5] = radf_fac0[ZZ];
316 buffer[6] = radf_fac1[XX];
317 buffer[7] = radf_fac1[YY];
318 buffer[8] = radf_fac1[ZZ];
321 if (cr != nullptr && PAR(cr))
323 /* Sum the contributions over the ranks */
324 pullAllReduce(cr, comm, pull->coord.size()*c_cylinderBufferStride,
325 comm->cylinderBuffer.data());
/* Second pass: turn the globally reduced sums into COM, masses and the
 * radial force factors for each cylinder coordinate */
328 for (size_t c = 0; c < pull->coord.size(); c++)
330 pull_coord_work_t *pcrd;
332 pcrd = &pull->coord[c];
334 if (pcrd->params.eGeom == epullgCYL)
336 pull_group_work_t *pdyna = &pull->dyna[c];
337 pull_group_work_t *pgrp = &pull->group[pcrd->params.group[1]];
338 PullCoordSpatialData &spatialData = pcrd->spatialData;
340 auto buffer = gmx::constArrayRefFromArray(comm->cylinderBuffer.data() + c*c_cylinderBufferStride, c_cylinderBufferStride);
341 double wmass = buffer[0];
342 double wwmass = buffer[1];
343 pdyna->mwscale = 1.0/wmass;
344 /* Cylinder pulling can't be used with constraints, but we set
345 * wscale and invtm anyhow, in case someone would like to use them.
347 pdyna->wscale = wmass/wwmass;
348 pdyna->invtm = wwmass/(wmass*wmass);
350 /* We store the deviation of the COM from the reference location
351 * used above, since we need it when we apply the radial forces
352 * to the atoms in the cylinder group.
354 spatialData.cyl_dev = 0;
355 for (int m = 0; m < DIM; m++)
357 double reference = pgrp->x[m] - spatialData.vec[m]*pcrd->value_ref;
358 double dist = -spatialData.vec[m]*buffer[2]*pdyna->mwscale;
359 pdyna->x[m] = reference - dist;
360 spatialData.cyl_dev += dist;
362 /* Now we know the exact COM of the cylinder reference group,
363 * we can determine the radial force factor (ffrad) that when
364 * multiplied with the axial pull force will give the radial
365 * force on the pulled (non-cylinder) group.
367 for (int m = 0; m < DIM; m++)
369 spatialData.ffrad[m] = (buffer[6 + m] +
370 buffer[3 + m]*spatialData.cyl_dev)/wmass;
/* Debug output only (guard `if (debug)` elided in this chunk) */
375 fprintf(debug, "Pull cylinder group %zu:%8.3f%8.3f%8.3f m:%8.3f\n",
376 c, pdyna->x[0], pdyna->x[1],
377 pdyna->x[2], 1.0/pdyna->invtm);
378 fprintf(debug, "ffrad %8.3f %8.3f %8.3f\n",
379 spatialData.ffrad[XX], spatialData.ffrad[YY], spatialData.ffrad[ZZ]);
/* As std::atan2(y, x), but presumably with the result mapped into the range
 * [0, 2*pi) for use as a periodic coordinate (body elided in this chunk —
 * confirm against full source). */
385 static double atan2_0_2pi(double y, double x)
/* Accumulates the (weighted) mass sums and mass-weighted coordinate sums for
 * local atoms [ind_start, ind_end) of one pull group into *sum_com.
 * Handles plain summing (no PBC) and summing of displacements relative to a
 * reference-atom position x_pbc; xp (when non-null) gets the same treatment
 * using the same periodic image as x. */
397 static void sum_com_part(const pull_group_work_t *pgrp,
398 int ind_start, int ind_end,
399 const rvec *x, const rvec *xp,
407 dvec sum_wmx = { 0, 0, 0 };
408 dvec sum_wmxp = { 0, 0, 0 };
410 auto localAtomIndices = pgrp->atomSet.localIndex();
411 for (int i = ind_start; i < ind_end; i++)
413 int ii = localAtomIndices[i];
/* Without per-atom weights only the mass enters the sums */
415 if (pgrp->localWeights.empty())
424 w = pgrp->localWeights[i];
429 if (pgrp->epgrppbc == epgrppbcNONE)
431 /* Plain COM: sum the coordinates */
432 for (int d = 0; d < DIM; d++)
434 sum_wmx[d] += wm*x[ii][d];
438 for (int d = 0; d < DIM; d++)
440 sum_wmxp[d] += wm*xp[ii][d];
448 /* Sum the difference with the reference atom */
449 pbc_dx(pbc, x[ii], x_pbc, dx);
450 for (int d = 0; d < DIM; d++)
452 sum_wmx[d] += wm*dx[d];
456 /* For xp add the difference between xp and x to dx,
457 * such that we use the same periodic image,
458 * also when xp has a large displacement.
460 for (int d = 0; d < DIM; d++)
462 sum_wmxp[d] += wm*(dx[d] + xp[ii][d] - x[ii][d]);
/* Store the accumulated partial sums for later thread/rank reduction */
468 sum_com->sum_wm = sum_wm;
469 sum_com->sum_wwm = sum_wwm;
470 copy_dvec(sum_wmx, sum_com->sum_wmx);
473 copy_dvec(sum_wmxp, sum_com->sum_wmxp);
/* Cosine-weighting counterpart of sum_com_part: accumulates mass-weighted
 * cos/sin sums along dimension cosdim for local atoms [ind_start, ind_end)
 * of one pull group into *sum_com; xp (when non-null) contributes only the
 * cos/sin sums needed for the constrained coordinates. */
477 static void sum_com_part_cosweight(const pull_group_work_t *pgrp,
478 int ind_start, int ind_end,
479 int cosdim, real twopi_box,
480 const rvec *x, const rvec *xp,
484 /* Cosine weighting geometry */
493 auto localAtomIndices = pgrp->atomSet.localIndex();
495 for (int i = ind_start; i < ind_end; i++)
497 int ii = localAtomIndices[i];
499 /* Determine cos and sin sums */
500 real cw = std::cos(x[ii][cosdim]*twopi_box);
501 real sw = std::sin(x[ii][cosdim]*twopi_box);
/* Accumulate in double precision to limit rounding over many atoms */
502 sum_cm += static_cast<double>(cw*m);
503 sum_sm += static_cast<double>(sw*m);
504 sum_ccm += static_cast<double>(cw*cw*m);
505 sum_csm += static_cast<double>(cw*sw*m);
506 sum_ssm += static_cast<double>(sw*sw*m);
/* Same cos/sin sums for the constraint coordinates xp (guard elided) */
510 real cw = std::cos(xp[ii][cosdim]*twopi_box);
511 real sw = std::sin(xp[ii][cosdim]*twopi_box);
512 sum_cmp += static_cast<double>(cw*m);
513 sum_smp += static_cast<double>(sw*m);
/* Store the accumulated partial sums for later thread/rank reduction */
517 sum_com->sum_cm = sum_cm;
518 sum_com->sum_sm = sum_sm;
519 sum_com->sum_ccm = sum_ccm;
520 sum_com->sum_csm = sum_csm;
521 sum_com->sum_ssm = sum_ssm;
522 sum_com->sum_cmp = sum_cmp;
523 sum_com->sum_smp = sum_smp;
526 /* Calculates the center of mass of each pull group from the coordinates x
 * (and constrained coordinates xp, when non-null): gathers PBC reference
 * atoms, accumulates local weighted sums (threaded for large groups),
 * reduces them over ranks, and converts the sums into per-group COM,
 * inverse mass and weight scales. Finally updates the cylinder reference
 * groups. */
527 void pull_calc_coms(const t_commrec *cr,
532 const rvec x[], rvec *xp)
539 GMX_ASSERT(comm->pbcAtomBuffer.size() == pull->group.size(), "pbcAtomBuffer should have size number of groups");
540 GMX_ASSERT(comm->comBuffer.size() == pull->group.size()*DIM, "comBuffer should have size #group*DIM");
542 if (pull->bRefAt && pull->bSetPBCatoms)
544 pull_set_pbcatoms(cr, pull, x, comm->pbcAtomBuffer);
546 if (cr != nullptr && DOMAINDECOMP(cr))
548 /* We can keep these PBC reference coordinates fixed for nstlist
549 * steps, since atoms won't jump over PBC.
550 * This avoids a global reduction at the next nstlist-1 steps.
551 * Note that the exact values of the pbc reference coordinates
552 * are irrelevant, as long all atoms in the group are within
553 * half a box distance of the reference coordinate.
555 pull->bSetPBCatoms = FALSE;
/* Cosine weighting requires a rectangular box along cosdim */
559 if (pull->cosdim >= 0)
563 assert(pull->npbcdim <= DIM);
565 for (m = pull->cosdim+1; m < pull->npbcdim; m++)
567 if (pbc->box[m][pull->cosdim] != 0)
569 gmx_fatal(FARGS, "Can not do cosine weighting for trilinic dimensions");
572 twopi_box = 2.0*M_PI/pbc->box[pull->cosdim][pull->cosdim];
/* Pass 1: accumulate the local (this-rank) sums for every group */
575 for (size_t g = 0; g < pull->group.size(); g++)
577 pull_group_work_t *pgrp;
579 pgrp = &pull->group[g];
581 if (pgrp->needToCalcCom)
583 if (pgrp->epgrppbc != epgrppbcCOS)
585 rvec x_pbc = { 0, 0, 0 };
587 if (pgrp->epgrppbc == epgrppbcREFAT)
589 /* Set the pbc atom */
590 copy_rvec(comm->pbcAtomBuffer[g], x_pbc);
593 /* The final sums should end up in comSums[0] */
594 ComSums &comSumsTotal = pull->comSums[0];
596 /* If we have a single-atom group the mass is irrelevant, so
597 * we can remove the mass factor to avoid division by zero.
598 * Note that with constraint pulling the mass does matter, but
599 * in that case a check group mass != 0 has been done before.
601 if (pgrp->params.nat == 1 &&
602 pgrp->atomSet.numAtomsLocal() == 1 &&
603 md->massT[pgrp->atomSet.localIndex()[0]] == 0)
605 GMX_ASSERT(xp == nullptr, "We should not have groups with zero mass with constraints, i.e. xp!=NULL");
607 /* Copy the single atom coordinate */
608 for (int d = 0; d < DIM; d++)
610 comSumsTotal.sum_wmx[d] = x[pgrp->atomSet.localIndex()[0]][d];
612 /* Set all mass factors to 1 to get the correct COM */
613 comSumsTotal.sum_wm = 1;
614 comSumsTotal.sum_wwm = 1;
/* Small groups: single-threaded summation avoids OpenMP overhead */
616 else if (pgrp->atomSet.numAtomsLocal() <= c_pullMaxNumLocalAtomsSingleThreaded)
618 sum_com_part(pgrp, 0, pgrp->atomSet.numAtomsLocal(),
/* Large groups: split the local atom range evenly over threads */
625 #pragma omp parallel for num_threads(pull->nthreads) schedule(static)
626 for (int t = 0; t < pull->nthreads; t++)
628 int ind_start = (pgrp->atomSet.numAtomsLocal()*(t + 0))/pull->nthreads;
629 int ind_end = (pgrp->atomSet.numAtomsLocal()*(t + 1))/pull->nthreads;
630 sum_com_part(pgrp, ind_start, ind_end,
636 /* Reduce the thread contributions to sum_com[0] */
637 for (int t = 1; t < pull->nthreads; t++)
639 comSumsTotal.sum_wm += pull->comSums[t].sum_wm;
640 comSumsTotal.sum_wwm += pull->comSums[t].sum_wwm;
641 dvec_inc(comSumsTotal.sum_wmx, pull->comSums[t].sum_wmx);
642 dvec_inc(comSumsTotal.sum_wmxp, pull->comSums[t].sum_wmxp);
/* Unweighted groups: weight == mass, so sum(w*m*w) == sum(w*m) */
646 if (pgrp->localWeights.empty())
648 comSumsTotal.sum_wwm = comSumsTotal.sum_wm;
651 /* Copy local sums to a buffer for global summing */
652 auto buffer = gmx::arrayRefFromArray(comm->comBuffer.data() + g*DIM, DIM);
654 copy_dvec(comSumsTotal.sum_wmx, buffer[0]);
656 copy_dvec(comSumsTotal.sum_wmxp, buffer[1]);
658 buffer[2][0] = comSumsTotal.sum_wm;
659 buffer[2][1] = comSumsTotal.sum_wwm;
664 /* Cosine weighting geometry.
665 * This uses a slab of the system, thus we always have many
666 * atoms in the pull groups. Therefore, always use threads.
668 #pragma omp parallel for num_threads(pull->nthreads) schedule(static)
669 for (int t = 0; t < pull->nthreads; t++)
671 int ind_start = (pgrp->atomSet.numAtomsLocal()*(t + 0))/pull->nthreads;
672 int ind_end = (pgrp->atomSet.numAtomsLocal()*(t + 1))/pull->nthreads;
673 sum_com_part_cosweight(pgrp, ind_start, ind_end,
674 pull->cosdim, twopi_box,
679 /* Reduce the thread contributions to comSums[0] */
680 ComSums &comSumsTotal = pull->comSums[0];
681 for (int t = 1; t < pull->nthreads; t++)
683 comSumsTotal.sum_cm += pull->comSums[t].sum_cm;
684 comSumsTotal.sum_sm += pull->comSums[t].sum_sm;
685 comSumsTotal.sum_ccm += pull->comSums[t].sum_ccm;
686 comSumsTotal.sum_csm += pull->comSums[t].sum_csm;
687 comSumsTotal.sum_ssm += pull->comSums[t].sum_ssm;
688 comSumsTotal.sum_cmp += pull->comSums[t].sum_cmp;
689 comSumsTotal.sum_smp += pull->comSums[t].sum_smp;
692 /* Copy local sums to a buffer for global summing */
693 auto buffer = gmx::arrayRefFromArray(comm->comBuffer.data() + g*DIM, DIM);
695 buffer[0][0] = comSumsTotal.sum_cm;
696 buffer[0][1] = comSumsTotal.sum_sm;
698 buffer[1][0] = comSumsTotal.sum_ccm;
699 buffer[1][1] = comSumsTotal.sum_csm;
700 buffer[1][2] = comSumsTotal.sum_ssm;
701 buffer[2][0] = comSumsTotal.sum_cmp;
702 buffer[2][1] = comSumsTotal.sum_smp;
/* Global reduction of all groups' sums over the participating ranks */
708 pullAllReduce(cr, comm, pull->group.size()*3*DIM,
709 static_cast<double *>(comm->comBuffer.data()[0]));
/* Pass 2: convert the reduced sums into COM, scales and inverse masses */
711 for (size_t g = 0; g < pull->group.size(); g++)
713 pull_group_work_t *pgrp;
715 pgrp = &pull->group[g];
716 if (pgrp->needToCalcCom)
718 GMX_ASSERT(pgrp->params.nat > 0, "Normal pull groups should have atoms, only group 0, which should have bCalcCom=FALSE has nat=0");
720 auto dvecBuffer = gmx::arrayRefFromArray(comm->comBuffer.data() + g*DIM, DIM);
722 if (pgrp->epgrppbc != epgrppbcCOS)
724 double wmass, wwmass;
727 /* Determine the inverse mass */
728 wmass = dvecBuffer[2][0];
729 wwmass = dvecBuffer[2][1];
730 pgrp->mwscale = 1.0/wmass;
731 /* invtm==0 signals a frozen group, so then we should keep it zero */
732 if (pgrp->invtm != 0)
734 pgrp->wscale = wmass/wwmass;
735 pgrp->invtm = wwmass/(wmass*wmass);
737 /* Divide by the total mass */
738 for (m = 0; m < DIM; m++)
740 pgrp->x[m] = dvecBuffer[0][m]*pgrp->mwscale;
743 pgrp->xp[m] = dvecBuffer[1][m]*pgrp->mwscale;
/* Displacement sums were relative to the PBC atom: shift back */
745 if (pgrp->epgrppbc == epgrppbcREFAT)
747 pgrp->x[m] += comm->pbcAtomBuffer[g][m];
750 pgrp->xp[m] += comm->pbcAtomBuffer[g][m];
757 /* Cosine weighting geometry */
758 double csw, snw, wmass, wwmass;
760 /* Determine the optimal location of the cosine weight */
761 csw = dvecBuffer[0][0];
762 snw = dvecBuffer[0][1];
763 pgrp->x[pull->cosdim] = atan2_0_2pi(snw, csw)/twopi_box;
764 /* Determine the effective (weighted) masses */
765 wmass = sqrt(csw*csw + snw*snw);
766 wwmass = (dvecBuffer[1][0]*csw*csw +
767 dvecBuffer[1][1]*csw*snw +
768 dvecBuffer[1][2]*snw*snw)/(wmass*wmass);
770 pgrp->mwscale = 1.0/wmass;
771 pgrp->wscale = wmass/wwmass;
772 pgrp->invtm = wwmass/(wmass*wmass);
773 /* Set the weights for the local atoms */
776 for (size_t i = 0; i < pgrp->atomSet.numAtomsLocal(); i++)
778 int ii = pgrp->atomSet.localIndex()[i];
779 pgrp->localWeights[i] = csw*std::cos(twopi_box*x[ii][pull->cosdim]) +
780 snw*std::sin(twopi_box*x[ii][pull->cosdim]);
/* Same conversion for the constrained coordinates xp (guard elided) */
784 csw = dvecBuffer[2][0];
785 snw = dvecBuffer[2][1];
786 pgrp->xp[pull->cosdim] = atan2_0_2pi(snw, csw)/twopi_box;
/* Debug output only (guard `if (debug)` elided in this chunk) */
791 fprintf(debug, "Pull group %zu wmass %f invtm %f\n",
792 g, 1.0/pgrp->mwscale, pgrp->invtm);
799 /* Calculate the COMs for the cylinder reference groups */
800 make_cyl_refgrps(cr, pull, md, pbc, t, x);
804 using BoolVec = gmx::BasicVector<bool>;
806 /* Returns whether the pull group obeys the PBC restrictions */
/* Checks that every local atom of the group lies within a safety margin of
 * half the box around the PBC reference position x_pbc, along the dimensions
 * actually used by pull coordinates (dimUsed). For rectangular PBC the check
 * is per dimension; for triclinic boxes a total-distance check is used. */
807 static bool pullGroupObeysPbcRestrictions(const pull_group_work_t &group,
808 const BoolVec &dimUsed,
811 const gmx::RVec &x_pbc)
813 /* Determine which dimensions are relevant for PBC */
814 BoolVec dimUsesPbc = { false, false, false };
815 bool pbcIsRectangular = true;
816 for (int d = 0; d < pbc.ndim_ePBC; d++)
820 dimUsesPbc[d] = true;
821 /* All non-zero dimensions of vector v are involved in PBC */
822 for (int d2 = d + 1; d2 < pbc.ndim_ePBC; d2++)
/* Any off-diagonal box element couples dimensions: box is triclinic */
825 if (pbc.box[d2][d] != 0)
827 dimUsesPbc[d2] = true;
828 pbcIsRectangular = false;
834 rvec marginPerDim = {};
835 real marginDistance2 = 0;
836 if (pbcIsRectangular)
838 /* Use margins for dimensions independently */
839 for (int d = 0; d < pbc.ndim_ePBC; d++)
841 marginPerDim[d] = c_pullGroupPbcMargin*pbc.hbox_diag[d];
846 /* Check the total distance along the relevant dimensions */
847 for (int d = 0; d < pbc.ndim_ePBC; d++)
/* NOTE(review): this accumulates margin*(0.5)^2*|box_d|^2; for a squared
 * distance threshold one would expect the margin factor squared as well,
 * i.e. gmx::square(c_pullGroupPbcMargin*0.5) — confirm against full source. */
851 marginDistance2 += c_pullGroupPbcMargin*gmx::square(0.5)*norm2(pbc.box[d]);
856 auto localAtomIndices = group.atomSet.localIndex();
857 for (gmx::index indexInSet = 0; indexInSet < localAtomIndices.size(); indexInSet++)
/* NOTE(review): x is indexed directly with the set index here, while other
 * loops in this file first map through localAtomIndices (see the pattern in
 * make_cyl_refgrps); this looks like it should be
 * x[localAtomIndices[indexInSet]] — confirm against full source. */
860 pbc_dx(&pbc, x[indexInSet], x_pbc, dx);
862 bool atomIsTooFar = false;
863 if (pbcIsRectangular)
865 for (int d = 0; d < pbc.ndim_ePBC; d++)
867 if (dimUsesPbc[d] && (dx[d] < -marginPerDim[d] ||
868 dx[d] > marginPerDim[d]))
/* Triclinic case: compare total squared distance over the PBC dimensions */
876 real pbcDistance2 = 0;
877 for (int d = 0; d < pbc.ndim_ePBC; d++)
881 pbcDistance2 += gmx::square(dx[d]);
884 atomIsTooFar = (pbcDistance2 > marginDistance2);
895 int pullCheckPbcWithinGroups(const pull_t &pull,
899 if (pbc.ePBC == epbcNONE)
904 /* Determine what dimensions are used for each group by pull coordinates */
905 std::vector<BoolVec> dimUsed(pull.group.size(), { false, false, false });
906 for (size_t c = 0; c < pull.coord.size(); c++)
908 const t_pull_coord &coordParams = pull.coord[c].params;
909 for (int groupIndex = 0; groupIndex < coordParams.ngroup; groupIndex++)
911 for (int d = 0; d < DIM; d++)
913 if (coordParams.dim[d] &&
914 !(coordParams.eGeom == epullgCYL && groupIndex == 0))
916 dimUsed[coordParams.group[groupIndex]][d] = true;
922 /* Check PBC for every group that uses a PBC reference atom treatment */
923 for (size_t g = 0; g < pull.group.size(); g++)
925 const pull_group_work_t &group = pull.group[g];
926 if (group.epgrppbc == epgrppbcREFAT &&
927 !pullGroupObeysPbcRestrictions(group, dimUsed[g], x, pbc, pull.comm.pbcAtomBuffer[g]))