2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 /* This file is completely threadsafe - keep it that way! */
51 #include "gromacs/domdec/domdec.h"
52 #include "gromacs/domdec/domdec_struct.h"
53 #include "gromacs/gmxlib/nrnb.h"
54 #include "gromacs/math/functions.h"
55 #include "gromacs/math/units.h"
56 #include "gromacs/math/vec.h"
57 #include "gromacs/mdlib/constr.h"
58 #include "gromacs/mdlib/gmx_omp_nthreads.h"
59 #include "gromacs/mdlib/mdrun.h"
60 #include "gromacs/mdtypes/commrec.h"
61 #include "gromacs/mdtypes/inputrec.h"
62 #include "gromacs/mdtypes/md_enums.h"
63 #include "gromacs/mdtypes/mdatom.h"
64 #include "gromacs/pbcutil/pbc.h"
65 #include "gromacs/pbcutil/pbc-simd.h"
66 #include "gromacs/simd/simd.h"
67 #include "gromacs/simd/simd_math.h"
68 #include "gromacs/simd/vector_operations.h"
69 #include "gromacs/topology/block.h"
70 #include "gromacs/topology/mtop_util.h"
71 #include "gromacs/utility/basedefinitions.h"
72 #include "gromacs/utility/bitmask.h"
73 #include "gromacs/utility/cstringutil.h"
74 #include "gromacs/utility/exceptions.h"
75 #include "gromacs/utility/fatalerror.h"
76 #include "gromacs/utility/gmxomp.h"
77 #include "gromacs/utility/pleasecite.h"
78 #include "gromacs/utility/smalloc.h"
80 using namespace gmx; // TODO: Remove when this file is moved into gmx namespace
/* Per-thread (per-task) LINCS work description.
 * NOTE(review): the struct opener/closer are on elided lines in this view;
 * the fields below are kept byte-identical. */
83 int b0; /* first constraint for this task */
84 int b1; /* b1-1 is the last constraint for this task */
85 int ntriangle; /* the number of constraints in triangles */
86 int *triangle; /* the list of triangle constraints */
87 int *tri_bits; /* the bits tell if the matrix element should be used */
88 int tri_alloc; /* allocation size of triangle and tri_bits */
89 int nind; /* number of indices in ind (constraints that only touch this task's atoms; see lincs_update_atoms) */
90 int *ind; /* constraint index for updating atom data */
91 int nind_r; /* number of indices in ind_r (presumably the remaining, task-crossing constraints — TODO confirm against setup code) */
92 int *ind_r; /* constraint index for updating atom data */
93 int ind_nalloc; /* allocation size of ind and ind_r */
94 tensor vir_r_m_dr; /* temporary variable for virial calculation */
95 real dhdlambda; /* temporary variable for lambda derivative */
/* All data needed by the LINCS constraint solver: global/local constraint
 * counts, the sparse coupling matrix (blnr/blbnb/blmf), per-constraint
 * reference lengths, and the per-thread task division. */
98 typedef struct gmx_lincsdata {
99 int ncg; /* the global number of constraints */
100 int ncg_flex; /* the global number of flexible constraints */
101 int ncg_triangle; /* the global number of constraints in triangles */
102 int nIter; /* the number of iterations */
103 int nOrder; /* the order of the matrix expansion */
104 int max_connect; /* the maximum number of constraints connected to a single atom */
106 int nc_real; /* the number of real constraints */
107 int nc; /* the number of constraints including padding for SIMD */
108 int nc_alloc; /* the number we allocated memory for */
109 int ncc; /* the number of constraint connections */
110 int ncc_alloc; /* the number we allocated memory for */
111 real matlam; /* the FE lambda value used for filling blc and blmf */
112 int *con_index; /* mapping from topology to LINCS constraints */
113 real *bllen0; /* the reference distance in topology A */
114 real *ddist; /* the reference distance in top B - the r.d. in top A */
115 int *bla; /* the atom pairs involved in the constraints */
116 real *blc; /* 1/sqrt(invmass1 + invmass2) */
117 real *blc1; /* as blc, but with all masses 1 */
118 int *blnr; /* index into blbnb and blmf */
119 int *blbnb; /* list of constraint connections */
120 int ntriangle; /* the local number of constraints in triangles */
121 int ncc_triangle; /* the number of constraint connections in triangles */
122 gmx_bool bCommIter; /* communicate before each LINCS iteration */
123 real *blmf; /* matrix of mass factors for constraint connections */
124 real *blmf1; /* as blmf, but with all masses 1 */
125 real *bllen; /* the reference bond length */
126 int *nlocat; /* the local atom count per constraint, can be NULL */
128 int ntask; /* The number of tasks = #threads for LINCS */
129 lincs_task_t *task; /* LINCS thread division */
130 gmx_bitmask_t *atf; /* atom flags for thread parallelization */
131 int atf_nalloc; /* allocation size of atf */
132 gmx_bool bTaskDep; /* are the LINCS tasks interdependent? */
133 gmx_bool bTaskDepTri; /* are there triangle constraints that cross task borders? */
134 /* arrays for temporary storage in the LINCS algorithm */
141 real *mlambda; /* the Lagrange multipliers * -1 */
142 /* storage for the constraint RMS relative deviation output */
146 /* Define simd_width for memory allocation used for SIMD code */
147 #if GMX_SIMD_HAVE_REAL
148 static const int simd_width = GMX_SIMD_REAL_WIDTH;
/* NOTE(review): the #else/#endif of this conditional sit on elided lines;
 * the definition below is the non-SIMD fallback. */
150 static const int simd_width = 1;
153 /* Align to 128 bytes, consistent with the current implementation of
154 AlignedAllocator, which currently forces 128 byte alignment. */
155 static const int align_bytes = 128;
/* Returns a pointer to the RMSD accumulation buffer of lincsd.
 * From lincs_rmsd() below: rmsd_data[0] holds the weight/count and
 * rmsd_data[1] the summed squared deviation. */
157 real *lincs_rmsd_data(struct gmx_lincsdata *lincsd)
159 return lincsd->rmsd_data;
/* Returns the RMS relative constraint deviation accumulated in rmsd_data:
 * sqrt(rmsd_data[1]/rmsd_data[0]) when data was accumulated
 * (the zero-count branch is on elided lines). */
162 real lincs_rmsd(struct gmx_lincsdata *lincsd)
164 if (lincsd->rmsd_data[0] > 0)
166 return std::sqrt(lincsd->rmsd_data[1]/lincsd->rmsd_data[0]);
174 /* Do a set of nrec LINCS matrix multiplications.
175 * This function will return with up to date thread-local
176 * constraint data, without an OpenMP barrier.
 *
 * Approximates (I - A)^-1 * rhs by the truncated power series
 * sum_k A^k * rhs of order nOrder (nrec), using the sparse coupling
 * matrix blcc indexed via blnr/blbnb; accumulates into sol.
 */
178 static void lincs_matrix_expand(const gmx_lincsdata *lincsd,
179 const lincs_task_t *li_task,
181 real *rhs1, real *rhs2, real *sol)
183 int b0, b1, nrec, rec;
184 const int *blnr = lincsd->blnr;
185 const int *blbnb = lincsd->blbnb;
189 nrec = lincsd->nOrder;
191 for (rec = 0; rec < nrec; rec++)
195 if (lincsd->bTaskDep)
199 for (b = b0; b < b1; b++)
/* Sparse row*vector product: only couplings of constraint b contribute */
205 for (n = blnr[b]; n < blnr[b+1]; n++)
207 mvb = mvb + blcc[n]*rhs1[blbnb[n]];
210 sol[b] = sol[b] + mvb;
218 } /* nrec*(ncons+2*nrtot) flops */
220 if (lincsd->ntriangle > 0)
222 /* Perform an extra nrec recursions for only the constraints
223 * involved in rigid triangles.
224 * In this way their accuracy should come close to those of the other
225 * constraints, since triangles of constraints can produce eigenvalues
226 * around 0.7, while the effective eigenvalue for bond constraints
227 * is around 0.4 (and 0.7*0.7=0.5).
230 if (lincsd->bTaskDep)
232 /* We need a barrier here, since other threads might still be
233 * reading the contents of rhs1 and/or rhs2.
234 * We could avoid this barrier by introducing two extra rhs
235 * arrays for the triangle constraints only.
240 /* Constraints involved in a triangle are ensured to be in the same
241 * LINCS task. This means no barriers are required during the extra
242 * iterations for the triangle constraints.
244 const int *triangle = li_task->triangle;
245 const int *tri_bits = li_task->tri_bits;
247 for (rec = 0; rec < nrec; rec++)
251 for (tb = 0; tb < li_task->ntriangle; tb++)
253 int b, bits, nr0, nr1, n;
261 for (n = nr0; n < nr1; n++)
/* Only use couplings flagged in the per-triangle bitmask */
263 if (bits & (1 << (n - nr0)))
265 mvb = mvb + blcc[n]*rhs1[blbnb[n]];
269 sol[b] = sol[b] + mvb;
277 } /* nrec*(ntriangle + ncc_triangle*2) flops */
279 if (lincsd->bTaskDepTri)
281 /* The constraints triangles are decoupled from each other,
282 * but constraints in one triangle cross thread task borders.
283 * We could probably avoid this with more advanced setup code.
/* Applies the constraint corrections fac[b]*r[b] (scaled by prefac and,
 * when invmass != NULL, by the two atoms' inverse masses) to the atom
 * vectors x for all ncons constraints, without an index list.
 * Used in the single-thread case where no task indirection is needed. */
290 static void lincs_update_atoms_noind(int ncons, const int *bla,
292 const real *fac, rvec *r,
297 real mvb, im1, im2, tmp0, tmp1, tmp2;
299 if (invmass != nullptr)
301 for (b = 0; b < ncons; b++)
317 } /* 16 ncons flops */
/* invmass == NULL: all masses treated as 1 (elided branch) */
321 for (b = 0; b < ncons; b++)
/* As lincs_update_atoms_noind, but only updates the ncons constraints
 * listed in ind[]; this lets each thread touch only its own atoms. */
339 static void lincs_update_atoms_ind(int ncons, const int *ind, const int *bla,
341 const real *fac, rvec *r,
346 real mvb, im1, im2, tmp0, tmp1, tmp2;
348 if (invmass != nullptr)
350 for (bi = 0; bi < ncons; bi++)
367 } /* 16 ncons flops */
/* invmass == NULL: all masses treated as 1 (elided branch) */
371 for (bi = 0; bi < ncons; bi++)
386 } /* 16 ncons flops */
/* Thread-aware dispatcher for applying constraint corrections to x:
 * single task -> update everything directly; multiple tasks -> each thread
 * updates its own atom range, and constraints spanning several thread
 * atom blocks are handled on the master thread (task index li->ntask). */
390 static void lincs_update_atoms(gmx_lincsdata *li, int th,
392 const real *fac, rvec *r,
398 /* Single thread, we simply update for all constraints */
399 lincs_update_atoms_noind(li->nc_real,
400 li->bla, prefac, fac, r, invmass, x);
404 /* Update the atom vector components for our thread local
405 * constraints that only access our local atom range.
406 * This can be done without a barrier.
408 lincs_update_atoms_ind(li->task[th].nind, li->task[th].ind,
409 li->bla, prefac, fac, r, invmass, x);
411 if (li->task[li->ntask].nind > 0)
413 /* Update the constraints that operate on atoms
414 * in multiple thread atom blocks on the master thread.
419 lincs_update_atoms_ind(li->task[li->ntask].nind,
420 li->task[li->ntask].ind,
421 li->bla, prefac, fac, r, invmass, x);
427 #if GMX_SIMD_HAVE_REAL
428 /* Calculate the constraint distance vectors r to project on from x.
429 * Determine the right-hand side of the matrix equation using quantity f.
430 * This function only differs from calc_dr_x_xp_simd below in that
431 * no constraint length is subtracted and no PBC is used for f.
 *
 * Processes GMX_SIMD_REAL_WIDTH constraints per iteration; b0/b1 must be
 * SIMD-width aligned (asserted below). Outputs: r (normalized i-j vectors),
 * rhs and sol (both set to blc[b] * (r[b] . f_ij)).
 */
433 static void gmx_simdcall
434 calc_dr_x_f_simd(int b0,
437 const rvec * gmx_restrict x,
438 const rvec * gmx_restrict f,
439 const real * gmx_restrict blc,
440 const real * pbc_simd,
441 rvec * gmx_restrict r,
442 real * gmx_restrict rhs,
443 real * gmx_restrict sol)
445 assert(b0 % GMX_SIMD_REAL_WIDTH == 0);
447 alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset2[GMX_SIMD_REAL_WIDTH];
/* offset2 initialization is on elided lines; presumably offset2[i] = i
 * for the scatter of consecutive rvecs below — TODO confirm */
449 for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
454 for (int bs = b0; bs < b1; bs += GMX_SIMD_REAL_WIDTH)
456 SimdReal x0_S, y0_S, z0_S;
457 SimdReal x1_S, y1_S, z1_S;
458 SimdReal rx_S, ry_S, rz_S, n2_S, il_S;
459 SimdReal fx_S, fy_S, fz_S, ip_S, rhs_S;
460 alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset0[GMX_SIMD_REAL_WIDTH];
461 alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset1[GMX_SIMD_REAL_WIDTH];
/* Gather the two atom indices of each of the WIDTH constraints */
463 for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
465 offset0[i] = bla[bs*2 + i*2];
466 offset1[i] = bla[bs*2 + i*2 + 1];
469 gatherLoadUTranspose<3>(reinterpret_cast<const real *>(x), offset0, &x0_S, &y0_S, &z0_S);
470 gatherLoadUTranspose<3>(reinterpret_cast<const real *>(x), offset1, &x1_S, &y1_S, &z1_S);
475 pbc_correct_dx_simd(&rx_S, &ry_S, &rz_S, pbc_simd);
477 n2_S = norm2(rx_S, ry_S, rz_S);
478 il_S = invsqrt(n2_S);
484 transposeScatterStoreU<3>(reinterpret_cast<real *>(r + bs), offset2, rx_S, ry_S, rz_S);
486 gatherLoadUTranspose<3>(reinterpret_cast<const real *>(f), offset0, &x0_S, &y0_S, &z0_S);
487 gatherLoadUTranspose<3>(reinterpret_cast<const real *>(f), offset1, &x1_S, &y1_S, &z1_S);
492 ip_S = iprod(rx_S, ry_S, rz_S, fx_S, fy_S, fz_S);
494 rhs_S = load<SimdReal>(blc + bs) * ip_S;
496 store(rhs + bs, rhs_S);
497 store(sol + bs, rhs_S);
500 #endif // GMX_SIMD_HAVE_REAL
502 /* LINCS projection, works on derivatives of the coordinates */
/* Projects the quantity f (force or coordinate derivative) onto the space
 * orthogonal to the constraints and stores the result in fp. Optionally
 * accumulates dH/dlambda (bCalcDHDL) and the virial contribution (bCalcVir
 * into rmdf). th selects the thread task; econq selects mass weighting. */
503 static void do_lincsp(rvec *x, rvec *f, rvec *fp, t_pbc *pbc,
504 gmx_lincsdata *lincsd, int th,
506 int econq, gmx_bool bCalcDHDL,
507 gmx_bool bCalcVir, tensor rmdf)
510 int *bla, *blnr, *blbnb;
512 real *blc, *blmf, *blcc, *rhs1, *rhs2, *sol;
514 b0 = lincsd->task[th].b0;
515 b1 = lincsd->task[th].b1;
520 blbnb = lincsd->blbnb;
521 if (econq != econqForce)
523 /* Use mass-weighted parameters */
529 /* Use non mass-weighted parameters */
531 blmf = lincsd->blmf1;
533 blcc = lincsd->tmpncc;
538 #if GMX_SIMD_HAVE_REAL
539 /* This SIMD code does the same as the plain-C code after the #else.
540 * The only difference is that we always call pbc code, as with SIMD
541 * the overhead of pbc computation (when not needed) is small.
543 alignas(GMX_SIMD_ALIGNMENT) real pbc_simd[9*GMX_SIMD_REAL_WIDTH];
545 /* Convert the pbc struct for SIMD */
546 set_pbc_simd(pbc, pbc_simd);
548 /* Compute normalized x i-j vectors, store in r.
549 * Compute the inner product of r and xp i-j and store in rhs1.
551 calc_dr_x_f_simd(b0, b1, bla, x, f, blc,
555 #else // GMX_SIMD_HAVE_REAL
557 /* Compute normalized i-j vectors */
560 for (b = b0; b < b1; b++)
564 pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
/* No-PBC branch (elided condition): plain vector difference */
570 for (b = b0; b < b1; b++)
574 rvec_sub(x[bla[2*b]], x[bla[2*b+1]], dx);
576 } /* 16 ncons flops */
579 for (b = b0; b < b1; b++)
586 mvb = blc[b]*(r[b][0]*(f[i][0] - f[j][0]) +
587 r[b][1]*(f[i][1] - f[j][1]) +
588 r[b][2]*(f[i][2] - f[j][2]));
594 #endif // GMX_SIMD_HAVE_REAL
596 if (lincsd->bTaskDep)
598 /* We need a barrier, since the matrix construction below
599 * can access entries in r of other threads.
604 /* Construct the (sparse) LINCS matrix */
605 for (b = b0; b < b1; b++)
609 for (n = blnr[b]; n < blnr[b+1]; n++)
611 blcc[n] = blmf[n]*iprod(r[b], r[blbnb[n]]);
614 /* Together: 23*ncons + 6*nrtot flops */
616 lincs_matrix_expand(lincsd, &lincsd->task[th], blcc, rhs1, rhs2, sol);
617 /* nrec*(ncons+2*nrtot) flops */
619 if (econq == econqDeriv_FlexCon)
621 /* We only want to constrain the flexible constraints,
622 * so we mask out the normal ones by setting sol to 0.
624 for (b = b0; b < b1; b++)
626 if (!(lincsd->bllen0[b] == 0 && lincsd->ddist[b] == 0))
633 /* We multiply sol by blc, so we can use lincs_update_atoms for OpenMP */
634 for (b = b0; b < b1; b++)
639 /* When constraining forces, we should not use mass weighting,
640 * so we pass invmass=NULL, which results in the use of 1 for all atoms.
642 lincs_update_atoms(lincsd, th, 1.0, sol, r,
643 (econq != econqForce) ? invmass : nullptr, fp);
/* Per-thread dH/dlambda accumulation (guard on bCalcDHDL elided) */
650 for (b = b0; b < b1; b++)
652 dhdlambda -= sol[b]*lincsd->ddist[b];
655 lincsd->task[th].dhdlambda = dhdlambda;
660 /* Constraint virial,
661 * determines sum r_bond x delta f,
662 * where delta f is the constraint correction
663 * of the quantity that is being constrained.
665 for (b = b0; b < b1; b++)
670 mvb = lincsd->bllen[b]*sol[b];
671 for (i = 0; i < DIM; i++)
674 for (j = 0; j < DIM; j++)
676 rmdf[i][j] += tmp1*r[b][j];
679 } /* 23 ncons flops */
683 #if GMX_SIMD_HAVE_REAL
684 /* Calculate the constraint distance vectors r to project on from x.
685 * Determine the right-hand side of the matrix equation using coordinates xp.
 *
 * SIMD version used by do_lincs: outputs r (normalized i-j vectors of x)
 * and rhs/sol = blc[b] * (r[b] . xp_ij - bllen[b]). b0 must be
 * SIMD-width aligned (asserted).
 */
687 static void gmx_simdcall
688 calc_dr_x_xp_simd(int b0,
691 const rvec * gmx_restrict x,
692 const rvec * gmx_restrict xp,
693 const real * gmx_restrict bllen,
694 const real * gmx_restrict blc,
695 const real * pbc_simd,
696 rvec * gmx_restrict r,
697 real * gmx_restrict rhs,
698 real * gmx_restrict sol)
700 assert(b0 % GMX_SIMD_REAL_WIDTH == 0);
701 alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset2[GMX_SIMD_REAL_WIDTH];
/* offset2 initialization is on elided lines — TODO confirm it is i */
703 for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
708 for (int bs = b0; bs < b1; bs += GMX_SIMD_REAL_WIDTH)
710 SimdReal x0_S, y0_S, z0_S;
711 SimdReal x1_S, y1_S, z1_S;
712 SimdReal rx_S, ry_S, rz_S, n2_S, il_S;
713 SimdReal rxp_S, ryp_S, rzp_S, ip_S, rhs_S;
714 alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset0[GMX_SIMD_REAL_WIDTH];
715 alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset1[GMX_SIMD_REAL_WIDTH];
/* Gather the two atom indices of each of the WIDTH constraints */
717 for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
719 offset0[i] = bla[bs*2 + i*2];
720 offset1[i] = bla[bs*2 + i*2 + 1];
723 gatherLoadUTranspose<3>(reinterpret_cast<const real *>(x), offset0, &x0_S, &y0_S, &z0_S);
724 gatherLoadUTranspose<3>(reinterpret_cast<const real *>(x), offset1, &x1_S, &y1_S, &z1_S);
729 pbc_correct_dx_simd(&rx_S, &ry_S, &rz_S, pbc_simd);
731 n2_S = norm2(rx_S, ry_S, rz_S);
732 il_S = invsqrt(n2_S);
738 transposeScatterStoreU<3>(reinterpret_cast<real *>(r + bs), offset2, rx_S, ry_S, rz_S);
740 gatherLoadUTranspose<3>(reinterpret_cast<const real *>(xp), offset0, &x0_S, &y0_S, &z0_S);
741 gatherLoadUTranspose<3>(reinterpret_cast<const real *>(xp), offset1, &x1_S, &y1_S, &z1_S);
746 pbc_correct_dx_simd(&rxp_S, &ryp_S, &rzp_S, pbc_simd);
748 ip_S = iprod(rx_S, ry_S, rz_S, rxp_S, ryp_S, rzp_S);
750 rhs_S = load<SimdReal>(blc + bs) * (ip_S - load<SimdReal>(bllen + bs));
752 store(rhs + bs, rhs_S);
753 store(sol + bs, rhs_S);
756 #endif // GMX_SIMD_HAVE_REAL
758 /* Determine the distances and right-hand side for the next iteration */
/* Plain-C version: for each constraint, computes the second-order length
 * correction blc[b]*(len - dlen2/sqrt(dlen2)) and raises the rotation
 * warning when dlen2 < wfac*len2 (constraint rotated more than wangle). */
759 gmx_unused static void calc_dist_iter(
763 const rvec * gmx_restrict xp,
764 const real * gmx_restrict bllen,
765 const real * gmx_restrict blc,
768 real * gmx_restrict rhs,
769 real * gmx_restrict sol,
774 for (b = b0; b < b1; b++)
776 real len, len2, dlen2, mvb;
782 pbc_dx_aiuc(pbc, xp[bla[2*b]], xp[bla[2*b+1]], dx);
/* No-PBC branch (elided condition) */
786 rvec_sub(xp[bla[2*b]], xp[bla[2*b+1]], dx);
789 dlen2 = 2*len2 - norm2(dx);
790 if (dlen2 < wfac*len2)
792 /* not race free - see detailed comment in caller */
797 mvb = blc[b]*(len - dlen2*gmx::invsqrt(dlen2));
805 } /* 20*ncons flops */
808 #if GMX_SIMD_HAVE_REAL
809 /* As the function above, but using SIMD intrinsics */
/* Computes the iteration right-hand side for WIDTH constraints at a time;
 * accumulates rotation warnings in a SIMD boolean (warn_S) instead of
 * setting the flag per constraint. */
810 static void gmx_simdcall
811 calc_dist_iter_simd(int b0,
814 const rvec * gmx_restrict x,
815 const real * gmx_restrict bllen,
816 const real * gmx_restrict blc,
817 const real * pbc_simd,
819 real * gmx_restrict rhs,
820 real * gmx_restrict sol,
823 SimdReal min_S(GMX_REAL_MIN);
825 SimdReal wfac_S(wfac);
830 assert(b0 % GMX_SIMD_REAL_WIDTH == 0);
832 /* Initialize all to FALSE */
833 warn_S = (two_S < setZero());
835 for (bs = b0; bs < b1; bs += GMX_SIMD_REAL_WIDTH)
837 SimdReal x0_S, y0_S, z0_S;
838 SimdReal x1_S, y1_S, z1_S;
839 SimdReal rx_S, ry_S, rz_S, n2_S;
840 SimdReal len_S, len2_S, dlen2_S, lc_S, blc_S;
841 alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset0[GMX_SIMD_REAL_WIDTH];
842 alignas(GMX_SIMD_ALIGNMENT) std::int32_t offset1[GMX_SIMD_REAL_WIDTH];
/* Gather the two atom indices of each of the WIDTH constraints */
844 for (int i = 0; i < GMX_SIMD_REAL_WIDTH; i++)
846 offset0[i] = bla[bs*2 + i*2];
847 offset1[i] = bla[bs*2 + i*2 + 1];
850 gatherLoadUTranspose<3>(reinterpret_cast<const real *>(x), offset0, &x0_S, &y0_S, &z0_S);
851 gatherLoadUTranspose<3>(reinterpret_cast<const real *>(x), offset1, &x1_S, &y1_S, &z1_S);
856 pbc_correct_dx_simd(&rx_S, &ry_S, &rz_S, pbc_simd);
858 n2_S = norm2(rx_S, ry_S, rz_S);
860 len_S = load<SimdReal>(bllen + bs);
861 len2_S = len_S * len_S;
/* dlen2 = 2*len^2 - |dx|^2, fused multiply-subtract */
863 dlen2_S = fms(two_S, len2_S, n2_S);
865 warn_S = warn_S || (dlen2_S < (wfac_S * len2_S));
867 /* Avoid 1/0 by taking the max with REAL_MIN.
868 * Note: when dlen2 is close to zero (90 degree constraint rotation),
869 * the accuracy of the algorithm is no longer relevant.
871 dlen2_S = max(dlen2_S, min_S);
873 lc_S = fnma(dlen2_S, invsqrt(dlen2_S), len_S);
875 blc_S = load<SimdReal>(blc + bs);
879 store(rhs + bs, lc_S);
880 store(sol + bs, lc_S);
888 #endif // GMX_SIMD_HAVE_REAL
/* The core LINCS constraint step: corrects the new coordinates xp so all
 * constraints have their reference length, starting from reference
 * coordinates x. Performs the matrix expansion, then nIter rounds of
 * centripetal correction; optionally updates velocities v (invdt != 0),
 * dH/dlambda and the virial. th selects the thread task. */
890 static void do_lincs(rvec *x, rvec *xp, matrix box, t_pbc *pbc,
891 gmx_lincsdata *lincsd, int th,
895 real wangle, gmx_bool *bWarn,
896 real invdt, rvec * gmx_restrict v,
897 gmx_bool bCalcVir, tensor vir_r_m_dr)
899 int b0, b1, b, i, j, n, iter;
900 int *bla, *blnr, *blbnb;
902 real *blc, *blmf, *bllen, *blcc, *rhs1, *rhs2, *sol, *blc_sol, *mlambda;
905 b0 = lincsd->task[th].b0;
906 b1 = lincsd->task[th].b1;
911 blbnb = lincsd->blbnb;
914 bllen = lincsd->bllen;
915 blcc = lincsd->tmpncc;
919 blc_sol = lincsd->tmp4;
920 mlambda = lincsd->mlambda;
921 nlocat = lincsd->nlocat;
923 #if GMX_SIMD_HAVE_REAL
925 /* This SIMD code does the same as the plain-C code after the #else.
926 * The only difference is that we always call pbc code, as with SIMD
927 * the overhead of pbc computation (when not needed) is small.
929 alignas(GMX_SIMD_ALIGNMENT) real pbc_simd[9*GMX_SIMD_REAL_WIDTH];
931 /* Convert the pbc struct for SIMD */
932 set_pbc_simd(pbc, pbc_simd);
934 /* Compute normalized x i-j vectors, store in r.
935 * Compute the inner product of r and xp i-j and store in rhs1.
937 calc_dr_x_xp_simd(b0, b1, bla, x, xp, bllen, blc,
941 #else // GMX_SIMD_HAVE_REAL
945 /* Compute normalized i-j vectors */
946 for (b = b0; b < b1; b++)
951 pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
954 pbc_dx_aiuc(pbc, xp[bla[2*b]], xp[bla[2*b+1]], dx);
955 mvb = blc[b]*(iprod(r[b], dx) - bllen[b]);
/* No-PBC branch (elided condition) */
962 /* Compute normalized i-j vectors */
963 for (b = b0; b < b1; b++)
965 real tmp0, tmp1, tmp2, rlen, mvb;
969 tmp0 = x[i][0] - x[j][0];
970 tmp1 = x[i][1] - x[j][1];
971 tmp2 = x[i][2] - x[j][2];
972 rlen = gmx::invsqrt(tmp0*tmp0 + tmp1*tmp1 + tmp2*tmp2);
980 mvb = blc[b]*(r[b][0]*(xp[i][0] - xp[j][0]) +
981 r[b][1]*(xp[i][1] - xp[j][1]) +
982 r[b][2]*(xp[i][2] - xp[j][2]) - bllen[b]);
987 /* Together: 26*ncons + 6*nrtot flops */
990 #endif // GMX_SIMD_HAVE_REAL
992 if (lincsd->bTaskDep)
994 /* We need a barrier, since the matrix construction below
995 * can access entries in r of other threads.
1000 /* Construct the (sparse) LINCS matrix */
1001 for (b = b0; b < b1; b++)
1003 for (n = blnr[b]; n < blnr[b+1]; n++)
1005 blcc[n] = blmf[n]*iprod(r[b], r[blbnb[n]]);
1008 /* Together: 26*ncons + 6*nrtot flops */
1010 lincs_matrix_expand(lincsd, &lincsd->task[th], blcc, rhs1, rhs2, sol);
1011 /* nrec*(ncons+2*nrtot) flops */
1013 #if GMX_SIMD_HAVE_REAL
1014 for (b = b0; b < b1; b += GMX_SIMD_REAL_WIDTH)
1016 SimdReal t1 = load<SimdReal>(blc + b);
1017 SimdReal t2 = load<SimdReal>(sol + b);
1018 store(mlambda + b, t1 * t2);
/* Scalar fallback for mlambda = blc*sol (elided #else) */
1021 for (b = b0; b < b1; b++)
1023 mlambda[b] = blc[b]*sol[b];
1025 #endif // GMX_SIMD_HAVE_REAL
1027 /* Update the coordinates */
1028 lincs_update_atoms(lincsd, th, 1.0, mlambda, r, invmass, xp);
1031 ******** Correction for centripetal effects ********
1036 wfac = std::cos(DEG2RAD*wangle);
1039 for (iter = 0; iter < lincsd->nIter; iter++)
1041 if ((lincsd->bCommIter && DOMAINDECOMP(cr) && cr->dd->constraints))
1046 /* Communicate the corrected non-local coordinates */
1047 if (DOMAINDECOMP(cr))
1049 dd_move_x_constraints(cr->dd, box, xp, nullptr, FALSE);
1054 else if (lincsd->bTaskDep)
1059 #if GMX_SIMD_HAVE_REAL
1060 calc_dist_iter_simd(b0, b1, bla, xp, bllen, blc, pbc_simd, wfac,
1063 calc_dist_iter(b0, b1, bla, xp, bllen, blc, pbc, wfac,
1065 /* 20*ncons flops */
1066 #endif // GMX_SIMD_HAVE_REAL
1068 lincs_matrix_expand(lincsd, &lincsd->task[th], blcc, rhs1, rhs2, sol);
1069 /* nrec*(ncons+2*nrtot) flops */
1071 #if GMX_SIMD_HAVE_REAL
1072 for (b = b0; b < b1; b += GMX_SIMD_REAL_WIDTH)
1074 SimdReal t1 = load<SimdReal>(blc + b);
1075 SimdReal t2 = load<SimdReal>(sol + b);
1076 SimdReal mvb = t1 * t2;
1077 store(blc_sol + b, mvb);
1078 store(mlambda + b, load<SimdReal>(mlambda + b) + mvb);
/* Scalar fallback (elided #else) */
1081 for (b = b0; b < b1; b++)
1085 mvb = blc[b]*sol[b];
1089 #endif // GMX_SIMD_HAVE_REAL
1091 /* Update the coordinates */
1092 lincs_update_atoms(lincsd, th, 1.0, blc_sol, r, invmass, xp);
1094 /* nit*ncons*(37+9*nrec) flops */
1098 /* Update the velocities */
1099 lincs_update_atoms(lincsd, th, invdt, mlambda, r, invmass, v);
1100 /* 16 ncons flops */
1103 if (nlocat != nullptr && (bCalcDHDL || bCalcVir))
1105 if (lincsd->bTaskDep)
1107 /* In lincs_update_atoms threads might cross-read mlambda */
1111 /* Only account for local atoms */
1112 for (b = b0; b < b1; b++)
1114 mlambda[b] *= 0.5*nlocat[b];
1123 for (b = b0; b < b1; b++)
1125 /* Note that this is dhdl*dt^2, the dt^2 factor is corrected
1126 * later after the contributions are reduced over the threads.
1128 dhdl -= lincsd->mlambda[b]*lincsd->ddist[b];
1130 lincsd->task[th].dhdlambda = dhdl;
1135 /* Constraint virial */
1136 for (b = b0; b < b1; b++)
1140 tmp0 = -bllen[b]*mlambda[b];
1141 for (i = 0; i < DIM; i++)
1143 tmp1 = tmp0*r[b][i];
1144 for (j = 0; j < DIM; j++)
1146 vir_r_m_dr[i][j] -= tmp1*r[b][j];
1149 } /* 22 ncons flops */
1153 * 26*ncons + 6*nrtot + nrec*(ncons+2*nrtot)
1154 * + nit * (20*ncons + nrec*(ncons+2*nrtot) + 17 ncons)
1156 * (26+nrec)*ncons + (6+2*nrec)*nrtot
1157 * + nit * ((37+nrec)*ncons + 2*nrec*nrtot)
1159 * (63+nrec)*ncons + (6+4*nrec)*nrtot
1163 /* Sets the elements in the LINCS matrix for task li_task */
/* Fills the mass-factor matrices blmf/blmf1 for the task's constraint
 * range, detects constraint triangles (recording them in the task's
 * triangle/tri_bits lists), and counts triangles whose constraints are
 * split across tasks (*nCrossTaskTriangles). */
1164 static void set_lincs_matrix_task(gmx_lincsdata *li,
1165 lincs_task_t *li_task,
1166 const real *invmass,
1168 int *nCrossTaskTriangles)
1172 /* Construct the coupling coefficient matrix blmf */
1173 li_task->ntriangle = 0;
1175 *nCrossTaskTriangles = 0;
1176 for (i = li_task->b0; i < li_task->b1; i++)
1181 a2 = li->bla[2*i+1];
1182 for (n = li->blnr[i]; (n < li->blnr[i+1]); n++)
1184 int k, sign, center, end;
1188 /* If we are using multiple, independent tasks for LINCS,
1189 * the calls to check_assign_connected should have
1190 * put all connected constraints in our task.
1192 assert(li->bTaskDep || (k >= li_task->b0 && k < li_task->b1));
/* Determine the sign of the coupling from which atoms are shared */
1194 if (a1 == li->bla[2*k] || a2 == li->bla[2*k+1])
1202 if (a1 == li->bla[2*k] || a1 == li->bla[2*k+1])
1212 li->blmf[n] = sign*invmass[center]*li->blc[i]*li->blc[k];
1213 li->blmf1[n] = sign*0.5;
1214 if (li->ncg_triangle > 0)
1218 /* Look for constraint triangles */
1219 for (nk = li->blnr[k]; (nk < li->blnr[k+1]); nk++)
1222 if (kk != i && kk != k &&
1223 (li->bla[2*kk] == end || li->bla[2*kk+1] == end))
1225 /* Check if the constraints in this triangle actually
1226 * belong to a different task. We still assign them
1227 * here, since it's convenient for the triangle
1228 * iterations, but we then need an extra barrier.
1230 if (k < li_task->b0 || k >= li_task->b1 ||
1231 kk < li_task->b0 || kk >= li_task->b1)
1233 (*nCrossTaskTriangles)++;
1236 if (li_task->ntriangle == 0 ||
1237 li_task->triangle[li_task->ntriangle - 1] < i)
1239 /* Add this constraint to the triangle list */
1240 li_task->triangle[li_task->ntriangle] = i;
1241 li_task->tri_bits[li_task->ntriangle] = 0;
1242 li_task->ntriangle++;
/* tri_bits stores one bit per coupling; bail out if they don't fit */
1243 if (li->blnr[i+1] - li->blnr[i] > static_cast<int>(sizeof(li_task->tri_bits[0])*8 - 1))
1245 gmx_fatal(FARGS, "A constraint is connected to %d constraints, this is more than the %d allowed for constraints participating in triangles",
1246 li->blnr[i+1] - li->blnr[i],
1247 sizeof(li_task->tri_bits[0])*8-1);
1250 li_task->tri_bits[li_task->ntriangle-1] |= (1 << (n - li->blnr[i]));
1259 /* Sets the elements in the LINCS matrix */
/* Fills blc/blc1 for all constraints, then builds the coupling matrices
 * per task in parallel (OpenMP reduction collects triangle counts), and
 * records the lambda value the matrices were built for in matlam. */
1260 static void set_lincs_matrix(gmx_lincsdata *li, real *invmass, real lambda)
1263 const real invsqrt2 = 0.7071067811865475244;
1265 for (i = 0; (i < li->nc); i++)
1270 a2 = li->bla[2*i+1];
1271 li->blc[i] = gmx::invsqrt(invmass[a1] + invmass[a2]);
1272 li->blc1[i] = invsqrt2;
1275 /* Construct the coupling coefficient matrix blmf */
1276 int th, ntriangle = 0, ncc_triangle = 0, nCrossTaskTriangles = 0;
1277 #pragma omp parallel for reduction(+: ntriangle, ncc_triangle, nCrossTaskTriangles) num_threads(li->ntask) schedule(static)
1278 for (th = 0; th < li->ntask; th++)
1282 set_lincs_matrix_task(li, &li->task[th], invmass,
1283 &ncc_triangle, &nCrossTaskTriangles);
/* One iteration per thread with the + reduction sums the task counts */
1284 ntriangle = li->task[th].ntriangle;
1286 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1288 li->ntriangle = ntriangle;
1289 li->ncc_triangle = ncc_triangle;
1290 li->bTaskDepTri = (nCrossTaskTriangles > 0);
1294 fprintf(debug, "The %d constraints participate in %d triangles\n",
1295 li->nc, li->ntriangle);
1296 fprintf(debug, "There are %d constraint couplings, of which %d in triangles\n",
1297 li->ncc, li->ncc_triangle);
1298 if (li->ntriangle > 0 && li->ntask > 1)
1300 fprintf(debug, "%d constraint triangles contain constraints assigned to different tasks\n",
1301 nCrossTaskTriangles);
1306 * so we know with which lambda value the masses have been set.
1308 li->matlam = lambda;
/* Counts the constraints that are part of a rigid triangle: for every
 * constraint c0 it walks the constraints connected via its atoms (using
 * the atom-to-constraint lookup at2con) and checks whether a third
 * constraint closes the triangle back to c0's atoms. */
1311 static int count_triangle_constraints(const t_ilist *ilist,
1312 const t_blocka *at2con)
1314 int ncon1, ncon_tot;
1315 int c0, a00, a01, n1, c1, a10, a11, ac1, n2, c2, a20, a21;
1318 t_iatom *ia1, *ia2, *iap;
1320 ncon1 = ilist[F_CONSTR].nr/3;
1321 ncon_tot = ncon1 + ilist[F_CONSTRNC].nr/3;
1323 ia1 = ilist[F_CONSTR].iatoms;
1324 ia2 = ilist[F_CONSTRNC].iatoms;
1327 for (c0 = 0; c0 < ncon_tot; c0++)
1330 iap = constr_iatomptr(ncon1, ia1, ia2, c0);
1333 for (n1 = at2con->index[a01]; n1 < at2con->index[a01+1]; n1++)
1338 iap = constr_iatomptr(ncon1, ia1, ia2, c1);
1349 for (n2 = at2con->index[ac1]; n2 < at2con->index[ac1+1]; n2++)
1352 if (c2 != c0 && c2 != c1)
1354 iap = constr_iatomptr(ncon1, ia1, ia2, c2);
1357 if (a20 == a00 || a21 == a00)
1371 return ncon_triangle;
/* Returns TRUE when any constraint has further constraints connected at
 * both of its atoms, i.e. chains of more than two sequential constraints
 * exist. Used to decide whether LINCS tasks can be made independent. */
1374 static gmx_bool more_than_two_sequential_constraints(const t_ilist *ilist,
1375 const t_blocka *at2con)
1377 t_iatom *ia1, *ia2, *iap;
1378 int ncon1, ncon_tot, c;
1380 gmx_bool bMoreThanTwoSequentialConstraints;
1382 ncon1 = ilist[F_CONSTR].nr/3;
1383 ncon_tot = ncon1 + ilist[F_CONSTRNC].nr/3;
1385 ia1 = ilist[F_CONSTR].iatoms;
1386 ia2 = ilist[F_CONSTRNC].iatoms;
1388 bMoreThanTwoSequentialConstraints = FALSE;
1389 for (c = 0; c < ncon_tot && !bMoreThanTwoSequentialConstraints; c++)
1391 iap = constr_iatomptr(ncon1, ia1, ia2, c);
1394 /* Check if this constraint has constraints connected at both atoms */
1395 if (at2con->index[a1+1] - at2con->index[a1] > 1 &&
1396 at2con->index[a2+1] - at2con->index[a2] > 1)
1398 bMoreThanTwoSequentialConstraints = TRUE;
1402 return bMoreThanTwoSequentialConstraints;
/* qsort()-style comparator for int values.
 *
 * Returns a value <0, ==0 or >0 when *a is smaller than, equal to, or
 * larger than *b.
 *
 * Fixed: the previous implementation returned (*a - *b), whose
 * subtraction can overflow signed int (undefined behavior) and yield the
 * wrong sign for operands of large opposite magnitude (e.g. INT_MAX vs a
 * negative value). The comparison-based form below is overflow-safe.
 */
static int int_comp(const void *a, const void *b)
{
    const int ia = *static_cast<const int *>(a);
    const int ib = *static_cast<const int *>(b);

    return (ia > ib) - (ia < ib);
}
/* Allocates and initializes the LINCS data structure from the global
 * topology: counts constraints (regular, flexible, triangle), determines
 * the maximum connectivity, decides whether per-iteration communication
 * (PLINCS) and inter-task dependencies are needed, and sets up the
 * per-thread task array. Prints citations and a summary to fplog. */
1410 gmx_lincsdata_t init_lincs(FILE *fplog, const gmx_mtop_t *mtop,
1411 int nflexcon_global, const t_blocka *at2con,
1412 gmx_bool bPLINCS, int nIter, int nProjOrder)
1415 gmx_bool bMoreThanTwoSeq;
1419 fprintf(fplog, "\nInitializing%s LINear Constraint Solver\n",
1420 bPLINCS ? " Parallel" : "");
1426 gmx_mtop_ftype_count(mtop, F_CONSTR) +
1427 gmx_mtop_ftype_count(mtop, F_CONSTRNC);
1428 li->ncg_flex = nflexcon_global;
1431 li->nOrder = nProjOrder;
1433 li->max_connect = 0;
1434 for (size_t mt = 0; mt < mtop->moltype.size(); mt++)
1436 for (int a = 0; a < mtop->moltype[mt].atoms.nr; a++)
1438 li->max_connect = std::max(li->max_connect,
1439 at2con[mt].index[a + 1] - at2con[mt].index[a]);
1443 li->ncg_triangle = 0;
1444 bMoreThanTwoSeq = FALSE;
1445 for (const gmx_molblock_t &molb : mtop->molblock)
1447 const gmx_moltype_t &molt = mtop->moltype[molb.type];
1451 count_triangle_constraints(molt.ilist, &at2con[molb.type]);
1453 if (!bMoreThanTwoSeq &&
1454 more_than_two_sequential_constraints(molt.ilist, &at2con[molb.type]))
1456 bMoreThanTwoSeq = TRUE;
1460 /* Check if we need to communicate not only before LINCS,
1461 * but also before each iteration.
1462 * The check for only two sequential constraints is only
1463 * useful for the common case of H-bond only constraints.
1464 * With more effort we could also make it useful for small
1465 * molecules with nr. sequential constraints <= nOrder-1.
1467 li->bCommIter = (bPLINCS && (li->nOrder < 1 || bMoreThanTwoSeq));
1469 if (debug && bPLINCS)
1471 fprintf(debug, "PLINCS communication before each iteration: %d\n",
1475 /* LINCS can run on any number of threads.
1476 * Currently the number is fixed for the whole simulation,
1477 * but it could be set in set_lincs().
1478 * The current constraint to task assignment code can create independent
1479 * tasks only when not more than two constraints are connected sequentially.
1481 li->ntask = gmx_omp_nthreads_get(emntLINCS);
1482 li->bTaskDep = (li->ntask > 1 && bMoreThanTwoSeq);
1485 fprintf(debug, "LINCS: using %d threads, tasks are %sdependent\n",
1486 li->ntask, li->bTaskDep ? "" : "in");
1494 /* Allocate an extra element for "task-overlap" constraints */
1495 snew(li->task, li->ntask + 1);
1498 if (bPLINCS || li->ncg_triangle > 0)
1500 please_cite(fplog, "Hess2008a");
1504 please_cite(fplog, "Hess97a");
1509 fprintf(fplog, "The number of constraints is %d\n", li->ncg);
1512 fprintf(fplog, "There are inter charge-group constraints,\n"
1513 "will communicate selected coordinates each lincs iteration\n");
1515 if (li->ncg_triangle > 0)
1518 "%d constraints are involved in constraint triangles,\n"
1519 "will apply an additional matrix expansion of order %d for couplings\n"
1520 "between constraints inside triangles\n",
1521 li->ncg_triangle, li->nOrder);
1528 /* Sets up the work division over the threads */
/* Partitions the constraints of li over li->ntask tasks using per-atom
 * thread bitmasks: a constraint stays in a task's independent list only
 * when no lower-indexed task also touches one of its two atoms; the rest
 * goes into an overflow list handled serially by the extra task
 * li->task[li->ntask].
 *
 * NOTE(review): elided extract — some braces/declarations of the original
 * body are not visible here.
 */
1529 static void lincs_thread_setup(gmx_lincsdata *li, int natoms)
1536 if (natoms > li->atf_nalloc)
1538 li->atf_nalloc = over_alloc_large(natoms);
1539 srenew(li->atf, li->atf_nalloc);
1543 /* Clear the atom flags */
1544 for (a = 0; a < natoms; a++)
1546 bitmask_clear(&atf[a]);
/* One bit per task: the bitmask width bounds the thread count */
1549 if (li->ntask > BITMASK_SIZE)
1551 gmx_fatal(FARGS, "More than %d threads is not supported for LINCS.", BITMASK_SIZE);
1554 for (th = 0; th < li->ntask; th++)
1556 lincs_task_t *li_task;
1559 li_task = &li->task[th];
1561 /* For each atom set a flag for constraints from each */
1562 for (b = li_task->b0; b < li_task->b1; b++)
1564 bitmask_set_bit(&atf[li->bla[b*2 ]], th);
1565 bitmask_set_bit(&atf[li->bla[b*2 + 1]], th);
/* Each thread classifies its own constraints in parallel */
1569 #pragma omp parallel for num_threads(li->ntask) schedule(static)
1570 for (th = 0; th < li->ntask; th++)
1574 lincs_task_t *li_task;
1578 li_task = &li->task[th];
1580 if (li_task->b1 - li_task->b0 > li_task->ind_nalloc)
1582 li_task->ind_nalloc = over_alloc_large(li_task->b1-li_task->b0);
1583 srenew(li_task->ind, li_task->ind_nalloc);
1584 srenew(li_task->ind_r, li_task->ind_nalloc);
/* mask has bits 0..th-1 set: "any lower-indexed task" */
1587 bitmask_init_low_bits(&mask, th);
1590 li_task->nind_r = 0;
1591 for (b = li_task->b0; b < li_task->b1; b++)
1593 /* We let the constraint with the lowest thread index
1594 * operate on atoms with constraints from multiple threads.
1596 if (bitmask_is_disjoint(atf[li->bla[b*2]], mask) &&
1597 bitmask_is_disjoint(atf[li->bla[b*2+1]], mask))
1599 /* Add the constraint to the local atom update index */
1600 li_task->ind[li_task->nind++] = b;
1604 /* Add the constraint to the rest block */
1605 li_task->ind_r[li_task->nind_r++] = b;
1609 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
1612 /* We need to copy all constraints which have not be assigned
1613 * to a thread to a separate list which will be handled by one thread.
1615 li_m = &li->task[li->ntask];
1618 for (th = 0; th < li->ntask; th++)
1620 lincs_task_t *li_task;
1623 li_task = &li->task[th];
1625 if (li_m->nind + li_task->nind_r > li_m->ind_nalloc)
1627 li_m->ind_nalloc = over_alloc_large(li_m->nind+li_task->nind_r);
1628 srenew(li_m->ind, li_m->ind_nalloc);
1631 for (b = 0; b < li_task->nind_r; b++)
1633 li_m->ind[li_m->nind++] = li_task->ind_r[b];
1638 fprintf(debug, "LINCS thread %d: %d constraints\n",
1645 fprintf(debug, "LINCS thread r: %d constraints\n",
1650 /* There is no realloc with alignment, so here we make one for reals.
1651 * Note that this function does not preserve the contents of the memory.
/* Frees *ptr and reallocates it aligned for nelem reals; the old
 * contents are discarded (free + aligned alloc, not a true realloc). */
1653 static void resize_real_aligned(real **ptr, int nelem)
1655 sfree_aligned(*ptr);
1656 snew_aligned(*ptr, nelem, align_bytes);
/* Append one constraint to the LINCS data structures: record its
 * topology->LINCS index mapping, its A/B-state lengths, its atom pair,
 * and reserve connection-matrix space for its neighbors.
 *
 * NOTE(review): elided extract — the parameter list shown here is
 * incomplete (atoms a1/a2 and the local 'con' counter come from lines
 * not visible in this chunk).
 */
1659 static void assign_constraint(gmx_lincsdata *li,
1660 int constraint_index,
1662 real lenA, real lenB,
1663 const t_blocka *at2con)
1669 /* Make an mapping of local topology constraint index to LINCS index */
1670 li->con_index[constraint_index] = con;
/* bllen0 holds the A-state length; ddist the B-A difference used for
 * free-energy interpolation of the constraint length. */
1672 li->bllen0[con] = lenA;
1673 li->ddist[con] = lenB - lenA;
1674 /* Set the length to the topology A length */
1675 li->bllen[con] = lenA;
1676 li->bla[2*con] = a1;
1677 li->bla[2*con+1] = a2;
1679 /* Make space in the constraint connection matrix for constraints
1680 * connected to both end of the current constraint.
/* -1 on each end excludes this constraint itself from its neighbor count */
1683 at2con->index[a1 + 1] - at2con->index[a1] - 1 +
1684 at2con->index[a2 + 1] - at2con->index[a2] - 1;
1686 li->blnr[con + 1] = li->ncc;
1688 /* Increase the constraint count */
1692 /* Check if constraint with topology index constraint_index is connected
1693 * to other constraints, and if so add those connected constraints to our task.
/* Pulls all yet-unassigned constraints sharing an atom with the current
 * one into this task, so tasks stay independent (no cross-task coupling).
 * NOTE(review): elided extract — some parameters/braces are not visible. */
1695 static void check_assign_connected(gmx_lincsdata *li,
1696 const t_iatom *iatom,
1700 const t_blocka *at2con)
1702 /* Currently this function only supports constraint groups
1703 * in which all constraints share at least one atom
1704 * (e.g. H-bond constraints).
1705 * Check both ends of the current constraint for
1706 * connected constraints. We need to assign those
1711 for (end = 0; end < 2; end++)
1715 a = (end == 0 ? a1 : a2);
1717 for (k = at2con->index[a]; k < at2con->index[a + 1]; k++)
1722 /* Check if constraint cc has not yet been assigned */
1723 if (li->con_index[cc] == -1)
1729 lenA = idef->iparams[type].constr.dA;
1730 lenB = idef->iparams[type].constr.dB;
/* Flexible constraints (both lengths zero) are skipped unless dynamics */
1732 if (bDynamics || lenA != 0 || lenB != 0)
1734 assign_constraint(li, cc, iatom[3*cc + 1], iatom[3*cc + 2], lenA, lenB, at2con);
1741 /* Check if constraint with topology index constraint_index is involved
1742 * in a constraint triangle, and if so add the other two constraints
1743 * in the triangle to our task.
/* A triangle is found when a constraint at a1 and a constraint at a2
 * share their remaining atom; both partners are then assigned to this
 * task so the triangle is handled by one thread.
 * NOTE(review): elided extract — the matching logic between the two
 * at2con scans is partly outside this view. */
1745 static void check_assign_triangle(gmx_lincsdata *li,
1746 const t_iatom *iatom,
1749 int constraint_index,
1751 const t_blocka *at2con)
1753 int nca, cc[32], ca[32], k;
1754 int c_triangle[2] = { -1, -1 };
/* Collect constraints (other than this one) attached to a1 */
1757 for (k = at2con->index[a1]; k < at2con->index[a1 + 1]; k++)
1762 if (c != constraint_index)
1766 aa1 = iatom[c*3 + 1];
1767 aa2 = iatom[c*3 + 2];
/* Scan constraints attached to a2 and look for a shared third atom */
1783 for (k = at2con->index[a2]; k < at2con->index[a2 + 1]; k++)
1788 if (c != constraint_index)
1792 aa1 = iatom[c*3 + 1];
1793 aa2 = iatom[c*3 + 2];
1796 for (i = 0; i < nca; i++)
1800 c_triangle[0] = cc[i];
1807 for (i = 0; i < nca; i++)
1811 c_triangle[0] = cc[i];
/* If a triangle was found, assign both partner constraints (if still
 * unassigned and not flexible-skipped) to this task. */
1819 if (c_triangle[0] >= 0)
1823 for (end = 0; end < 2; end++)
1825 /* Check if constraint c_triangle[end] has not yet been assigned */
1826 if (li->con_index[c_triangle[end]] == -1)
1831 i = c_triangle[end]*3;
1833 lenA = idef->iparams[type].constr.dA;
1834 lenB = idef->iparams[type].constr.dB;
1836 if (bDynamics || lenA != 0 || lenB != 0)
1838 assign_constraint(li, c_triangle[end], iatom[i + 1], iatom[i + 2], lenA, lenB, at2con);
/* Fill the connection matrix blbnb for the constraints of one task:
 * for each constraint b, list the LINCS indices of all other constraints
 * sharing either of its atoms, optionally sorting each row for better
 * memory access (see bSortMatrix use at the end). */
1845 static void set_matrix_indices(gmx_lincsdata *li,
1846 const lincs_task_t *li_task,
1847 const t_blocka *at2con,
1848 gmx_bool bSortMatrix)
1852 for (b = li_task->b0; b < li_task->b1; b++)
1857 a2 = li->bla[b*2 + 1];
/* Neighbors through atom a1; con_index maps topology -> LINCS index */
1860 for (k = at2con->index[a1]; k < at2con->index[a1 + 1]; k++)
1864 concon = li->con_index[at2con->a[k]];
1867 li->blbnb[i++] = concon;
/* Neighbors through atom a2 */
1870 for (k = at2con->index[a2]; k < at2con->index[a2 + 1]; k++)
1874 concon = li->con_index[at2con->a[k]];
1877 li->blbnb[i++] = concon;
1883 /* Order the blbnb matrix to optimize memory access */
1884 qsort(&(li->blbnb[li->blnr[b]]), li->blnr[b + 1] - li->blnr[b],
1885 sizeof(li->blbnb[0]), int_comp);
/* (Re)build all LINCS data for the current (local) topology: determine
 * the local atom/constraint ranges, grow the per-constraint arrays,
 * distribute constraints over tasks with SIMD padding, build the
 * connection matrix, and finally set the LINCS matrix.
 *
 * Called after every domain (re)partitioning with DD, or once without.
 *
 * NOTE(review): elided extract — several parameters (li, bDynamics, ...)
 * and many braces/declarations of the original body are not visible here.
 */
1890 void set_lincs(const t_idef *idef,
1891 const t_mdatoms *md,
1893 const t_commrec *cr,
1896 int natoms, nflexcon;
1899 int i, ncc_alloc_old, ncon_tot;
1904 /* Zero the thread index ranges.
1905 * Otherwise without local constraints we could return with old ranges.
1907 for (i = 0; i < li->ntask; i++)
1911 li->task[i].nind = 0;
1915 li->task[li->ntask].nind = 0;
1918 /* This is the local topology, so there are only F_CONSTR constraints */
1919 if (idef->il[F_CONSTR].nr == 0)
1921 /* There are no constraints,
1922 * we do not need to fill any data structures.
1929 fprintf(debug, "Building the LINCS connectivity\n");
/* Determine the number of atoms this rank works on: the DD constraint
 * range, the DD home atoms, or all home atoms without DD. */
1932 if (DOMAINDECOMP(cr))
1934 if (cr->dd->constraints)
1938 dd_get_constraint_range(cr->dd, &start, &natoms);
1942 natoms = cr->dd->nat_home;
1947 natoms = md->homenr;
1949 at2con = make_at2con(0, natoms, idef->il, idef->iparams, bDynamics,
1952 ncon_tot = idef->il[F_CONSTR].nr/3;
1954 /* Ensure we have enough padding for aligned loads for each thread */
1955 if (ncon_tot + li->ntask*simd_width > li->nc_alloc || li->nc_alloc == 0)
1957 li->nc_alloc = over_alloc_dd(ncon_tot + li->ntask*simd_width);
1958 srenew(li->con_index, li->nc_alloc);
1959 resize_real_aligned(&li->bllen0, li->nc_alloc);
1960 resize_real_aligned(&li->ddist, li->nc_alloc);
1961 srenew(li->bla, 2*li->nc_alloc);
1962 resize_real_aligned(&li->blc, li->nc_alloc);
1963 resize_real_aligned(&li->blc1, li->nc_alloc);
1964 srenew(li->blnr, li->nc_alloc + 1);
1965 resize_real_aligned(&li->bllen, li->nc_alloc);
1966 srenew(li->tmpv, li->nc_alloc);
1967 if (DOMAINDECOMP(cr))
1969 srenew(li->nlocat, li->nc_alloc);
1971 resize_real_aligned(&li->tmp1, li->nc_alloc);
1972 resize_real_aligned(&li->tmp2, li->nc_alloc);
1973 resize_real_aligned(&li->tmp3, li->nc_alloc);
1974 resize_real_aligned(&li->tmp4, li->nc_alloc);
1975 resize_real_aligned(&li->mlambda, li->nc_alloc);
1978 iatom = idef->il[F_CONSTR].iatoms;
1980 ncc_alloc_old = li->ncc_alloc;
1981 li->blnr[0] = li->ncc;
1983 /* Assign the constraints for li->ntask LINCS tasks.
1984 * We target a uniform distribution of constraints over the tasks.
1985 * Note that when flexible constraints are present, but are removed here
1986 * (e.g. because we are doing EM) we get imbalance, but since that doesn't
1987 * happen during normal MD, that's ok.
1989 int ncon_assign, ncon_target, con, th;
1991 /* Determine the number of constraints we need to assign here */
1992 ncon_assign = ncon_tot;
1995 /* With energy minimization, flexible constraints are ignored
1996 * (and thus minimized, as they should be).
1998 ncon_assign -= nflexcon;
2001 /* Set the target constraint count per task to exactly uniform,
2002 * this might be overridden below.
2004 ncon_target = (ncon_assign + li->ntask - 1)/li->ntask;
2006 /* Mark all constraints as unassigned by setting their index to -1 */
2007 for (con = 0; con < ncon_tot; con++)
2009 li->con_index[con] = -1;
/* Main assignment loop: fill each task's range [b0,b1) */
2013 for (th = 0; th < li->ntask; th++)
2015 lincs_task_t *li_task;
2017 li_task = &li->task[th];
2019 #if GMX_SIMD_HAVE_REAL
2020 /* With indepedent tasks we likely have H-bond constraints or constraint
2021 * pairs. The connected constraints will be pulled into the task, so the
2022 * constraints per task will often exceed ncon_target.
2023 * Triangle constraints can also increase the count, but there are
2024 * relatively few of those, so we usually expect to get ncon_target.
2028 /* We round ncon_target to a multiple of GMX_SIMD_WIDTH,
2029 * since otherwise a lot of operations can be wasted.
2030 * There are several ways to round here, we choose the one
2031 * that alternates block sizes, which helps with Intel HT.
2033 ncon_target = ((ncon_assign*(th + 1))/li->ntask - li->nc_real + GMX_SIMD_REAL_WIDTH - 1) & ~(GMX_SIMD_REAL_WIDTH - 1);
2035 #endif // GMX_SIMD==2 && GMX_SIMD_HAVE_REAL
2037 /* Continue filling the arrays where we left off with the previous task,
2038 * including padding for SIMD.
2040 li_task->b0 = li->nc;
2042 while (con < ncon_tot && li->nc - li_task->b0 < ncon_target)
2044 if (li->con_index[con] == -1)
2049 type = iatom[3*con];
2050 a1 = iatom[3*con + 1];
2051 a2 = iatom[3*con + 2];
2052 lenA = idef->iparams[type].constr.dA;
2053 lenB = idef->iparams[type].constr.dB;
2054 /* Skip the flexible constraints when not doing dynamics */
2055 if (bDynamics || lenA != 0 || lenB != 0)
2057 assign_constraint(li, con, a1, a2, lenA, lenB, &at2con);
2059 if (li->ntask > 1 && !li->bTaskDep)
2061 /* We can generate independent tasks. Check if we
2062 * need to assign connected constraints to our task.
2064 check_assign_connected(li, iatom, idef, bDynamics,
2067 if (li->ntask > 1 && li->ncg_triangle > 0)
2069 /* Ensure constraints in one triangle are assigned
2072 check_assign_triangle(li, iatom, idef, bDynamics,
2073 con, a1, a2, &at2con);
2081 li_task->b1 = li->nc;
2085 /* Copy the last atom pair indices and lengths for constraints
2086 * up to a multiple of simd_width, such that we can do all
2087 * SIMD operations without having to worry about end effects.
2091 li->nc = ((li_task->b1 + simd_width - 1)/simd_width)*simd_width;
2092 last = li_task->b1 - 1;
2093 for (i = li_task->b1; i < li->nc; i++)
2095 li->bla[i*2 ] = li->bla[last*2 ];
2096 li->bla[i*2 + 1] = li->bla[last*2 + 1];
2097 li->bllen0[i] = li->bllen0[last];
2098 li->ddist[i] = li->ddist[last];
2099 li->bllen[i] = li->bllen[last];
2100 li->blnr[i + 1] = li->blnr[last + 1];
2104 /* Keep track of how many constraints we assigned */
2105 li->nc_real += li_task->b1 - li_task->b0;
2109 fprintf(debug, "LINCS task %d constraints %d - %d\n",
2110 th, li_task->b0, li_task->b1);
/* All non-flexible constraints must now be assigned exactly once */
2114 assert(li->nc_real == ncon_assign);
2116 gmx_bool bSortMatrix;
2118 /* Without DD we order the blbnb matrix to optimize memory access.
2119 * With DD the overhead of sorting is more than the gain during access.
2121 bSortMatrix = !DOMAINDECOMP(cr);
2123 if (li->ncc > li->ncc_alloc)
2125 li->ncc_alloc = over_alloc_small(li->ncc);
2126 srenew(li->blbnb, li->ncc_alloc);
/* Build the per-task connection matrices in parallel */
2129 #pragma omp parallel for num_threads(li->ntask) schedule(static)
2130 for (th = 0; th < li->ntask; th++)
2134 lincs_task_t *li_task;
2136 li_task = &li->task[th];
2138 if (li->ncg_triangle > 0 &&
2139 li_task->b1 - li_task->b0 > li_task->tri_alloc)
2141 /* This is allocating too much, but it is difficult to improve */
2142 li_task->tri_alloc = over_alloc_dd(li_task->b1 - li_task->b0);
2143 srenew(li_task->triangle, li_task->tri_alloc);
2144 srenew(li_task->tri_bits, li_task->tri_alloc);
2147 set_matrix_indices(li, li_task, &at2con, bSortMatrix);
2149 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2152 done_blocka(&at2con);
2154 if (cr->dd == nullptr)
2156 /* Since the matrix is static, we should free some memory */
2157 li->ncc_alloc = li->ncc;
2158 srenew(li->blbnb, li->ncc_alloc);
2161 if (li->ncc_alloc > ncc_alloc_old)
2163 srenew(li->blmf, li->ncc_alloc);
2164 srenew(li->blmf1, li->ncc_alloc);
2165 srenew(li->tmpncc, li->ncc_alloc);
2168 if (DOMAINDECOMP(cr) && dd_constraints_nlocalatoms(cr->dd) != nullptr)
2172 nlocat_dd = dd_constraints_nlocalatoms(cr->dd);
2174 /* Convert nlocat from local topology to LINCS constraint indexing */
2175 for (con = 0; con < ncon_tot; con++)
2177 li->nlocat[li->con_index[con]] = nlocat_dd[con];
2182 li->nlocat = nullptr;
2187 fprintf(debug, "Number of constraints is %d, padded %d, couplings %d\n",
2188 li->nc_real, li->nc, li->ncc);
/* Finally divide the work over the threads and set the LINCS matrix */
2193 lincs_thread_setup(li, md->nr);
2196 set_lincs_matrix(li, md->invmass, md->lambda);
/* Report constraints whose bond vector rotated by more than wangle
 * degrees between x (old) and xprime (new) positions; fatal-errors on
 * non-finite lengths and aborts via too_many_constraint_warnings() when
 * *warncount exceeds maxwarn.
 * NOTE(review): elided extract — loop-variable declarations and some
 * braces are not visible here. */
2199 static void lincs_warning(FILE *fplog,
2200 gmx_domdec_t *dd, rvec *x, rvec *xprime, t_pbc *pbc,
2201 int ncons, int *bla, real *bllen, real wangle,
2202 int maxwarn, int *warncount)
2206 real wfac, d0, d1, cosine;
/* A rotation larger than wangle means cos(angle) below this factor */
2209 wfac = std::cos(DEG2RAD*wangle);
2211 sprintf(buf, "bonds that rotated more than %g degrees:\n"
2212 " atom 1 atom 2 angle previous, current, constraint length\n",
2214 fprintf(stderr, "%s", buf);
2217 fprintf(fplog, "%s", buf);
2220 for (b = 0; b < ncons; b++)
/* Use PBC-aware distance when a pbc struct is supplied */
2226 pbc_dx_aiuc(pbc, x[i], x[j], v0);
2227 pbc_dx_aiuc(pbc, xprime[i], xprime[j], v1);
2231 rvec_sub(x[i], x[j], v0);
2232 rvec_sub(xprime[i], xprime[j], v1);
2236 cosine = iprod(v0, v1)/(d0*d1);
2239 sprintf(buf, " %6d %6d %5.1f %8.4f %8.4f %8.4f\n",
2240 ddglatnr(dd, i), ddglatnr(dd, j),
2241 RAD2DEG*std::acos(cosine), d0, d1, bllen[b]);
2242 fprintf(stderr, "%s", buf);
2245 fprintf(fplog, "%s", buf);
2247 if (!std::isfinite(d1))
2249 gmx_fatal(FARGS, "Bond length not finite.");
2255 if (*warncount > maxwarn)
2257 too_many_constraint_warnings(econtLINCS, *warncount);
/* Compute constraint-deviation statistics over all tasks: the local
 * constraint count, the sum of squared relative deviations, and the
 * maximum relative deviation with its constraint index.
 * With DD, nlocat weights shared constraints (the 0.5 factors below
 * compensate for constraints counted on two ranks).
 * NOTE(review): elided extract — some declarations/braces not visible. */
2261 static void cconerr(const gmx_lincsdata *lincsd,
2262 rvec *x, t_pbc *pbc,
2263 real *ncons_loc, real *ssd, real *max, int *imax)
2265 const int *bla, *nlocat;
2268 int count, im, task;
2271 bllen = lincsd->bllen;
2272 nlocat = lincsd->nlocat;
2278 for (task = 0; task < lincsd->ntask; task++)
2282 for (b = lincsd->task[task].b0; b < lincsd->task[task].b1; b++)
2289 pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
2293 rvec_sub(x[bla[2*b]], x[bla[2*b+1]], dx);
/* len = |dx|; d = relative deviation from the reference length */
2296 len = r2*gmx::invsqrt(r2);
2297 d = std::abs(len/bllen[b]-1);
2298 if (d > ma && (nlocat == nullptr || nlocat[b]))
2303 if (nlocat == nullptr)
2310 ssd2 += nlocat[b]*d*d;
/* Halve with DD since shared constraints are counted twice */
2316 *ncons_loc = (nlocat ? 0.5 : 1)*count;
2317 *ssd = (nlocat ? 0.5 : 1)*ssd2;
/* Top-level LINCS driver: applies constraints to coordinates
 * (econqCoord) or projects out constraint components of derivatives,
 * handling free-energy length interpolation, flexible constraints,
 * warnings, dH/dlambda and virial reduction, and flop accounting.
 *
 * Returns FALSE when the maximum relative deviation after constraining
 * is too large (warning condition), TRUE otherwise.
 *
 * NOTE(review): elided extract — the function continues past the end of
 * this chunk; do not assume the visible lines are the complete body.
 */
2322 gmx_bool constrain_lincs(FILE *fplog, gmx_bool bLog, gmx_bool bEner,
2323 const t_inputrec *ir,
2325 gmx_lincsdata *lincsd, t_mdatoms *md,
2326 const t_commrec *cr,
2327 const gmx_multisim_t *ms,
2328 rvec *x, rvec *xprime, rvec *min_proj,
2329 matrix box, t_pbc *pbc,
2330 real lambda, real *dvdlambda,
2331 real invdt, rvec *v,
2332 gmx_bool bCalcVir, tensor vir_r_m_dr,
2335 int maxwarn, int *warncount)
2338 char buf[STRLEN], buf2[22], buf3[STRLEN];
2340 real ncons_loc, p_ssd, p_max = 0;
2342 gmx_bool bOK, bWarn;
2346 /* This boolean should be set by a flag passed to this routine.
2347 * We can also easily check if any constraint length is changed,
2348 * if not dH/dlambda=0 and we can also set the boolean to FALSE.
2350 bCalcDHDL = (ir->efep != efepNO && dvdlambda != nullptr);
/* Nothing to do on this rank without constraints and without DD */
2352 if (lincsd->nc == 0 && cr->dd == nullptr)
2356 lincsd->rmsd_data[0] = 0;
2357 lincsd->rmsd_data[1] = 0;
2363 if (econq == econqCoord)
2365 /* We can't use bCalcDHDL here, since NULL can be passed for dvdlambda
2366 * also with efep!=fepNO.
2368 if (ir->efep != efepNO)
2370 if (md->nMassPerturbed && lincsd->matlam != md->lambda)
2372 set_lincs_matrix(lincsd, md->invmass, md->lambda);
/* Interpolate constraint lengths between A and B topologies */
2375 for (i = 0; i < lincsd->nc; i++)
2377 lincsd->bllen[i] = lincsd->bllen0[i] + lambda*lincsd->ddist[i];
2381 if (lincsd->ncg_flex)
2383 /* Set the flexible constraint lengths to the old lengths */
2386 for (i = 0; i < lincsd->nc; i++)
2388 if (lincsd->bllen[i] == 0)
2390 pbc_dx_aiuc(pbc, x[lincsd->bla[2*i]], x[lincsd->bla[2*i+1]], dx);
2391 lincsd->bllen[i] = norm(dx);
2397 for (i = 0; i < lincsd->nc; i++)
2399 if (lincsd->bllen[i] == 0)
2402 std::sqrt(distance2(x[lincsd->bla[2*i]],
2403 x[lincsd->bla[2*i+1]]));
/* Pre-constraint deviation statistics, reported below when bLog */
2411 cconerr(lincsd, xprime, pbc,
2412 &ncons_loc, &p_ssd, &p_max, &p_imax);
2415 /* This bWarn var can be updated by multiple threads
2416 * at the same time. But as we only need to detect
2417 * if a warning occurred or not, this is not an issue.
2421 /* The OpenMP parallel region of constrain_lincs for coords */
2422 #pragma omp parallel num_threads(lincsd->ntask)
2426 int th = gmx_omp_get_thread_num();
2428 clear_mat(lincsd->task[th].vir_r_m_dr);
2430 do_lincs(x, xprime, box, pbc, lincsd, th,
2433 ir->LincsWarnAngle, &bWarn,
2435 th == 0 ? vir_r_m_dr : lincsd->task[th].vir_r_m_dr);
2437 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2440 if (bLog && fplog && lincsd->nc > 0)
2442 fprintf(fplog, " Rel. Constraint Deviation: RMS MAX between atoms\n");
2443 fprintf(fplog, " Before LINCS %.6f %.6f %6d %6d\n",
2444 std::sqrt(p_ssd/ncons_loc), p_max,
2445 ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
2446 ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));
/* Post-constraint statistics, cached in rmsd_data for reporting */
2450 cconerr(lincsd, xprime, pbc,
2451 &ncons_loc, &p_ssd, &p_max, &p_imax);
2452 lincsd->rmsd_data[0] = ncons_loc;
2453 lincsd->rmsd_data[1] = p_ssd;
2457 lincsd->rmsd_data[0] = 0;
2458 lincsd->rmsd_data[1] = 0;
2459 lincsd->rmsd_data[2] = 0;
2461 if (bLog && fplog && lincsd->nc > 0)
2464 " After LINCS %.6f %.6f %6d %6d\n\n",
2465 std::sqrt(p_ssd/ncons_loc), p_max,
2466 ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
2467 ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));
2472 if (maxwarn < INT_MAX)
2474 cconerr(lincsd, xprime, pbc,
2475 &ncons_loc, &p_ssd, &p_max, &p_imax);
2478 sprintf(buf3, " in simulation %d", ms->sim);
2484 sprintf(buf, "\nStep %s, time %g (ps) LINCS WARNING%s\n"
2485 "relative constraint deviation after LINCS:\n"
2486 "rms %.6f, max %.6f (between atoms %d and %d)\n",
2487 gmx_step_str(step, buf2), ir->init_t+step*ir->delta_t,
2489 std::sqrt(p_ssd/ncons_loc), p_max,
2490 ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
2491 ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));
2494 fprintf(fplog, "%s", buf);
2496 fprintf(stderr, "%s", buf);
2497 lincs_warning(fplog, cr->dd, x, xprime, pbc,
2498 lincsd->nc, lincsd->bla, lincsd->bllen,
2499 ir->LincsWarnAngle, maxwarn, warncount);
2501 bOK = (p_max < 0.5);
/* Reset flexible constraint reference lengths for the next step */
2504 if (lincsd->ncg_flex)
2506 for (i = 0; (i < lincsd->nc); i++)
2508 if (lincsd->bllen0[i] == 0 && lincsd->ddist[i] == 0)
2510 lincsd->bllen[i] = 0;
2517 /* The OpenMP parallel region of constrain_lincs for derivatives */
2518 #pragma omp parallel num_threads(lincsd->ntask)
2522 int th = gmx_omp_get_thread_num();
2524 do_lincsp(x, xprime, min_proj, pbc, lincsd, th,
2525 md->invmass, econq, bCalcDHDL,
2526 bCalcVir, th == 0 ? vir_r_m_dr : lincsd->task[th].vir_r_m_dr);
2528 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2534 /* Reduce the dH/dlambda contributions over the threads */
2539 for (th = 0; th < lincsd->ntask; th++)
2541 dhdlambda += lincsd->task[th].dhdlambda;
2543 if (econq == econqCoord)
2545 /* dhdlambda contains dH/dlambda*dt^2, correct for this */
2546 /* TODO This should probably use invdt, so that sd integrator scaling works properly */
2547 dhdlambda /= ir->delta_t*ir->delta_t;
2549 *dvdlambda += dhdlambda;
/* Sum the per-thread virial contributions into thread 0's tensor */
2552 if (bCalcVir && lincsd->ntask > 1)
2554 for (i = 1; i < lincsd->ntask; i++)
2556 m_add(vir_r_m_dr, lincsd->task[i].vir_r_m_dr, vir_r_m_dr);
2560 /* count assuming nit=1 */
2561 inc_nrnb(nrnb, eNR_LINCS, lincsd->nc_real);
2562 inc_nrnb(nrnb, eNR_LINCSMAT, (2+lincsd->nOrder)*lincsd->ncc);
2563 if (lincsd->ntriangle > 0)
2565 inc_nrnb(nrnb, eNR_LINCSMAT, lincsd->nOrder*lincsd->ncc_triangle);
2569 inc_nrnb(nrnb, eNR_CONSTR_V, lincsd->nc_real*2);
2573 inc_nrnb(nrnb, eNR_CONSTR_VIR, lincsd->nc_real);