2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 /* This file is completely threadsafe - keep it that way! */
49 #include "gromacs/utility/smalloc.h"
53 #include "mtop_util.h"
54 #include "gmx_omp_nthreads.h"
56 #include "gromacs/fileio/gmxfio.h"
57 #include "gromacs/utility/gmxomp.h"
/* Per-thread work-division data for LINCS (struct body; the enclosing
 * typedef header is not visible in this chunk). */
int b0;            /* first constraint for this thread */
int b1;            /* b1-1 is the last constraint for this thread */
int nind;          /* number of indices */
int *ind;          /* constraint index for updating atom data */
int nind_r;        /* number of indices in the "rest" list */
int *ind_r;        /* constraint index for updating atom data */
int ind_nalloc;    /* allocation size of ind and ind_r */
tensor vir_r_m_dr; /* temporary variable for virial calculation */
/* Main data structure for the LINCS constraint algorithm.
 * NOTE(review): several members (e.g. the tmp* scratch arrays and
 * rmsd_data, which are referenced below) are elided in this view. */
typedef struct gmx_lincsdata {
    int ncg;            /* the global number of constraints */
    int ncg_flex;       /* the global number of flexible constraints */
    int ncg_triangle;   /* the global number of constraints in triangles */
    int nIter;          /* the number of iterations */
    int nOrder;         /* the order of the matrix expansion */
    int nc;             /* the number of constraints */
    int nc_alloc;       /* the number we allocated memory for */
    int ncc;            /* the number of constraint connections */
    int ncc_alloc;      /* the number we allocated memory for */
    real matlam;        /* the FE lambda value used for filling blc and blmf */
    real *bllen0;       /* the reference distance in topology A */
    real *ddist;        /* the reference distance in top B - the r.d. in top A */
    int *bla;           /* the atom pairs involved in the constraints */
    real *blc;          /* 1/sqrt(invmass1 + invmass2) */
    real *blc1;         /* as blc, but with all masses 1 */
    int *blnr;          /* index into blbnb and blmf */
    int *blbnb;         /* list of constraint connections */
    int ntriangle;      /* the local number of constraints in triangles */
    int *triangle;      /* the list of triangle constraints */
    int *tri_bits;      /* the bits tell if the matrix element should be used */
    int ncc_triangle;   /* the number of constraint connections in triangles */
    gmx_bool bCommIter; /* communicate before each LINCS iteration */
    real *blmf;         /* matrix of mass factors for constraint connections */
    real *blmf1;        /* as blmf, but with all masses 1 */
    real *bllen;        /* the reference bond length */
    int nth;            /* The number of threads doing LINCS */
    lincs_thread_t *th; /* LINCS thread division */
    unsigned *atf;      /* atom flags for thread parallelization */
    int atf_nalloc;     /* allocation size of atf */
    /* arrays for temporary storage in the LINCS algorithm */
    real *mlambda;      /* the Lagrange multipliers * -1 */
    /* storage for the constraint RMS relative deviation output */
/* Returns a pointer to the RMSD accumulation buffer of lincsd.
 * Element 0 holds the (local) constraint count, elements 1 and 2 the
 * accumulated squared deviations (filled in constrain_lincs). */
real *lincs_rmsd_data(struct gmx_lincsdata *lincsd)
    return lincsd->rmsd_data;
/* Returns the root-mean-square relative constraint deviation.
 * With bSD2 set, rmsd_data[2] is used instead of rmsd_data[1]
 * (the second part of SD integration stores its data there). */
real lincs_rmsd(struct gmx_lincsdata *lincsd, gmx_bool bSD2)
    if (lincsd->rmsd_data[0] > 0)
        return sqrt(lincsd->rmsd_data[bSD2 ? 2 : 1]/lincsd->rmsd_data[0]);
/* Do a set of nrec LINCS matrix multiplications.
 * This function will return with up to date thread-local
 * constraint data, without an OpenMP barrier.
 */
static void lincs_matrix_expand(const struct gmx_lincsdata *lincsd,
                                real *rhs1, real *rhs2, real *sol)
    int nrec, rec, b, j, n, nr0, nr1;
    int ntriangle, tb, bits;
    const int *blnr = lincsd->blnr, *blbnb = lincsd->blbnb;
    const int *triangle = lincsd->triangle, *tri_bits = lincsd->tri_bits;

    ntriangle = lincsd->ntriangle;
    nrec      = lincsd->nOrder;

    /* Truncated power-series expansion of the constraint matrix inverse:
     * each recursion multiplies the current rhs by the coupling matrix
     * blcc and accumulates the result into sol. */
    for (rec = 0; rec < nrec; rec++)
        for (b = b0; b < b1; b++)
            for (n = blnr[b]; n < blnr[b+1]; n++)
                mvb = mvb + blcc[n]*rhs1[j];
            sol[b] = sol[b] + mvb;
    } /* nrec*(ncons+2*nrtot) flops */

    /* Perform an extra nrec recursions for only the constraints
     * involved in rigid triangles.
     * In this way their accuracy should come close to those of the other
     * constraints, since triangles of constraints can produce eigenvalues
     * around 0.7, while the effective eigenvalue for bond constraints
     * is around 0.4 (and 0.7*0.7=0.5).
     */
    /* We need to copy the temporary array, since only the elements
     * for constraints involved in triangles are updated and then
     * the pointers are swapped. This saves copying the whole array.
     * We need a barrier as other threads might still be reading from rhs2.
     */
    for (b = b0; b < b1; b++)
    for (rec = 0; rec < nrec; rec++)
        for (tb = 0; tb < ntriangle; tb++)
            for (n = nr0; n < nr1; n++)
                /* Only use couplings flagged in the triangle bit mask */
                if (bits & (1<<(n-nr0)))
                    mvb = mvb + blcc[n]*rhs1[j];
            sol[b] = sol[b] + mvb;
    } /* flops count is missing here */

    /* We need a barrier here as the calling routine will continue
     * to operate on the thread-local constraints without barrier.
     */
/* Update the atom data for all ncons constraints, without an index list;
 * used when a single thread handles all constraints.
 * NOTE(review): the two loops presumably handle the mass-weighted
 * (invmass != NULL) and unweighted cases — bodies elided in this view. */
static void lincs_update_atoms_noind(int ncons, const int *bla,
                                     const real *fac, rvec *r,
    real mvb, im1, im2, tmp0, tmp1, tmp2;

    for (b = 0; b < ncons; b++)
    } /* 16 ncons flops */
    for (b = 0; b < ncons; b++)
/* As lincs_update_atoms_noind, but only updates the constraints listed
 * in ind; used per thread for constraints local to that thread's atoms. */
static void lincs_update_atoms_ind(int ncons, const int *ind, const int *bla,
                                   const real *fac, rvec *r,
    real mvb, im1, im2, tmp0, tmp1, tmp2;

    for (bi = 0; bi < ncons; bi++)
    } /* 16 ncons flops */
    for (bi = 0; bi < ncons; bi++)
    } /* 16 ncons flops */
/* Dispatches the atom updates: single-threaded path updates everything,
 * multi-threaded path updates the thread-local constraints per thread and
 * lets one thread handle the "thread-overlap" constraints in li->th[li->nth]. */
static void lincs_update_atoms(struct gmx_lincsdata *li, int th,
                               const real *fac, rvec *r,
        /* Single thread, we simply update for all constraints */
        lincs_update_atoms_noind(li->nc, li->bla, prefac, fac, r, invmass, x);
        /* Update the atom vector components for our thread local
         * constraints that only access our local atom range.
         * This can be done without a barrier.
         */
        lincs_update_atoms_ind(li->th[th].nind, li->th[th].ind,
                               li->bla, prefac, fac, r, invmass, x);
        if (li->th[li->nth].nind > 0)
            /* Update the constraints that operate on atoms
             * in multiple thread atom blocks on the master thread.
             */
            lincs_update_atoms_ind(li->th[li->nth].nind,
                                   li->bla, prefac, fac, r, invmass, x);
/* LINCS projection, works on derivatives of the coordinates */
static void do_lincsp(rvec *x, rvec *f, rvec *fp, t_pbc *pbc,
                      struct gmx_lincsdata *lincsd, int th,
                      int econq, real *dvdlambda,
                      gmx_bool bCalcVir, tensor rmdf)
    int b0, b1, b, i, j, k, n;
    real tmp0, tmp1, tmp2, im1, im2, mvb, rlen, len, wfac, lam;
    int *bla, *blnr, *blbnb;
    real *blc, *blmf, *blcc, *rhs1, *rhs2, *sol;

    /* This thread's constraint range */
    b0 = lincsd->th[th].b0;
    b1 = lincsd->th[th].b1;

    blbnb = lincsd->blbnb;
    if (econq != econqForce)
        /* Use mass-weighted parameters */
        /* Use non mass-weighted parameters */
        blmf = lincsd->blmf1;
    blcc = lincsd->tmpncc;

    /* Compute normalized i-j vectors */
    for (b = b0; b < b1; b++)
        pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
    for (b = b0; b < b1; b++)
        rvec_sub(x[bla[2*b]], x[bla[2*b+1]], dx);
    } /* 16 ncons flops */

    /* Fill the coupling matrix blcc and the right-hand side from f */
    for (b = b0; b < b1; b++)
        for (n = blnr[b]; n < blnr[b+1]; n++)
            blcc[n] = blmf[n]*(tmp0*r[k][0] + tmp1*r[k][1] + tmp2*r[k][2]);
        mvb = blc[b]*(tmp0*(f[i][0] - f[j][0]) +
                      tmp1*(f[i][1] - f[j][1]) +
                      tmp2*(f[i][2] - f[j][2]));
    /* Together: 23*ncons + 6*nrtot flops */

    lincs_matrix_expand(lincsd, b0, b1, blcc, rhs1, rhs2, sol);
    /* nrec*(ncons+2*nrtot) flops */

    if (econq == econqDeriv_FlexCon)
        /* We only want to constrain the flexible constraints,
         * so we mask out the normal ones by setting sol to 0.
         */
        for (b = b0; b < b1; b++)
            if (!(lincsd->bllen0[b] == 0 && lincsd->ddist[b] == 0))

    /* We multiply sol by blc, so we can use lincs_update_atoms for OpenMP */
    for (b = b0; b < b1; b++)

    /* When constraining forces, we should not use mass weighting,
     * so we pass invmass=NULL, which results in the use of 1 for all atoms.
     */
    lincs_update_atoms(lincsd, th, 1.0, sol, r,
                       (econq != econqForce) ? invmass : NULL, fp);

    if (dvdlambda != NULL)
        for (b = b0; b < b1; b++)
            *dvdlambda -= sol[b]*lincsd->ddist[b];

    /* Constraint virial,
     * determines sum r_bond x delta f,
     * where delta f is the constraint correction
     * of the quantity that is being constrained.
     */
    for (b = b0; b < b1; b++)
        mvb = lincsd->bllen[b]*sol[b];
        for (i = 0; i < DIM; i++)
            for (j = 0; j < DIM; j++)
                rmdf[i][j] += tmp1*r[b][j];
    } /* 23 ncons flops */
/* The core LINCS step: constrains the coordinates xp towards the reference
 * bond lengths, with optional iterative correction for centripetal effects,
 * velocity update, Lagrange-multiplier accumulation and virial. */
static void do_lincs(rvec *x, rvec *xp, matrix box, t_pbc *pbc,
                     struct gmx_lincsdata *lincsd, int th,
                     gmx_bool bCalcLambda,
                     real wangle, int *warn,
                     gmx_bool bCalcVir, tensor vir_r_m_dr)
    int b0, b1, b, i, j, k, n, iter;
    real tmp0, tmp1, tmp2, im1, im2, mvb, rlen, len, len2, dlen2, wfac;
    int *bla, *blnr, *blbnb;
    real *blc, *blmf, *bllen, *blcc, *rhs1, *rhs2, *sol, *blc_sol, *mlambda;

    /* This thread's constraint range */
    b0 = lincsd->th[th].b0;
    b1 = lincsd->th[th].b1;

    blbnb = lincsd->blbnb;
    bllen = lincsd->bllen;
    blcc = lincsd->tmpncc;
    blc_sol = lincsd->tmp4;
    mlambda = lincsd->mlambda;

    /* With DD constraint communication, get the local-atom counts */
    if (DOMAINDECOMP(cr) && cr->dd->constraints)
        nlocat = dd_constraints_nlocalatoms(cr->dd);

    /* Compute normalized i-j vectors */
    for (b = b0; b < b1; b++)
        pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
    for (b = b0; b < b1; b++)
        for (n = blnr[b]; n < blnr[b+1]; n++)
            blcc[n] = blmf[n]*iprod(r[b], r[blbnb[n]]);
        pbc_dx_aiuc(pbc, xp[bla[2*b]], xp[bla[2*b+1]], dx);
        mvb = blc[b]*(iprod(r[b], dx) - bllen[b]);
    /* Compute normalized i-j vectors */
    for (b = b0; b < b1; b++)
        tmp0 = x[i][0] - x[j][0];
        tmp1 = x[i][1] - x[j][1];
        tmp2 = x[i][2] - x[j][2];
        rlen = gmx_invsqrt(tmp0*tmp0+tmp1*tmp1+tmp2*tmp2);
    } /* 16 ncons flops */
    for (b = b0; b < b1; b++)
        for (n = blnr[b]; n < blnr[b+1]; n++)
            blcc[n] = blmf[n]*(tmp0*r[k][0] + tmp1*r[k][1] + tmp2*r[k][2]);
        mvb = blc[b]*(tmp0*(xp[i][0] - xp[j][0]) +
                      tmp1*(xp[i][1] - xp[j][1]) +
                      tmp2*(xp[i][2] - xp[j][2]) - len);
    /* Together: 26*ncons + 6*nrtot flops */

    lincs_matrix_expand(lincsd, b0, b1, blcc, rhs1, rhs2, sol);
    /* nrec*(ncons+2*nrtot) flops */

    for (b = b0; b < b1; b++)
        mlambda[b] = blc[b]*sol[b];

    /* Update the coordinates */
    lincs_update_atoms(lincsd, th, 1.0, mlambda, r, invmass, xp);

    /*
     ******** Correction for centripetal effects ********
     */
    wfac = cos(DEG2RAD*wangle);
    for (iter = 0; iter < lincsd->nIter; iter++)
        if ((lincsd->bCommIter && DOMAINDECOMP(cr) && cr->dd->constraints))
            /* Communicate the corrected non-local coordinates */
            if (DOMAINDECOMP(cr))
                dd_move_x_constraints(cr->dd, box, xp, NULL, FALSE);
        for (b = b0; b < b1; b++)
            pbc_dx_aiuc(pbc, xp[bla[2*b]], xp[bla[2*b+1]], dx);
            rvec_sub(xp[bla[2*b]], xp[bla[2*b+1]], dx);
            dlen2 = 2*len2 - norm2(dx);
            /* Flag a warning when a bond rotated more than wangle degrees */
            if (dlen2 < wfac*len2 && (nlocat == NULL || nlocat[b]))
            mvb = blc[b]*(len - dlen2*gmx_invsqrt(dlen2));
        } /* 20*ncons flops */

        lincs_matrix_expand(lincsd, b0, b1, blcc, rhs1, rhs2, sol);
        /* nrec*(ncons+2*nrtot) flops */

        for (b = b0; b < b1; b++)
        /* Update the coordinates */
        lincs_update_atoms(lincsd, th, 1.0, blc_sol, r, invmass, xp);
    /* nit*ncons*(37+9*nrec) flops */

    /* Update the velocities */
    lincs_update_atoms(lincsd, th, invdt, mlambda, r, invmass, v);

    if (nlocat != NULL && bCalcLambda)
        /* In lincs_update_atoms thread might cross-read mlambda */
        /* Only account for local atoms */
        for (b = b0; b < b1; b++)
            mlambda[b] *= 0.5*nlocat[b];

        /* Constraint virial */
        for (b = b0; b < b1; b++)
            tmp0 = -bllen[b]*mlambda[b];
            for (i = 0; i < DIM; i++)
                for (j = 0; j < DIM; j++)
                    vir_r_m_dr[i][j] -= tmp1*r[b][j];
        } /* 22 ncons flops */

    /* Total flop count (without communication):
     * 26*ncons + 6*nrtot + nrec*(ncons+2*nrtot)
     * + nit * (20*ncons + nrec*(ncons+2*nrtot) + 17 ncons)
     * (26+nrec)*ncons + (6+2*nrec)*nrtot
     * + nit * ((37+nrec)*ncons + 2*nrec*nrtot)
     * (63+nrec)*ncons + (6+4*nrec)*nrtot
     */
/* Fills the mass factors blc/blc1 and the coupling coefficient matrix
 * blmf/blmf1 for the current masses, and detects constraint triangles. */
void set_lincs_matrix(struct gmx_lincsdata *li, real *invmass, real lambda)
    int i, a1, a2, n, k, sign, center;
    const real invsqrt2 = 0.7071067811865475244;

    for (i = 0; (i < li->nc); i++)
        li->blc[i] = gmx_invsqrt(invmass[a1] + invmass[a2]);
        li->blc1[i] = invsqrt2;

    /* Construct the coupling coefficient matrix blmf */
    li->ncc_triangle = 0;
    for (i = 0; (i < li->nc); i++)
        for (n = li->blnr[i]; (n < li->blnr[i+1]); n++)
            /* The sign depends on which atom the two constraints share */
            if (a1 == li->bla[2*k] || a2 == li->bla[2*k+1])
            if (a1 == li->bla[2*k] || a1 == li->bla[2*k+1])
            li->blmf[n] = sign*invmass[center]*li->blc[i]*li->blc[k];
            li->blmf1[n] = sign*0.5;
            if (li->ncg_triangle > 0)
                /* Look for constraint triangles */
                for (nk = li->blnr[k]; (nk < li->blnr[k+1]); nk++)
                    if (kk != i && kk != k &&
                        (li->bla[2*kk] == end || li->bla[2*kk+1] == end))
                        if (li->ntriangle == 0 ||
                            li->triangle[li->ntriangle-1] < i)
                            /* Add this constraint to the triangle list */
                            li->triangle[li->ntriangle] = i;
                            li->tri_bits[li->ntriangle] = 0;
                            /* tri_bits has one bit per coupling, so the number
                             * of couplings is limited by the bit width */
                            if (li->blnr[i+1] - li->blnr[i] > sizeof(li->tri_bits[0])*8 - 1)
                                gmx_fatal(FARGS, "A constraint is connected to %d constraints, this is more than the %d allowed for constraints participating in triangles",
                                          li->blnr[i+1] - li->blnr[i],
                                          sizeof(li->tri_bits[0])*8-1);
                        li->tri_bits[li->ntriangle-1] |= (1<<(n-li->blnr[i]));

        fprintf(debug, "Of the %d constraints %d participate in triangles\n",
                li->nc, li->ntriangle);
        fprintf(debug, "There are %d couplings of which %d in triangles\n",
                li->ncc, li->ncc_triangle);

    /* Set matlam,
     * so we know with which lambda value the masses have been set.
     */
/* Counts the number of constraints that are part of a rigid triangle:
 * for each constraint c0, walk the constraints c1 sharing an atom with it,
 * then the constraints c2 sharing an atom with c1, and check whether c2
 * closes the triangle back to an atom of c0. */
static int count_triangle_constraints(t_ilist *ilist, t_blocka *at2con)
    int c0, a00, a01, n1, c1, a10, a11, ac1, n2, c2, a20, a21;
    t_iatom *ia1, *ia2, *iap;

    ncon1    = ilist[F_CONSTR].nr/3;
    ncon_tot = ncon1 + ilist[F_CONSTRNC].nr/3;

    ia1 = ilist[F_CONSTR].iatoms;
    ia2 = ilist[F_CONSTRNC].iatoms;

    for (c0 = 0; c0 < ncon_tot; c0++)
        iap = constr_iatomptr(ncon1, ia1, ia2, c0);
        for (n1 = at2con->index[a01]; n1 < at2con->index[a01+1]; n1++)
            iap = constr_iatomptr(ncon1, ia1, ia2, c1);
            for (n2 = at2con->index[ac1]; n2 < at2con->index[ac1+1]; n2++)
                if (c2 != c0 && c2 != c1)
                    iap = constr_iatomptr(ncon1, ia1, ia2, c2);
                    if (a20 == a00 || a21 == a00)

    return ncon_triangle;
/* Returns TRUE when any constraint has further constraints connected at
 * both of its atoms, i.e. chains of more than two sequential constraints
 * exist (used to decide on per-iteration communication in parallel LINCS). */
static gmx_bool more_than_two_sequential_constraints(const t_ilist *ilist,
                                                     const t_blocka *at2con)
    t_iatom *ia1, *ia2, *iap;
    int ncon1, ncon_tot, c;
    gmx_bool bMoreThanTwoSequentialConstraints;

    ncon1    = ilist[F_CONSTR].nr/3;
    ncon_tot = ncon1 + ilist[F_CONSTRNC].nr/3;

    ia1 = ilist[F_CONSTR].iatoms;
    ia2 = ilist[F_CONSTRNC].iatoms;

    bMoreThanTwoSequentialConstraints = FALSE;
    for (c = 0; c < ncon_tot && !bMoreThanTwoSequentialConstraints; c++)
        iap = constr_iatomptr(ncon1, ia1, ia2, c);
        /* Check if this constraint has constraints connected at both atoms */
        if (at2con->index[a1+1] - at2con->index[a1] > 1 &&
            at2con->index[a2+1] - at2con->index[a2] > 1)
            bMoreThanTwoSequentialConstraints = TRUE;

    return bMoreThanTwoSequentialConstraints;
/* qsort()/bsearch() comparison function for ints.
 * Returns a negative, zero or positive value for a < b, a == b, a > b.
 * Uses comparisons instead of the subtraction idiom, since a - b can
 * overflow (undefined behavior) when the operands have opposite signs
 * and large magnitude; only the sign of the result matters to qsort. */
static int int_comp(const void *a, const void *b)
{
    const int ia = *(const int *)a;
    const int ib = *(const int *)b;

    return (ia > ib) - (ia < ib);
}
/* Allocates and initializes the LINCS data structure from the global
 * topology: counts constraints and triangles, decides on per-iteration
 * communication for parallel LINCS, and sets up the thread division. */
gmx_lincsdata_t init_lincs(FILE *fplog, gmx_mtop_t *mtop,
                           int nflexcon_global, t_blocka *at2con,
                           gmx_bool bPLINCS, int nIter, int nProjOrder)
    struct gmx_lincsdata *li;

    fprintf(fplog, "\nInitializing%s LINear Constraint Solver\n",
            bPLINCS ? " Parallel" : "");

    gmx_mtop_ftype_count(mtop, F_CONSTR) +
        gmx_mtop_ftype_count(mtop, F_CONSTRNC);
    li->ncg_flex = nflexcon_global;
    li->nOrder = nProjOrder;

    li->ncg_triangle = 0;
    li->bCommIter = FALSE;
    for (mb = 0; mb < mtop->nmolblock; mb++)
        molt = &mtop->moltype[mtop->molblock[mb].type];
        mtop->molblock[mb].nmol*
        count_triangle_constraints(molt->ilist,
                                   &at2con[mtop->molblock[mb].type]);
        if (bPLINCS && li->bCommIter == FALSE)
            /* Check if we need to communicate not only before LINCS,
             * but also before each iteration.
             * The check for only two sequential constraints is only
             * useful for the common case of H-bond only constraints.
             * With more effort we could also make it useful for small
             * molecules with nr. sequential constraints <= nOrder-1.
             */
            li->bCommIter = (li->nOrder < 1 || more_than_two_sequential_constraints(molt->ilist, &at2con[mtop->molblock[mb].type]));
    if (debug && bPLINCS)
        fprintf(debug, "PLINCS communication before each iteration: %d\n",

    /* LINCS can run on any number of threads.
     * Currently the number is fixed for the whole simulation,
     * but it could be set in set_lincs().
     */
    li->nth = gmx_omp_nthreads_get(emntLINCS);
    /* Allocate an extra element for "thread-overlap" constraints */
    snew(li->th, li->nth+1);
        fprintf(debug, "LINCS: using %d threads\n", li->nth);

    if (bPLINCS || li->ncg_triangle > 0)
        please_cite(fplog, "Hess2008a");
        please_cite(fplog, "Hess97a");

        fprintf(fplog, "The number of constraints is %d\n", li->ncg);
            fprintf(fplog, "There are inter charge-group constraints,\n"
                    "will communicate selected coordinates each lincs iteration\n");
        if (li->ncg_triangle > 0)
                    "%d constraints are involved in constraint triangles,\n"
                    "will apply an additional matrix expansion of order %d for couplings\n"
                    "between constraints inside triangles\n",
                    li->ncg_triangle, li->nOrder);
/* Sets up the work division over the threads */
static void lincs_thread_setup(struct gmx_lincsdata *li, int natoms)
    lincs_thread_t *li_m;

    if (natoms > li->atf_nalloc)
        li->atf_nalloc = over_alloc_large(natoms);
        srenew(li->atf, li->atf_nalloc);

    /* Clear the atom flags */
    for (a = 0; a < natoms; a++)

    for (th = 0; th < li->nth; th++)
        lincs_thread_t *li_th;

        li_th = &li->th[th];

        /* The constraints are divided equally over the threads */
        li_th->b0 = (li->nc* th )/li->nth;
        li_th->b1 = (li->nc*(th+1))/li->nth;

        /* Flags fit only if the thread index fits in the bit width of *atf */
        if (th < sizeof(*atf)*8)
            /* For each atom set a flag for constraints from each */
            for (b = li_th->b0; b < li_th->b1; b++)
                atf[li->bla[b*2] ] |= (1U<<th);
                atf[li->bla[b*2+1]] |= (1U<<th);

#pragma omp parallel for num_threads(li->nth) schedule(static)
    for (th = 0; th < li->nth; th++)
        lincs_thread_t *li_th;

        li_th = &li->th[th];

        if (li_th->b1 - li_th->b0 > li_th->ind_nalloc)
            li_th->ind_nalloc = over_alloc_large(li_th->b1-li_th->b0);
            srenew(li_th->ind, li_th->ind_nalloc);
            srenew(li_th->ind_r, li_th->ind_nalloc);

        if (th < sizeof(*atf)*8)
            mask = (1U<<th) - 1U;

            for (b = li_th->b0; b < li_th->b1; b++)
                /* We let the constraint with the lowest thread index
                 * operate on atoms with constraints from multiple threads.
                 */
                if (((atf[li->bla[b*2]] & mask) == 0) &&
                    ((atf[li->bla[b*2+1]] & mask) == 0))
                    /* Add the constraint to the local atom update index */
                    li_th->ind[li_th->nind++] = b;
                    /* Add the constraint to the rest block */
                    li_th->ind_r[li_th->nind_r++] = b;
            /* We are out of bits, assign all constraints to rest */
            for (b = li_th->b0; b < li_th->b1; b++)
                li_th->ind_r[li_th->nind_r++] = b;

    /* We need to copy all constraints which have not been assigned
     * to a thread to a separate list which will be handled by one thread.
     */
    li_m = &li->th[li->nth];
    for (th = 0; th < li->nth; th++)
        lincs_thread_t *li_th;

        li_th = &li->th[th];

        if (li_m->nind + li_th->nind_r > li_m->ind_nalloc)
            li_m->ind_nalloc = over_alloc_large(li_m->nind+li_th->nind_r);
            srenew(li_m->ind, li_m->ind_nalloc);

        for (b = 0; b < li_th->nind_r; b++)
            li_m->ind[li_m->nind++] = li_th->ind_r[b];

            fprintf(debug, "LINCS thread %d: %d constraints\n",
        fprintf(debug, "LINCS thread r: %d constraints\n",
/* Builds the local LINCS data (atom pairs, reference lengths and the
 * constraint connectivity matrix blbnb) from the local topology, then
 * sets up the thread division and fills the coupling matrix. */
void set_lincs(t_idef *idef, t_mdatoms *md,
               gmx_bool bDynamics, t_commrec *cr,
               struct gmx_lincsdata *li)
    int start, natoms, nflexcon;
    int i, k, ncc_alloc, ni, con, nconnect, concon;
    real lenA = 0, lenB;

    /* Zero the thread index ranges.
     * Otherwise without local constraints we could return with old ranges.
     */
    for (i = 0; i < li->nth; i++)
    li->th[li->nth].nind = 0;

    /* This is the local topology, so there are only F_CONSTR constraints */
    if (idef->il[F_CONSTR].nr == 0)
        /* There are no constraints,
         * we do not need to fill any data structures.
         */

        fprintf(debug, "Building the LINCS connectivity\n");

    if (DOMAINDECOMP(cr))
        if (cr->dd->constraints)
            dd_get_constraint_range(cr->dd, &start, &natoms);
            natoms = cr->dd->nat_home;
        natoms = md->homenr;
    at2con = make_at2con(start, natoms, idef->il, idef->iparams, bDynamics,

    if (idef->il[F_CONSTR].nr/3 > li->nc_alloc || li->nc_alloc == 0)
        li->nc_alloc = over_alloc_dd(idef->il[F_CONSTR].nr/3);
        srenew(li->bllen0, li->nc_alloc);
        srenew(li->ddist, li->nc_alloc);
        srenew(li->bla, 2*li->nc_alloc);
        srenew(li->blc, li->nc_alloc);
        srenew(li->blc1, li->nc_alloc);
        srenew(li->blnr, li->nc_alloc+1);
        srenew(li->bllen, li->nc_alloc);
        srenew(li->tmpv, li->nc_alloc);
        srenew(li->tmp1, li->nc_alloc);
        srenew(li->tmp2, li->nc_alloc);
        srenew(li->tmp3, li->nc_alloc);
        srenew(li->tmp4, li->nc_alloc);
        srenew(li->mlambda, li->nc_alloc);
        if (li->ncg_triangle > 0)
            /* This is allocating too much, but it is difficult to improve */
            srenew(li->triangle, li->nc_alloc);
            srenew(li->tri_bits, li->nc_alloc);

    iatom = idef->il[F_CONSTR].iatoms;

    ncc_alloc = li->ncc_alloc;

    ni = idef->il[F_CONSTR].nr/3;

    li->blnr[con] = nconnect;
    for (i = 0; i < ni; i++)
        lenA = idef->iparams[type].constr.dA;
        lenB = idef->iparams[type].constr.dB;
        /* Skip the flexible constraints when not doing dynamics */
        if (bDynamics || lenA != 0 || lenB != 0)
            li->bllen0[con] = lenA;
            li->ddist[con] = lenB - lenA;
            /* Set the length to the topology A length */
            li->bllen[con] = li->bllen0[con];
            li->bla[2*con] = a1;
            li->bla[2*con+1] = a2;
            /* Construct the constraint connection matrix blbnb */
            for (k = at2con.index[a1-start]; k < at2con.index[a1-start+1]; k++)
                concon = at2con.a[k];
                if (nconnect >= ncc_alloc)
                    ncc_alloc = over_alloc_small(nconnect+1);
                    srenew(li->blbnb, ncc_alloc);
                li->blbnb[nconnect++] = concon;
            for (k = at2con.index[a2-start]; k < at2con.index[a2-start+1]; k++)
                concon = at2con.a[k];
                if (nconnect+1 > ncc_alloc)
                    ncc_alloc = over_alloc_small(nconnect+1);
                    srenew(li->blbnb, ncc_alloc);
                li->blbnb[nconnect++] = concon;
            li->blnr[con+1] = nconnect;

            /* Order the blbnb matrix to optimize memory access */
            qsort(&(li->blbnb[li->blnr[con]]), li->blnr[con+1]-li->blnr[con],
                  sizeof(li->blbnb[0]), int_comp);

            /* Increase the constraint count */

    done_blocka(&at2con);

    /* This is the real number of constraints,
     * without dynamics the flexible constraints are not present.
     */

    li->ncc = li->blnr[con];
        /* Since the matrix is static, we can free some memory */
        ncc_alloc = li->ncc;
        srenew(li->blbnb, ncc_alloc);

    if (ncc_alloc > li->ncc_alloc)
        li->ncc_alloc = ncc_alloc;
        srenew(li->blmf, li->ncc_alloc);
        srenew(li->blmf1, li->ncc_alloc);
        srenew(li->tmpncc, li->ncc_alloc);

        fprintf(debug, "Number of constraints is %d, couplings %d\n",

        li->th[0].b1 = li->nc;
        lincs_thread_setup(li, md->nr);

    set_lincs_matrix(li, md->invmass, md->lambda);
/* Prints, to stderr and the log, all bonds that rotated more than wangle
 * degrees during this step; aborts when the warning count exceeds maxwarn
 * or a bond length is not finite. */
static void lincs_warning(FILE *fplog,
                          gmx_domdec_t *dd, rvec *x, rvec *xprime, t_pbc *pbc,
                          int ncons, int *bla, real *bllen, real wangle,
                          int maxwarn, int *warncount)
    real wfac, d0, d1, cosine;

    wfac = cos(DEG2RAD*wangle);

    sprintf(buf, "bonds that rotated more than %g degrees:\n"
            " atom 1 atom 2 angle previous, current, constraint length\n",
    fprintf(stderr, "%s", buf);
        fprintf(fplog, "%s", buf);
    for (b = 0; b < ncons; b++)
            pbc_dx_aiuc(pbc, x[i], x[j], v0);
            pbc_dx_aiuc(pbc, xprime[i], xprime[j], v1);
            rvec_sub(x[i], x[j], v0);
            rvec_sub(xprime[i], xprime[j], v1);
        cosine = iprod(v0, v1)/(d0*d1);
            sprintf(buf, " %6d %6d %5.1f %8.4f %8.4f %8.4f\n",
                    ddglatnr(dd, i), ddglatnr(dd, j),
                    RAD2DEG*acos(cosine), d0, d1, bllen[b]);
            fprintf(stderr, "%s", buf);
                fprintf(fplog, "%s", buf);
            if (!gmx_isfinite(d1))
                gmx_fatal(FARGS, "Bond length not finite.");

    if (*warncount > maxwarn)
        too_many_constraint_warnings(econtLINCS, *warncount);
/* Computes constraint-deviation statistics: the (weighted) number of local
 * constraints, the sum of squared relative deviations, and the maximum
 * relative deviation with the index of the worst constraint. */
static void cconerr(gmx_domdec_t *dd,
                    int ncons, int *bla, real *bllen, rvec *x, t_pbc *pbc,
                    real *ncons_loc, real *ssd, real *max, int *imax)
    real len, d, ma, ssd2, r2;
    int *nlocat, count, b, im;

    if (dd && dd->constraints)
        nlocat = dd_constraints_nlocalatoms(dd);

    for (b = 0; b < ncons; b++)
            pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
            rvec_sub(x[bla[2*b]], x[bla[2*b+1]], dx);
        len = r2*gmx_invsqrt(r2);
        d = fabs(len/bllen[b]-1);
        if (d > ma && (nlocat == NULL || nlocat[b]))
            /* nlocat counts atoms, so each constraint is counted twice */
            ssd2 += nlocat[b]*d*d;

    *ncons_loc = (nlocat ? 0.5 : 1)*count;
    *ssd       = (nlocat ? 0.5 : 1)*ssd2;
/* Top-level LINCS driver: applies the constraints (coordinates via do_lincs
 * or derivatives via do_lincsp) with OpenMP parallelization, accumulates
 * dH/dlambda, RMSD statistics, warnings and flop counts.
 * Returns FALSE when the maximum relative deviation exceeds 0.5. */
gmx_bool constrain_lincs(FILE *fplog, gmx_bool bLog, gmx_bool bEner,
                         struct gmx_lincsdata *lincsd, t_mdatoms *md,
                         rvec *x, rvec *xprime, rvec *min_proj,
                         matrix box, t_pbc *pbc,
                         real lambda, real *dvdlambda,
                         real invdt, rvec *v,
                         gmx_bool bCalcVir, tensor vir_r_m_dr,
                         int maxwarn, int *warncount)
    char buf[STRLEN], buf2[22], buf3[STRLEN];
    int i, warn, p_imax, error;
    real ncons_loc, p_ssd, p_max = 0;

    /* Nothing to do without constraints and without DD */
    if (lincsd->nc == 0 && cr->dd == NULL)
            lincsd->rmsd_data[0] = 0;
            if (ir->eI == eiSD2 && v == NULL)
                lincsd->rmsd_data[i] = 0;

    if (econq == econqCoord)
        if (ir->efep != efepNO)
            if (md->nMassPerturbed && lincsd->matlam != md->lambda)
                set_lincs_matrix(lincsd, md->invmass, md->lambda);

            for (i = 0; i < lincsd->nc; i++)
                lincsd->bllen[i] = lincsd->bllen0[i] + lambda*lincsd->ddist[i];

        if (lincsd->ncg_flex)
            /* Set the flexible constraint lengths to the old lengths */
                for (i = 0; i < lincsd->nc; i++)
                    if (lincsd->bllen[i] == 0)
                        pbc_dx_aiuc(pbc, x[lincsd->bla[2*i]], x[lincsd->bla[2*i+1]], dx);
                        lincsd->bllen[i] = norm(dx);
                for (i = 0; i < lincsd->nc; i++)
                    if (lincsd->bllen[i] == 0)
                        sqrt(distance2(x[lincsd->bla[2*i]],
                                       x[lincsd->bla[2*i+1]]));

            cconerr(cr->dd, lincsd->nc, lincsd->bla, lincsd->bllen, xprime, pbc,
                    &ncons_loc, &p_ssd, &p_max, &p_imax);

        /* This warn var can be updated by multiple threads
         * at the same time. But as we only need to detect
         * if a warning occurred or not, this is not an issue.
         */

        /* The OpenMP parallel region of constrain_lincs for coords */
#pragma omp parallel num_threads(lincsd->nth)
            int th = gmx_omp_get_thread_num();

                clear_mat(lincsd->th[th].vir_r_m_dr);

            do_lincs(x, xprime, box, pbc, lincsd, th,
                     bCalcVir || (ir->efep != efepNO),
                     ir->LincsWarnAngle, &warn,
                     th == 0 ? vir_r_m_dr : lincsd->th[th].vir_r_m_dr);

        if (ir->efep != efepNO)
            real dt_2, dvdl = 0;

            dt_2 = 1.0/(ir->delta_t*ir->delta_t);
            for (i = 0; (i < lincsd->nc); i++)
                dvdl -= lincsd->mlambda[i]*dt_2*lincsd->ddist[i];

        if (bLog && fplog && lincsd->nc > 0)
            fprintf(fplog, " Rel. Constraint Deviation: RMS MAX between atoms\n");
            fprintf(fplog, " Before LINCS %.6f %.6f %6d %6d\n",
                    sqrt(p_ssd/ncons_loc), p_max,
                    ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
                    ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));

            cconerr(cr->dd, lincsd->nc, lincsd->bla, lincsd->bllen, xprime, pbc,
                    &ncons_loc, &p_ssd, &p_max, &p_imax);
            /* Check if we are doing the second part of SD */
            if (ir->eI == eiSD2 && v == NULL)
            lincsd->rmsd_data[0] = ncons_loc;
            lincsd->rmsd_data[i] = p_ssd;
            lincsd->rmsd_data[0] = 0;
            lincsd->rmsd_data[1] = 0;
            lincsd->rmsd_data[2] = 0;
        if (bLog && fplog && lincsd->nc > 0)
                    " After LINCS %.6f %.6f %6d %6d\n\n",
                    sqrt(p_ssd/ncons_loc), p_max,
                    ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
                    ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));

                cconerr(cr->dd, lincsd->nc, lincsd->bla, lincsd->bllen, xprime, pbc,
                        &ncons_loc, &p_ssd, &p_max, &p_imax);
                sprintf(buf3, " in simulation %d", cr->ms->sim);
            sprintf(buf, "\nStep %s, time %g (ps) LINCS WARNING%s\n"
                    "relative constraint deviation after LINCS:\n"
                    "rms %.6f, max %.6f (between atoms %d and %d)\n",
                    gmx_step_str(step, buf2), ir->init_t+step*ir->delta_t,
                    sqrt(p_ssd/ncons_loc), p_max,
                    ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
                    ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));
                fprintf(fplog, "%s", buf);
            fprintf(stderr, "%s", buf);
            lincs_warning(fplog, cr->dd, x, xprime, pbc,
                          lincsd->nc, lincsd->bla, lincsd->bllen,
                          ir->LincsWarnAngle, maxwarn, warncount);
        bOK = (p_max < 0.5);

        if (lincsd->ncg_flex)
            for (i = 0; (i < lincsd->nc); i++)
                if (lincsd->bllen0[i] == 0 && lincsd->ddist[i] == 0)
                    lincsd->bllen[i] = 0;

        /* The OpenMP parallel region of constrain_lincs for derivatives */
#pragma omp parallel num_threads(lincsd->nth)
            int th = gmx_omp_get_thread_num();

            do_lincsp(x, xprime, min_proj, pbc, lincsd, th,
                      md->invmass, econq, ir->efep != efepNO ? dvdlambda : NULL,
                      bCalcVir, th == 0 ? vir_r_m_dr : lincsd->th[th].vir_r_m_dr);

    if (bCalcVir && lincsd->nth > 1)
        /* Reduce the per-thread virial contributions into vir_r_m_dr */
        for (i = 1; i < lincsd->nth; i++)
            m_add(vir_r_m_dr, lincsd->th[i].vir_r_m_dr, vir_r_m_dr);

    /* count assuming nit=1 */
    inc_nrnb(nrnb, eNR_LINCS, lincsd->nc);
    inc_nrnb(nrnb, eNR_LINCSMAT, (2+lincsd->nOrder)*lincsd->ncc);
    if (lincsd->ntriangle > 0)
        inc_nrnb(nrnb, eNR_LINCSMAT, lincsd->nOrder*lincsd->ncc_triangle);
        inc_nrnb(nrnb, eNR_CONSTR_V, lincsd->nc*2);
        inc_nrnb(nrnb, eNR_CONSTR_VIR, lincsd->nc);