2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 /* This file is completely threadsafe - keep it that way! */
49 #include "gromacs/utility/smalloc.h"
53 #include "mtop_util.h"
54 #include "gmx_omp_nthreads.h"
56 #include "gromacs/fileio/gmxfio.h"
57 #include "gmx_fatal.h"
58 #include "gromacs/utility/gmxomp.h"
/* Per-thread work division and scratch storage for LINCS.
 * NOTE(review): the struct header and closing brace are outside this view.
 */
61 int b0; /* first constraint for this thread */
62 int b1; /* b1-1 is the last constraint for this thread */
63 int nind; /* number of indices of constraints touching only own atoms */
64 int *ind; /* constraint index for updating atom data */
65 int nind_r; /* number of indices in the "rest" list (handled by one thread) */
66 int *ind_r; /* constraint index for updating atom data */
67 int ind_nalloc; /* allocation size of ind and ind_r */
68 tensor vir_r_m_dr; /* temporary variable for virial calculation */
/* All data needed by the LINCS constraint solver.
 * NOTE(review): several members (e.g. tmpncc, tmpv, tmp1..tmp4, rmsd_data)
 * are declared on lines not visible in this chunk.
 */
71 typedef struct gmx_lincsdata {
72 int ncg; /* the global number of constraints */
73 int ncg_flex; /* the global number of flexible constraints */
74 int ncg_triangle; /* the global number of constraints in triangles */
75 int nIter; /* the number of iterations */
76 int nOrder; /* the order of the matrix expansion */
77 int nc; /* the number of constraints */
78 int nc_alloc; /* the number we allocated memory for */
79 int ncc; /* the number of constraint connections */
80 int ncc_alloc; /* the number we allocated memory for */
81 real matlam; /* the FE lambda value used for filling blc and blmf */
82 real *bllen0; /* the reference distance in topology A */
83 real *ddist; /* the reference distance in top B - the r.d. in top A */
84 int *bla; /* the atom pairs involved in the constraints */
85 real *blc; /* 1/sqrt(invmass1 + invmass2) */
86 real *blc1; /* as blc, but with all masses 1 */
87 int *blnr; /* index into blbnb and blmf */
88 int *blbnb; /* list of constraint connections */
89 int ntriangle; /* the local number of constraints in triangles */
90 int *triangle; /* the list of triangle constraints */
91 int *tri_bits; /* the bits tell if the matrix element should be used */
92 int ncc_triangle; /* the number of constraint connections in triangles */
93 gmx_bool bCommIter; /* communicate before each LINCS iteration */
94 real *blmf; /* matrix of mass factors for constraint connections */
95 real *blmf1; /* as blmf, but with all masses 1 */
96 real *bllen; /* the reference bond length */
97 int nth; /* The number of threads doing LINCS */
98 lincs_thread_t *th; /* LINCS thread division */
99 unsigned *atf; /* atom flags for thread parallelization */
100 int atf_nalloc; /* allocation size of atf */
101 /* arrays for temporary storage in the LINCS algorithm */
108 real *mlambda; /* the Lagrange multipliers * -1 */
109 /* storage for the constraint RMS relative deviation output */
/* Returns the internal RMSD accumulation array of the LINCS data;
 * rmsd_data[0] holds the weight/count used as denominator (see lincs_rmsd).
 */
113 real *lincs_rmsd_data(struct gmx_lincsdata *lincsd)
115 return lincsd->rmsd_data;
/* Returns the RMS relative constraint deviation; with bSD2 set, the
 * accumulator for the second part of SD integration (index 2) is used
 * instead of index 1.
 */
118 real lincs_rmsd(struct gmx_lincsdata *lincsd, gmx_bool bSD2)
120 if (lincsd->rmsd_data[0] > 0)
122 return sqrt(lincsd->rmsd_data[bSD2 ? 2 : 1]/lincsd->rmsd_data[0]);
130 /* Do a set of nrec LINCS matrix multiplications.
131 * This function will return with up to date thread-local
132 * constraint data, without an OpenMP barrier.
134 static void lincs_matrix_expand(const struct gmx_lincsdata *lincsd,
137 real *rhs1, real *rhs2, real *sol)
139 int nrec, rec, b, j, n, nr0, nr1;
141 int ntriangle, tb, bits;
142 const int *blnr = lincsd->blnr, *blbnb = lincsd->blbnb;
143 const int *triangle = lincsd->triangle, *tri_bits = lincsd->tri_bits;
145 ntriangle = lincsd->ntriangle;
146 nrec = lincsd->nOrder;
148 for (rec = 0; rec < nrec; rec++)
151 for (b = b0; b < b1; b++)
154 for (n = blnr[b]; n < blnr[b+1]; n++)
157 mvb = mvb + blcc[n]*rhs1[j];
160 sol[b] = sol[b] + mvb;
165 } /* nrec*(ncons+2*nrtot) flops */
169 /* Perform an extra nrec recursions for only the constraints
170 * involved in rigid triangles.
171 * In this way their accuracy should come close to those of the other
172 * constraints, since triangles of constraints can produce eigenvalues
173 * around 0.7, while the effective eigenvalue for bond constraints
174 * is around 0.4 (and 0.7*0.7=0.5).
176 /* We need to copy the temporary array, since only the elements
177 * for constraints involved in triangles are updated and then
178 * the pointers are swapped. This saves copying the whole array.
179 * We need barrier as other threads might still be reading from rhs2.
182 for (b = b0; b < b1; b++)
189 for (rec = 0; rec < nrec; rec++)
191 for (tb = 0; tb < ntriangle; tb++)
198 for (n = nr0; n < nr1; n++)
200 if (bits & (1<<(n-nr0)))
203 mvb = mvb + blcc[n]*rhs1[j];
207 sol[b] = sol[b] + mvb;
213 } /* flops count is missing here */
215 /* We need a barrier here as the calling routine will continue
216 * to operate on the thread-local constraints without barrier.
/* Updates the atom positions (or velocities/forces, depending on caller)
 * for ALL constraints, without a thread-local index list.
 * Used on the single-thread path; see lincs_update_atoms.
 */
222 static void lincs_update_atoms_noind(int ncons, const int *bla,
224 const real *fac, rvec *r,
229 real mvb, im1, im2, tmp0, tmp1, tmp2;
233 for (b = 0; b < ncons; b++)
249 } /* 16 ncons flops */
253 for (b = 0; b < ncons; b++)
/* As lincs_update_atoms_noind, but only updates atoms for the constraints
 * listed in ind (a thread-local constraint index list).
 */
271 static void lincs_update_atoms_ind(int ncons, const int *ind, const int *bla,
273 const real *fac, rvec *r,
278 real mvb, im1, im2, tmp0, tmp1, tmp2;
282 for (bi = 0; bi < ncons; bi++)
299 } /* 16 ncons flops */
303 for (bi = 0; bi < ncons; bi++)
318 } /* 16 ncons flops */
/* Thread-aware atom update dispatcher: with one thread updates all
 * constraints directly; with multiple threads updates the thread-local
 * constraints per thread, and the "thread-overlap" constraints (stored
 * at li->th[li->nth]) on the master thread only.
 */
322 static void lincs_update_atoms(struct gmx_lincsdata *li, int th,
324 const real *fac, rvec *r,
330 /* Single thread, we simply update for all constraints */
331 lincs_update_atoms_noind(li->nc, li->bla, prefac, fac, r, invmass, x);
335 /* Update the atom vector components for our thread local
336 * constraints that only access our local atom range.
337 * This can be done without a barrier.
339 lincs_update_atoms_ind(li->th[th].nind, li->th[th].ind,
340 li->bla, prefac, fac, r, invmass, x);
342 if (li->th[li->nth].nind > 0)
344 /* Update the constraints that operate on atoms
345 * in multiple thread atom blocks on the master thread.
350 lincs_update_atoms_ind(li->th[li->nth].nind,
352 li->bla, prefac, fac, r, invmass, x);
358 /* LINCS projection, works on derivatives of the coordinates */
359 static void do_lincsp(rvec *x, rvec *f, rvec *fp, t_pbc *pbc,
360 struct gmx_lincsdata *lincsd, int th,
362 int econq, real *dvdlambda,
363 gmx_bool bCalcVir, tensor rmdf)
365 int b0, b1, b, i, j, k, n;
366 real tmp0, tmp1, tmp2, im1, im2, mvb, rlen, len, wfac, lam;
368 int *bla, *blnr, *blbnb;
370 real *blc, *blmf, *blcc, *rhs1, *rhs2, *sol;
372 b0 = lincsd->th[th].b0;
373 b1 = lincsd->th[th].b1;
378 blbnb = lincsd->blbnb;
379 if (econq != econqForce)
381 /* Use mass-weighted parameters */
387 /* Use non mass-weighted parameters */
389 blmf = lincsd->blmf1;
391 blcc = lincsd->tmpncc;
396 /* Compute normalized i-j vectors */
399 for (b = b0; b < b1; b++)
401 pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
407 for (b = b0; b < b1; b++)
409 rvec_sub(x[bla[2*b]], x[bla[2*b+1]], dx);
411 } /* 16 ncons flops */
415 for (b = b0; b < b1; b++)
422 for (n = blnr[b]; n < blnr[b+1]; n++)
425 blcc[n] = blmf[n]*(tmp0*r[k][0] + tmp1*r[k][1] + tmp2*r[k][2]);
427 mvb = blc[b]*(tmp0*(f[i][0] - f[j][0]) +
428 tmp1*(f[i][1] - f[j][1]) +
429 tmp2*(f[i][2] - f[j][2]));
434 /* Together: 23*ncons + 6*nrtot flops */
436 lincs_matrix_expand(lincsd, b0, b1, blcc, rhs1, rhs2, sol);
437 /* nrec*(ncons+2*nrtot) flops */
439 if (econq == econqDeriv_FlexCon)
441 /* We only want to constrain the flexible constraints,
442 * so we mask out the normal ones by setting sol to 0.
444 for (b = b0; b < b1; b++)
446 if (!(lincsd->bllen0[b] == 0 && lincsd->ddist[b] == 0))
453 /* We multiply sol by blc, so we can use lincs_update_atoms for OpenMP */
454 for (b = b0; b < b1; b++)
459 /* When constraining forces, we should not use mass weighting,
460 * so we pass invmass=NULL, which results in the use of 1 for all atoms.
462 lincs_update_atoms(lincsd, th, 1.0, sol, r,
463 (econq != econqForce) ? invmass : NULL, fp);
465 if (dvdlambda != NULL)
468 for (b = b0; b < b1; b++)
470 *dvdlambda -= sol[b]*lincsd->ddist[b];
477 /* Constraint virial,
478 * determines sum r_bond x delta f,
479 * where delta f is the constraint correction
480 * of the quantity that is being constrained.
482 for (b = b0; b < b1; b++)
484 mvb = lincsd->bllen[b]*sol[b];
485 for (i = 0; i < DIM; i++)
488 for (j = 0; j < DIM; j++)
490 rmdf[i][j] += tmp1*r[b][j];
493 } /* 23 ncons flops */
/* The core LINCS solve for coordinates: projects xp onto the constraint
 * manifold, iterating lincsd->nIter times to correct for centripetal
 * (rotational) effects, optionally updating velocities and the virial.
 * Runs per-thread over the constraint range [b0,b1).
 */
497 static void do_lincs(rvec *x, rvec *xp, matrix box, t_pbc *pbc,
498 struct gmx_lincsdata *lincsd, int th,
501 gmx_bool bCalcLambda,
502 real wangle, int *warn,
504 gmx_bool bCalcVir, tensor vir_r_m_dr)
506 int b0, b1, b, i, j, k, n, iter;
507 real tmp0, tmp1, tmp2, im1, im2, mvb, rlen, len, len2, dlen2, wfac;
509 int *bla, *blnr, *blbnb;
511 real *blc, *blmf, *bllen, *blcc, *rhs1, *rhs2, *sol, *blc_sol, *mlambda;
514 b0 = lincsd->th[th].b0;
515 b1 = lincsd->th[th].b1;
520 blbnb = lincsd->blbnb;
523 bllen = lincsd->bllen;
524 blcc = lincsd->tmpncc;
528 blc_sol = lincsd->tmp4;
529 mlambda = lincsd->mlambda;
531 if (DOMAINDECOMP(cr) && cr->dd->constraints)
533 nlocat = dd_constraints_nlocalatoms(cr->dd);
542 /* Compute normalized i-j vectors */
543 for (b = b0; b < b1; b++)
545 pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
549 for (b = b0; b < b1; b++)
551 for (n = blnr[b]; n < blnr[b+1]; n++)
553 blcc[n] = blmf[n]*iprod(r[b], r[blbnb[n]]);
555 pbc_dx_aiuc(pbc, xp[bla[2*b]], xp[bla[2*b+1]], dx);
556 mvb = blc[b]*(iprod(r[b], dx) - bllen[b]);
563 /* Compute normalized i-j vectors */
564 for (b = b0; b < b1; b++)
568 tmp0 = x[i][0] - x[j][0];
569 tmp1 = x[i][1] - x[j][1];
570 tmp2 = x[i][2] - x[j][2];
571 rlen = gmx_invsqrt(tmp0*tmp0+tmp1*tmp1+tmp2*tmp2);
575 } /* 16 ncons flops */
578 for (b = b0; b < b1; b++)
586 for (n = blnr[b]; n < blnr[b+1]; n++)
589 blcc[n] = blmf[n]*(tmp0*r[k][0] + tmp1*r[k][1] + tmp2*r[k][2]);
591 mvb = blc[b]*(tmp0*(xp[i][0] - xp[j][0]) +
592 tmp1*(xp[i][1] - xp[j][1]) +
593 tmp2*(xp[i][2] - xp[j][2]) - len);
598 /* Together: 26*ncons + 6*nrtot flops */
601 lincs_matrix_expand(lincsd, b0, b1, blcc, rhs1, rhs2, sol);
602 /* nrec*(ncons+2*nrtot) flops */
604 for (b = b0; b < b1; b++)
606 mlambda[b] = blc[b]*sol[b];
609 /* Update the coordinates */
610 lincs_update_atoms(lincsd, th, 1.0, mlambda, r, invmass, xp);
613 ******** Correction for centripetal effects ********
616 wfac = cos(DEG2RAD*wangle);
619 for (iter = 0; iter < lincsd->nIter; iter++)
621 if ((lincsd->bCommIter && DOMAINDECOMP(cr) && cr->dd->constraints))
626 /* Communicate the corrected non-local coordinates */
627 if (DOMAINDECOMP(cr))
629 dd_move_x_constraints(cr->dd, box, xp, NULL, FALSE);
635 for (b = b0; b < b1; b++)
640 pbc_dx_aiuc(pbc, xp[bla[2*b]], xp[bla[2*b+1]], dx);
644 rvec_sub(xp[bla[2*b]], xp[bla[2*b+1]], dx);
647 dlen2 = 2*len2 - norm2(dx);
648 if (dlen2 < wfac*len2 && (nlocat == NULL || nlocat[b]))
654 mvb = blc[b]*(len - dlen2*gmx_invsqrt(dlen2));
662 } /* 20*ncons flops */
664 lincs_matrix_expand(lincsd, b0, b1, blcc, rhs1, rhs2, sol);
665 /* nrec*(ncons+2*nrtot) flops */
667 for (b = b0; b < b1; b++)
674 /* Update the coordinates */
675 lincs_update_atoms(lincsd, th, 1.0, blc_sol, r, invmass, xp);
677 /* nit*ncons*(37+9*nrec) flops */
681 /* Update the velocities */
682 lincs_update_atoms(lincsd, th, invdt, mlambda, r, invmass, v);
686 if (nlocat != NULL && bCalcLambda)
688 /* In lincs_update_atoms thread might cross-read mlambda */
691 /* Only account for local atoms */
692 for (b = b0; b < b1; b++)
694 mlambda[b] *= 0.5*nlocat[b];
700 /* Constraint virial */
701 for (b = b0; b < b1; b++)
703 tmp0 = -bllen[b]*mlambda[b];
704 for (i = 0; i < DIM; i++)
707 for (j = 0; j < DIM; j++)
709 vir_r_m_dr[i][j] -= tmp1*r[b][j];
712 } /* 22 ncons flops */
716 * 26*ncons + 6*nrtot + nrec*(ncons+2*nrtot)
717 * + nit * (20*ncons + nrec*(ncons+2*nrtot) + 17 ncons)
719 * (26+nrec)*ncons + (6+2*nrec)*nrtot
720 * + nit * ((37+nrec)*ncons + 2*nrec*nrtot)
722 * (63+nrec)*ncons + (6+4*nrec)*nrtot
/* Fills the mass-factor arrays (blc, blc1, blmf, blmf1) and detects
 * constraint triangles, recording them in triangle/tri_bits.
 * Must be re-called when perturbed masses change (see matlam).
 */
726 void set_lincs_matrix(struct gmx_lincsdata *li, real *invmass, real lambda)
728 int i, a1, a2, n, k, sign, center;
730 const real invsqrt2 = 0.7071067811865475244;
732 for (i = 0; (i < li->nc); i++)
736 li->blc[i] = gmx_invsqrt(invmass[a1] + invmass[a2]);
737 li->blc1[i] = invsqrt2;
740 /* Construct the coupling coefficient matrix blmf */
742 li->ncc_triangle = 0;
743 for (i = 0; (i < li->nc); i++)
747 for (n = li->blnr[i]; (n < li->blnr[i+1]); n++)
750 if (a1 == li->bla[2*k] || a2 == li->bla[2*k+1])
758 if (a1 == li->bla[2*k] || a1 == li->bla[2*k+1])
768 li->blmf[n] = sign*invmass[center]*li->blc[i]*li->blc[k];
769 li->blmf1[n] = sign*0.5;
770 if (li->ncg_triangle > 0)
772 /* Look for constraint triangles */
773 for (nk = li->blnr[k]; (nk < li->blnr[k+1]); nk++)
776 if (kk != i && kk != k &&
777 (li->bla[2*kk] == end || li->bla[2*kk+1] == end))
779 if (li->ntriangle == 0 ||
780 li->triangle[li->ntriangle-1] < i)
782 /* Add this constraint to the triangle list */
783 li->triangle[li->ntriangle] = i;
784 li->tri_bits[li->ntriangle] = 0;
786 if (li->blnr[i+1] - li->blnr[i] > sizeof(li->tri_bits[0])*8 - 1)
788 gmx_fatal(FARGS, "A constraint is connected to %d constraints, this is more than the %d allowed for constraints participating in triangles",
789 li->blnr[i+1] - li->blnr[i],
790 sizeof(li->tri_bits[0])*8-1);
793 li->tri_bits[li->ntriangle-1] |= (1<<(n-li->blnr[i]));
803 fprintf(debug, "Of the %d constraints %d participate in triangles\n",
804 li->nc, li->ntriangle);
805 fprintf(debug, "There are %d couplings of which %d in triangles\n",
806 li->ncc, li->ncc_triangle);
810 * so we know with which lambda value the masses have been set.
/* Counts the number of constraints that participate in rigid triangles
 * (three constraints forming a closed loop of atoms), combining the
 * F_CONSTR and F_CONSTRNC interaction lists.
 */
815 static int count_triangle_constraints(t_ilist *ilist, t_blocka *at2con)
818 int c0, a00, a01, n1, c1, a10, a11, ac1, n2, c2, a20, a21;
821 t_iatom *ia1, *ia2, *iap;
823 ncon1 = ilist[F_CONSTR].nr/3;
824 ncon_tot = ncon1 + ilist[F_CONSTRNC].nr/3;
826 ia1 = ilist[F_CONSTR].iatoms;
827 ia2 = ilist[F_CONSTRNC].iatoms;
830 for (c0 = 0; c0 < ncon_tot; c0++)
833 iap = constr_iatomptr(ncon1, ia1, ia2, c0);
836 for (n1 = at2con->index[a01]; n1 < at2con->index[a01+1]; n1++)
841 iap = constr_iatomptr(ncon1, ia1, ia2, c1);
852 for (n2 = at2con->index[ac1]; n2 < at2con->index[ac1+1]; n2++)
855 if (c2 != c0 && c2 != c1)
857 iap = constr_iatomptr(ncon1, ia1, ia2, c2);
860 if (a20 == a00 || a21 == a00)
874 return ncon_triangle;
/* Returns TRUE when any constraint has further constraints connected at
 * both of its atoms, i.e. chains of more than two sequential constraints
 * exist (used to decide whether PLINCS must communicate each iteration).
 */
877 static gmx_bool more_than_two_sequential_constraints(const t_ilist *ilist,
878 const t_blocka *at2con)
880 t_iatom *ia1, *ia2, *iap;
881 int ncon1, ncon_tot, c;
883 gmx_bool bMoreThanTwoSequentialConstraints;
885 ncon1 = ilist[F_CONSTR].nr/3;
886 ncon_tot = ncon1 + ilist[F_CONSTRNC].nr/3;
888 ia1 = ilist[F_CONSTR].iatoms;
889 ia2 = ilist[F_CONSTRNC].iatoms;
891 bMoreThanTwoSequentialConstraints = FALSE;
892 for (c = 0; c < ncon_tot && !bMoreThanTwoSequentialConstraints; c++)
894 iap = constr_iatomptr(ncon1, ia1, ia2, c);
897 /* Check if this constraint has constraints connected at both atoms */
898 if (at2con->index[a1+1] - at2con->index[a1] > 1 &&
899 at2con->index[a2+1] - at2con->index[a2] > 1)
901 bMoreThanTwoSequentialConstraints = TRUE;
905 return bMoreThanTwoSequentialConstraints;
/* qsort comparator for ints, ascending (used to order blbnb). */
908 static int int_comp(const void *a, const void *b)
910 return (*(int *)a) - (*(int *)b);
/* Allocates and initializes the global (topology-level) LINCS data:
 * counts constraints and triangles over all molecule blocks, decides
 * whether PLINCS needs per-iteration communication, sets the thread
 * count and prints citations/diagnostics.
 */
913 gmx_lincsdata_t init_lincs(FILE *fplog, gmx_mtop_t *mtop,
914 int nflexcon_global, t_blocka *at2con,
915 gmx_bool bPLINCS, int nIter, int nProjOrder)
917 struct gmx_lincsdata *li;
923 fprintf(fplog, "\nInitializing%s LINear Constraint Solver\n",
924 bPLINCS ? " Parallel" : "");
930 gmx_mtop_ftype_count(mtop, F_CONSTR) +
931 gmx_mtop_ftype_count(mtop, F_CONSTRNC);
932 li->ncg_flex = nflexcon_global;
935 li->nOrder = nProjOrder;
937 li->ncg_triangle = 0;
938 li->bCommIter = FALSE;
939 for (mb = 0; mb < mtop->nmolblock; mb++)
941 molt = &mtop->moltype[mtop->molblock[mb].type];
943 mtop->molblock[mb].nmol*
944 count_triangle_constraints(molt->ilist,
945 &at2con[mtop->molblock[mb].type]);
946 if (bPLINCS && li->bCommIter == FALSE)
948 /* Check if we need to communicate not only before LINCS,
949 * but also before each iteration.
950 * The check for only two sequential constraints is only
951 * useful for the common case of H-bond only constraints.
952 * With more effort we could also make it useful for small
953 * molecules with nr. sequential constraints <= nOrder-1.
955 li->bCommIter = (li->nOrder < 1 || more_than_two_sequential_constraints(molt->ilist, &at2con[mtop->molblock[mb].type]));
958 if (debug && bPLINCS)
960 fprintf(debug, "PLINCS communication before each iteration: %d\n",
964 /* LINCS can run on any number of threads.
965 * Currently the number is fixed for the whole simulation,
966 * but it could be set in set_lincs().
968 li->nth = gmx_omp_nthreads_get(emntLINCS);
975 /* Allocate an extra element for "thread-overlap" constraints */
976 snew(li->th, li->nth+1);
980 fprintf(debug, "LINCS: using %d threads\n", li->nth);
983 if (bPLINCS || li->ncg_triangle > 0)
985 please_cite(fplog, "Hess2008a");
989 please_cite(fplog, "Hess97a");
994 fprintf(fplog, "The number of constraints is %d\n", li->ncg);
997 fprintf(fplog, "There are inter charge-group constraints,\n"
998 "will communicate selected coordinates each lincs iteration\n");
1000 if (li->ncg_triangle > 0)
1003 "%d constraints are involved in constraint triangles,\n"
1004 "will apply an additional matrix expansion of order %d for couplings\n"
1005 "between constraints inside triangles\n",
1006 li->ncg_triangle, li->nOrder);
1013 /* Sets up the work division over the threads */
1014 static void lincs_thread_setup(struct gmx_lincsdata *li, int natoms)
1016 lincs_thread_t *li_m;
1021 if (natoms > li->atf_nalloc)
1023 li->atf_nalloc = over_alloc_large(natoms);
1024 srenew(li->atf, li->atf_nalloc);
1028 /* Clear the atom flags */
1029 for (a = 0; a < natoms; a++)
1034 for (th = 0; th < li->nth; th++)
1036 lincs_thread_t *li_th;
1039 li_th = &li->th[th];
1041 /* The constraints are divided equally over the threads */
1042 li_th->b0 = (li->nc* th )/li->nth;
1043 li_th->b1 = (li->nc*(th+1))/li->nth;
1045 if (th < sizeof(*atf)*8)
1047 /* For each atom set a flag for constraints from each */
1048 for (b = li_th->b0; b < li_th->b1; b++)
1050 atf[li->bla[b*2] ] |= (1U<<th);
1051 atf[li->bla[b*2+1]] |= (1U<<th);
1056 #pragma omp parallel for num_threads(li->nth) schedule(static)
1057 for (th = 0; th < li->nth; th++)
1059 lincs_thread_t *li_th;
1063 li_th = &li->th[th];
1065 if (li_th->b1 - li_th->b0 > li_th->ind_nalloc)
1067 li_th->ind_nalloc = over_alloc_large(li_th->b1-li_th->b0);
1068 srenew(li_th->ind, li_th->ind_nalloc);
1069 srenew(li_th->ind_r, li_th->ind_nalloc);
1072 if (th < sizeof(*atf)*8)
1074 mask = (1U<<th) - 1U;
1078 for (b = li_th->b0; b < li_th->b1; b++)
1080 /* We let the constraint with the lowest thread index
1081 * operate on atoms with constraints from multiple threads.
1083 if (((atf[li->bla[b*2]] & mask) == 0) &&
1084 ((atf[li->bla[b*2+1]] & mask) == 0))
1086 /* Add the constraint to the local atom update index */
1087 li_th->ind[li_th->nind++] = b;
1091 /* Add the constraint to the rest block */
1092 li_th->ind_r[li_th->nind_r++] = b;
1098 /* We are out of bits, assign all constraints to rest */
1099 for (b = li_th->b0; b < li_th->b1; b++)
1101 li_th->ind_r[li_th->nind_r++] = b;
1106 /* We need to copy all constraints which have not been assigned
1107 * to a thread to a separate list which will be handled by one thread.
1109 li_m = &li->th[li->nth];
1112 for (th = 0; th < li->nth; th++)
1114 lincs_thread_t *li_th;
1117 li_th = &li->th[th];
1119 if (li_m->nind + li_th->nind_r > li_m->ind_nalloc)
1121 li_m->ind_nalloc = over_alloc_large(li_m->nind+li_th->nind_r);
1122 srenew(li_m->ind, li_m->ind_nalloc);
1125 for (b = 0; b < li_th->nind_r; b++)
1127 li_m->ind[li_m->nind++] = li_th->ind_r[b];
1132 fprintf(debug, "LINCS thread %d: %d constraints\n",
1139 fprintf(debug, "LINCS thread r: %d constraints\n",
/* (Re)builds the local LINCS data structures from the local topology:
 * copies constraint atom pairs and reference lengths, constructs the
 * constraint connectivity matrix blbnb, divides work over threads and
 * fills the mass-factor matrices.
 */
1145 void set_lincs(t_idef *idef, t_mdatoms *md,
1146 gmx_bool bDynamics, t_commrec *cr,
1147 struct gmx_lincsdata *li)
1149 int start, natoms, nflexcon;
1152 int i, k, ncc_alloc, ni, con, nconnect, concon;
1154 real lenA = 0, lenB;
1159 /* Zero the thread index ranges.
1160 * Otherwise without local constraints we could return with old ranges.
1162 for (i = 0; i < li->nth; i++)
1170 li->th[li->nth].nind = 0;
1173 /* This is the local topology, so there are only F_CONSTR constraints */
1174 if (idef->il[F_CONSTR].nr == 0)
1176 /* There are no constraints,
1177 * we do not need to fill any data structures.
1184 fprintf(debug, "Building the LINCS connectivity\n");
1187 if (DOMAINDECOMP(cr))
1189 if (cr->dd->constraints)
1191 dd_get_constraint_range(cr->dd, &start, &natoms);
1195 natoms = cr->dd->nat_home;
1202 natoms = md->homenr;
1204 at2con = make_at2con(start, natoms, idef->il, idef->iparams, bDynamics,
1208 if (idef->il[F_CONSTR].nr/3 > li->nc_alloc || li->nc_alloc == 0)
1210 li->nc_alloc = over_alloc_dd(idef->il[F_CONSTR].nr/3);
1211 srenew(li->bllen0, li->nc_alloc);
1212 srenew(li->ddist, li->nc_alloc);
1213 srenew(li->bla, 2*li->nc_alloc);
1214 srenew(li->blc, li->nc_alloc);
1215 srenew(li->blc1, li->nc_alloc);
1216 srenew(li->blnr, li->nc_alloc+1);
1217 srenew(li->bllen, li->nc_alloc);
1218 srenew(li->tmpv, li->nc_alloc);
1219 srenew(li->tmp1, li->nc_alloc);
1220 srenew(li->tmp2, li->nc_alloc);
1221 srenew(li->tmp3, li->nc_alloc);
1222 srenew(li->tmp4, li->nc_alloc);
1223 srenew(li->mlambda, li->nc_alloc);
1224 if (li->ncg_triangle > 0)
1226 /* This is allocating too much, but it is difficult to improve */
1227 srenew(li->triangle, li->nc_alloc);
1228 srenew(li->tri_bits, li->nc_alloc);
1232 iatom = idef->il[F_CONSTR].iatoms;
1234 ncc_alloc = li->ncc_alloc;
1237 ni = idef->il[F_CONSTR].nr/3;
1241 li->blnr[con] = nconnect;
1242 for (i = 0; i < ni; i++)
1248 lenA = idef->iparams[type].constr.dA;
1249 lenB = idef->iparams[type].constr.dB;
1250 /* Skip the flexible constraints when not doing dynamics */
1251 if (bDynamics || lenA != 0 || lenB != 0)
1253 li->bllen0[con] = lenA;
1254 li->ddist[con] = lenB - lenA;
1255 /* Set the length to the topology A length */
1256 li->bllen[con] = li->bllen0[con];
1257 li->bla[2*con] = a1;
1258 li->bla[2*con+1] = a2;
1259 /* Construct the constraint connection matrix blbnb */
1260 for (k = at2con.index[a1-start]; k < at2con.index[a1-start+1]; k++)
1262 concon = at2con.a[k];
1265 if (nconnect >= ncc_alloc)
1267 ncc_alloc = over_alloc_small(nconnect+1);
1268 srenew(li->blbnb, ncc_alloc);
1270 li->blbnb[nconnect++] = concon;
1273 for (k = at2con.index[a2-start]; k < at2con.index[a2-start+1]; k++)
1275 concon = at2con.a[k];
1278 if (nconnect+1 > ncc_alloc)
1280 ncc_alloc = over_alloc_small(nconnect+1);
1281 srenew(li->blbnb, ncc_alloc);
1283 li->blbnb[nconnect++] = concon;
1286 li->blnr[con+1] = nconnect;
1290 /* Order the blbnb matrix to optimize memory access */
1291 qsort(&(li->blbnb[li->blnr[con]]), li->blnr[con+1]-li->blnr[con],
1292 sizeof(li->blbnb[0]), int_comp);
1294 /* Increase the constraint count */
1299 done_blocka(&at2con);
1301 /* This is the real number of constraints,
1302 * without dynamics the flexible constraints are not present.
1306 li->ncc = li->blnr[con];
1309 /* Since the matrix is static, we can free some memory */
1310 ncc_alloc = li->ncc;
1311 srenew(li->blbnb, ncc_alloc);
1314 if (ncc_alloc > li->ncc_alloc)
1316 li->ncc_alloc = ncc_alloc;
1317 srenew(li->blmf, li->ncc_alloc);
1318 srenew(li->blmf1, li->ncc_alloc);
1319 srenew(li->tmpncc, li->ncc_alloc);
1324 fprintf(debug, "Number of constraints is %d, couplings %d\n",
1331 li->th[0].b1 = li->nc;
1335 lincs_thread_setup(li, md->nr);
1338 set_lincs_matrix(li, md->invmass, md->lambda);
/* Prints a table of all constraints that rotated more than wangle degrees
 * during the step; aborts via too_many_constraint_warnings when maxwarn
 * is exceeded or gmx_fatal when a bond length became non-finite.
 */
1341 static void lincs_warning(FILE *fplog,
1342 gmx_domdec_t *dd, rvec *x, rvec *xprime, t_pbc *pbc,
1343 int ncons, int *bla, real *bllen, real wangle,
1344 int maxwarn, int *warncount)
1348 real wfac, d0, d1, cosine;
1351 wfac = cos(DEG2RAD*wangle);
1353 sprintf(buf, "bonds that rotated more than %g degrees:\n"
1354 " atom 1 atom 2 angle previous, current, constraint length\n",
1356 fprintf(stderr, "%s", buf);
1359 fprintf(fplog, "%s", buf);
1362 for (b = 0; b < ncons; b++)
1368 pbc_dx_aiuc(pbc, x[i], x[j], v0);
1369 pbc_dx_aiuc(pbc, xprime[i], xprime[j], v1);
1373 rvec_sub(x[i], x[j], v0);
1374 rvec_sub(xprime[i], xprime[j], v1);
1378 cosine = iprod(v0, v1)/(d0*d1);
1381 sprintf(buf, " %6d %6d %5.1f %8.4f %8.4f %8.4f\n",
1382 ddglatnr(dd, i), ddglatnr(dd, j),
1383 RAD2DEG*acos(cosine), d0, d1, bllen[b]);
1384 fprintf(stderr, "%s", buf);
1387 fprintf(fplog, "%s", buf);
1389 if (!gmx_isfinite(d1))
1391 gmx_fatal(FARGS, "Bond length not finite.");
1397 if (*warncount > maxwarn)
1399 too_many_constraint_warnings(econtLINCS, *warncount);
/* Computes constraint-deviation statistics: the (weighted) constraint
 * count, sum of squared relative deviations, the maximum relative
 * deviation and the index of the worst constraint. With domain
 * decomposition only locally-owned constraints are counted (half-weighted
 * via nlocat, hence the 0.5 factors).
 */
1403 static void cconerr(gmx_domdec_t *dd,
1404 int ncons, int *bla, real *bllen, rvec *x, t_pbc *pbc,
1405 real *ncons_loc, real *ssd, real *max, int *imax)
1407 real len, d, ma, ssd2, r2;
1408 int *nlocat, count, b, im;
1411 if (dd && dd->constraints)
1413 nlocat = dd_constraints_nlocalatoms(dd);
1424 for (b = 0; b < ncons; b++)
1428 pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
1432 rvec_sub(x[bla[2*b]], x[bla[2*b+1]], dx);
1435 len = r2*gmx_invsqrt(r2);
1436 d = fabs(len/bllen[b]-1);
1437 if (d > ma && (nlocat == NULL || nlocat[b]))
1449 ssd2 += nlocat[b]*d*d;
1454 *ncons_loc = (nlocat ? 0.5 : 1)*count;
1455 *ssd = (nlocat ? 0.5 : 1)*ssd2;
/* Top-level LINCS entry point: applies constraints to coordinates
 * (econqCoord, via do_lincs) or projects derivatives (via do_lincsp),
 * handles free-energy lambda updates, flexible constraint lengths,
 * RMSD bookkeeping, warnings and flop accounting.
 * Returns FALSE when the maximum relative deviation exceeds 0.5.
 */
1460 gmx_bool constrain_lincs(FILE *fplog, gmx_bool bLog, gmx_bool bEner,
1463 struct gmx_lincsdata *lincsd, t_mdatoms *md,
1465 rvec *x, rvec *xprime, rvec *min_proj,
1466 matrix box, t_pbc *pbc,
1467 real lambda, real *dvdlambda,
1468 real invdt, rvec *v,
1469 gmx_bool bCalcVir, tensor vir_r_m_dr,
1472 int maxwarn, int *warncount)
1474 char buf[STRLEN], buf2[22], buf3[STRLEN];
1475 int i, warn, p_imax, error;
1476 real ncons_loc, p_ssd, p_max = 0;
1482 if (lincsd->nc == 0 && cr->dd == NULL)
1486 lincsd->rmsd_data[0] = 0;
1487 if (ir->eI == eiSD2 && v == NULL)
1495 lincsd->rmsd_data[i] = 0;
1501 if (econq == econqCoord)
1503 if (ir->efep != efepNO)
1505 if (md->nMassPerturbed && lincsd->matlam != md->lambda)
1507 set_lincs_matrix(lincsd, md->invmass, md->lambda);
1510 for (i = 0; i < lincsd->nc; i++)
1512 lincsd->bllen[i] = lincsd->bllen0[i] + lambda*lincsd->ddist[i];
1516 if (lincsd->ncg_flex)
1518 /* Set the flexible constraint lengths to the old lengths */
1521 for (i = 0; i < lincsd->nc; i++)
1523 if (lincsd->bllen[i] == 0)
1525 pbc_dx_aiuc(pbc, x[lincsd->bla[2*i]], x[lincsd->bla[2*i+1]], dx);
1526 lincsd->bllen[i] = norm(dx);
1532 for (i = 0; i < lincsd->nc; i++)
1534 if (lincsd->bllen[i] == 0)
1537 sqrt(distance2(x[lincsd->bla[2*i]],
1538 x[lincsd->bla[2*i+1]]));
1546 cconerr(cr->dd, lincsd->nc, lincsd->bla, lincsd->bllen, xprime, pbc,
1547 &ncons_loc, &p_ssd, &p_max, &p_imax);
1550 /* This warn var can be updated by multiple threads
1551 * at the same time. But as we only need to detect
1552 * if a warning occurred or not, this is not an issue.
1556 /* The OpenMP parallel region of constrain_lincs for coords */
1557 #pragma omp parallel num_threads(lincsd->nth)
1559 int th = gmx_omp_get_thread_num();
1561 clear_mat(lincsd->th[th].vir_r_m_dr);
1563 do_lincs(x, xprime, box, pbc, lincsd, th,
1565 bCalcVir || (ir->efep != efepNO),
1566 ir->LincsWarnAngle, &warn,
1568 th == 0 ? vir_r_m_dr : lincsd->th[th].vir_r_m_dr);
1571 if (ir->efep != efepNO)
1573 real dt_2, dvdl = 0;
1575 dt_2 = 1.0/(ir->delta_t*ir->delta_t);
1576 for (i = 0; (i < lincsd->nc); i++)
1578 dvdl -= lincsd->mlambda[i]*dt_2*lincsd->ddist[i];
1583 if (bLog && fplog && lincsd->nc > 0)
1585 fprintf(fplog, " Rel. Constraint Deviation: RMS MAX between atoms\n");
1586 fprintf(fplog, " Before LINCS %.6f %.6f %6d %6d\n",
1587 sqrt(p_ssd/ncons_loc), p_max,
1588 ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
1589 ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));
1593 cconerr(cr->dd, lincsd->nc, lincsd->bla, lincsd->bllen, xprime, pbc,
1594 &ncons_loc, &p_ssd, &p_max, &p_imax);
1595 /* Check if we are doing the second part of SD */
1596 if (ir->eI == eiSD2 && v == NULL)
1604 lincsd->rmsd_data[0] = ncons_loc;
1605 lincsd->rmsd_data[i] = p_ssd;
1609 lincsd->rmsd_data[0] = 0;
1610 lincsd->rmsd_data[1] = 0;
1611 lincsd->rmsd_data[2] = 0;
1613 if (bLog && fplog && lincsd->nc > 0)
1616 " After LINCS %.6f %.6f %6d %6d\n\n",
1617 sqrt(p_ssd/ncons_loc), p_max,
1618 ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
1619 ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));
1626 cconerr(cr->dd, lincsd->nc, lincsd->bla, lincsd->bllen, xprime, pbc,
1627 &ncons_loc, &p_ssd, &p_max, &p_imax);
1630 sprintf(buf3, " in simulation %d", cr->ms->sim);
1636 sprintf(buf, "\nStep %s, time %g (ps) LINCS WARNING%s\n"
1637 "relative constraint deviation after LINCS:\n"
1638 "rms %.6f, max %.6f (between atoms %d and %d)\n",
1639 gmx_step_str(step, buf2), ir->init_t+step*ir->delta_t,
1641 sqrt(p_ssd/ncons_loc), p_max,
1642 ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
1643 ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));
1646 fprintf(fplog, "%s", buf);
1648 fprintf(stderr, "%s", buf);
1649 lincs_warning(fplog, cr->dd, x, xprime, pbc,
1650 lincsd->nc, lincsd->bla, lincsd->bllen,
1651 ir->LincsWarnAngle, maxwarn, warncount);
1653 bOK = (p_max < 0.5);
1656 if (lincsd->ncg_flex)
1658 for (i = 0; (i < lincsd->nc); i++)
1660 if (lincsd->bllen0[i] == 0 && lincsd->ddist[i] == 0)
1662 lincsd->bllen[i] = 0;
1669 /* The OpenMP parallel region of constrain_lincs for derivatives */
1670 #pragma omp parallel num_threads(lincsd->nth)
1672 int th = gmx_omp_get_thread_num();
1674 do_lincsp(x, xprime, min_proj, pbc, lincsd, th,
1675 md->invmass, econq, ir->efep != efepNO ? dvdlambda : NULL,
1676 bCalcVir, th == 0 ? vir_r_m_dr : lincsd->th[th].vir_r_m_dr);
1680 if (bCalcVir && lincsd->nth > 1)
1682 for (i = 1; i < lincsd->nth; i++)
1684 m_add(vir_r_m_dr, lincsd->th[i].vir_r_m_dr, vir_r_m_dr);
1688 /* count assuming nit=1 */
1689 inc_nrnb(nrnb, eNR_LINCS, lincsd->nc);
1690 inc_nrnb(nrnb, eNR_LINCSMAT, (2+lincsd->nOrder)*lincsd->ncc);
1691 if (lincsd->ntriangle > 0)
1693 inc_nrnb(nrnb, eNR_LINCSMAT, lincsd->nOrder*lincsd->ncc_triangle);
1697 inc_nrnb(nrnb, eNR_CONSTR_V, lincsd->nc*2);
1701 inc_nrnb(nrnb, eNR_CONSTR_VIR, lincsd->nc);