2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
37 /* This file is completely threadsafe - keep it that way! */
48 #include "gromacs/domdec/domdec.h"
49 #include "gromacs/fileio/gmxfio.h"
50 #include "gromacs/legacyheaders/constr.h"
51 #include "gromacs/legacyheaders/copyrite.h"
52 #include "gromacs/legacyheaders/gmx_omp_nthreads.h"
53 #include "gromacs/legacyheaders/mdrun.h"
54 #include "gromacs/legacyheaders/nrnb.h"
55 #include "gromacs/legacyheaders/types/commrec.h"
56 #include "gromacs/math/units.h"
57 #include "gromacs/math/vec.h"
58 #include "gromacs/pbcutil/pbc-simd.h"
59 #include "gromacs/pbcutil/pbc.h"
60 #include "gromacs/simd/simd.h"
61 #include "gromacs/simd/simd_math.h"
62 #include "gromacs/simd/vector_operations.h"
63 #include "gromacs/topology/block.h"
64 #include "gromacs/topology/mtop_util.h"
65 #include "gromacs/utility/bitmask.h"
66 #include "gromacs/utility/fatalerror.h"
67 #include "gromacs/utility/gmxomp.h"
68 #include "gromacs/utility/smalloc.h"
70 /* MSVC 2010 produces buggy SIMD PBC code, disable SIMD for MSVC <= 2010 */
71 #if defined GMX_SIMD_HAVE_REAL && !(defined _MSC_VER && _MSC_VER < 1700)
/* Per-thread LINCS scratch data (members of lincs_thread_t).
 * NOTE(review): this chunk is a fragmentary extraction; the struct's
 * opening/closing lines are not visible here, and original line numbers
 * are fused into each line. Code bytes left untouched.
 */
76 int b0; /* first constraint for this thread */
77 int b1; /* b1-1 is the last constraint for this thread */
78 int nind; /* number of indices */
79 int *ind; /* constraint index for updating atom data */
80 int nind_r; /* number of indices */
81 int *ind_r; /* constraint index for updating atom data */
82 int ind_nalloc; /* allocation size of ind and ind_r */
83 tensor vir_r_m_dr; /* temporary variable for virial calculation */
84 real dhdlambda; /* temporary variable for lambda derivative */
85 real *simd_buf; /* Aligned work array for SIMD */
/* Main LINCS data structure: topology-derived constraint connectivity,
 * coupling-coefficient matrices, and per-thread work division.
 * NOTE(review): fragmentary extraction -- several members referenced later
 * in this file (e.g. rmsd_data, tmpncc, tmp4, bla-adjacent temporaries)
 * are on lines missing from this view. Code bytes left untouched.
 */
88 typedef struct gmx_lincsdata {
89 int ncg; /* the global number of constraints */
90 int ncg_flex; /* the global number of flexible constraints */
91 int ncg_triangle; /* the global number of constraints in triangles */
92 int nIter; /* the number of iterations */
93 int nOrder; /* the order of the matrix expansion */
94 int nc; /* the number of constraints */
95 int nc_alloc; /* the number we allocated memory for */
96 int ncc; /* the number of constraint connections */
97 int ncc_alloc; /* the number we allocated memory for */
98 real matlam; /* the FE lambda value used for filling blc and blmf */
99 real *bllen0; /* the reference distance in topology A */
100 real *ddist; /* the reference distance in top B - the r.d. in top A */
101 int *bla; /* the atom pairs involved in the constraints */
102 real *blc; /* 1/sqrt(invmass1 + invmass2) */
103 real *blc1; /* as blc, but with all masses 1 */
104 int *blnr; /* index into blbnb and blmf */
105 int *blbnb; /* list of constraint connections */
106 int ntriangle; /* the local number of constraints in triangles */
107 int *triangle; /* the list of triangle constraints */
108 int *tri_bits; /* the bits tell if the matrix element should be used */
109 int ncc_triangle; /* the number of constraint connections in triangles */
110 gmx_bool bCommIter; /* communicate before each LINCS interation */
111 real *blmf; /* matrix of mass factors for constraint connections */
112 real *blmf1; /* as blmf, but with all masses 1 */
113 real *bllen; /* the reference bond length */
114 int nth; /* The number of threads doing LINCS */
115 lincs_thread_t *th; /* LINCS thread division */
116 gmx_bitmask_t *atf; /* atom flags for thread parallelization */
117 int atf_nalloc; /* allocation size of atf */
118 /* arrays for temporary storage in the LINCS algorithm */
125 real *mlambda; /* the Lagrange multipliers * -1 */
126 /* storage for the constraint RMS relative deviation output */
96 /* Define simd_width for memory allocation used for SIMD code */
/* NOTE(review): the two simd_width definitions below are presumably
 * guarded by "#if defined LINCS_SIMD" / "#else" / "#endif" lines that
 * are missing from this extraction -- confirm against the full file. */
132 static const int simd_width = GMX_SIMD_REAL_WIDTH;
134 static const int simd_width = 1;
136 /* We can't use small memory alignments on many systems, so use min 64 bytes*/
137 static const int align_bytes = std::max<int>(64, simd_width*sizeof(real));
/* Return the RMSD accumulation array of the LINCS data.
 * NOTE(review): the rmsd_data member is declared on a line missing from
 * this extraction; braces of this function are also missing here. */
139 real *lincs_rmsd_data(struct gmx_lincsdata *lincsd)
141 return lincsd->rmsd_data;
/* Return the constraint RMS relative deviation: sqrt(sum/count) from
 * rmsd_data, index 2 vs 1 selected by bSD2.
 * NOTE(review): the else-branch (rmsd_data[0] == 0 case) is on lines
 * missing from this extraction. */
144 real lincs_rmsd(struct gmx_lincsdata *lincsd, gmx_bool bSD2)
146 if (lincsd->rmsd_data[0] > 0)
148 return sqrt(lincsd->rmsd_data[bSD2 ? 2 : 1]/lincsd->rmsd_data[0]);
/* NOTE(review): fragmentary extraction -- loop bodies, braces and the
 * rhs1/rhs2 pointer swaps of this function are partly on missing lines.
 * Comments only; visible code bytes untouched. */
156 /* Do a set of nrec LINCS matrix multiplications.
157 * This function will return with up to date thread-local
158 * constraint data, without an OpenMP barrier.
160 static void lincs_matrix_expand(const struct gmx_lincsdata *lincsd,
163 real *rhs1, real *rhs2, real *sol)
165 int nrec, rec, b, j, n, nr0, nr1;
167 int ntriangle, tb, bits;
168 const int *blnr = lincsd->blnr, *blbnb = lincsd->blbnb;
169 const int *triangle = lincsd->triangle, *tri_bits = lincsd->tri_bits;
171 ntriangle = lincsd->ntriangle;
172 nrec = lincsd->nOrder;
174 for (rec = 0; rec < nrec; rec++)
177 for (b = b0; b < b1; b++)
180 for (n = blnr[b]; n < blnr[b+1]; n++)
183 mvb = mvb + blcc[n]*rhs1[j];
186 sol[b] = sol[b] + mvb;
191 } /* nrec*(ncons+2*nrtot) flops */
195 /* Perform an extra nrec recursions for only the constraints
196 * involved in rigid triangles.
197 * In this way their accuracy should come close to those of the other
198 * constraints, since triangles of constraints can produce eigenvalues
199 * around 0.7, while the effective eigenvalue for bond constraints
200 * is around 0.4 (and 0.7*0.7=0.5).
202 /* We need to copy the temporary array, since only the elements
203 * for constraints involved in triangles are updated and then
204 * the pointers are swapped. This saves copying the whole array.
205 * We need a barrier as other threads might still be reading from rhs2.
208 for (b = b0; b < b1; b++)
215 for (rec = 0; rec < nrec; rec++)
217 for (tb = 0; tb < ntriangle; tb++)
224 for (n = nr0; n < nr1; n++)
226 if (bits & (1<<(n-nr0)))
229 mvb = mvb + blcc[n]*rhs1[j];
233 sol[b] = sol[b] + mvb;
239 } /* flops count is missing here */
241 /* We need a barrier here as the calling routine will continue
242 * to operate on the thread-local constraints without barrier.
/* Apply constraint corrections fac[b]*r[b] to the atoms of all ncons
 * constraints, without an index array (single-thread path).
 * NOTE(review): the two loop bodies (with/without a velocity-like second
 * branch, judging by the duplicated loop) are on lines missing from this
 * extraction. Comments only; code bytes untouched. */
248 static void lincs_update_atoms_noind(int ncons, const int *bla,
250 const real *fac, rvec *r,
255 real mvb, im1, im2, tmp0, tmp1, tmp2;
259 for (b = 0; b < ncons; b++)
275 } /* 16 ncons flops */
279 for (b = 0; b < ncons; b++)
/* As lincs_update_atoms_noind, but only for the ncons constraints
 * listed in the index array ind (thread-local subset).
 * NOTE(review): loop bodies are on lines missing from this extraction. */
297 static void lincs_update_atoms_ind(int ncons, const int *ind, const int *bla,
299 const real *fac, rvec *r,
304 real mvb, im1, im2, tmp0, tmp1, tmp2;
308 for (bi = 0; bi < ncons; bi++)
325 } /* 16 ncons flops */
329 for (bi = 0; bi < ncons; bi++)
344 } /* 16 ncons flops */
/* Dispatch atom updates: single-threaded path updates all constraints;
 * multi-threaded path updates the thread-local index list and lets one
 * thread handle the "overlap" constraints stored in th[nth].
 * NOTE(review): fragmentary extraction; branch conditions and barriers
 * are partly on missing lines. Comments only; code bytes untouched. */
348 static void lincs_update_atoms(struct gmx_lincsdata *li, int th,
350 const real *fac, rvec *r,
356 /* Single thread, we simply update for all constraints */
357 lincs_update_atoms_noind(li->nc, li->bla, prefac, fac, r, invmass, x);
361 /* Update the atom vector components for our thread local
362 * constraints that only access our local atom range.
363 * This can be done without a barrier.
365 lincs_update_atoms_ind(li->th[th].nind, li->th[th].ind,
366 li->bla, prefac, fac, r, invmass, x);
368 if (li->th[li->nth].nind > 0)
370 /* Update the constraints that operate on atoms
371 * in multiple thread atom blocks on the master thread.
376 lincs_update_atoms_ind(li->th[li->nth].nind,
378 li->bla, prefac, fac, r, invmass, x);
/* NOTE(review): fragmentary extraction; some parameter lines and braces
 * are missing. Comments only; code bytes untouched. */
385 /* Calculate distances between indexed atom coordinate pairs
386 * and store the result in 3 SIMD registers through an aligned buffer.
387 * Start from index is and load GMX_SIMD_REAL_WIDTH elements.
388 * Note that pair_index must contain valid indices up to is+GMX_SIMD_REAL_WIDTH.
390 static gmx_inline void gmx_simdcall
391 rvec_dist_to_simd(const rvec *x,
393 const int *pair_index,
401 for (s = 0; s < GMX_SIMD_REAL_WIDTH; s++)
403 /* Store the non PBC corrected distances packed and aligned */
404 for (m = 0; m < DIM; m++)
406 buf[m*GMX_SIMD_REAL_WIDTH + s] =
407 x[pair_index[2*(is+s)]][m] - x[pair_index[2*(is+s) + 1]][m];
410 *dx = gmx_simd_load_r(buf + 0*GMX_SIMD_REAL_WIDTH);
411 *dy = gmx_simd_load_r(buf + 1*GMX_SIMD_REAL_WIDTH);
412 *dz = gmx_simd_load_r(buf + 2*GMX_SIMD_REAL_WIDTH);
/* NOTE(review): fragmentary extraction; remaining parameters (y, z, buf,
 * v, is) and braces are on missing lines. Code bytes untouched. */
415 /* Store a SIMD vector in GMX_SIMD_REAL_WIDTH rvecs through an aligned buffer */
416 static gmx_inline void gmx_simdcall
417 copy_simd_vec_to_rvec(gmx_simd_real_t x,
426 gmx_simd_store_r(buf + 0*GMX_SIMD_REAL_WIDTH, x);
427 gmx_simd_store_r(buf + 1*GMX_SIMD_REAL_WIDTH, y);
428 gmx_simd_store_r(buf + 2*GMX_SIMD_REAL_WIDTH, z);
430 for (s = 0; s < GMX_SIMD_REAL_WIDTH; s++)
432 for (m = 0; m < DIM; m++)
434 v[is + s][m] = buf[m*GMX_SIMD_REAL_WIDTH + s];
/* NOTE(review): fragmentary extraction; some argument lines (e.g. the
 * second half of the iprod call) and braces are missing. Comments only. */
439 /* Calculate the constraint distance vectors r to project on from x.
440 * Determine the right-hand side of the matrix equation using quantity f.
441 * This function only differs from calc_dr_x_xp_simd below in that
442 * no constraint length is subtracted and no PBC is used for f.
444 static void gmx_simdcall
445 calc_dr_x_f_simd(int b0,
448 const rvec * gmx_restrict x,
449 const rvec * gmx_restrict f,
450 const real * gmx_restrict blc,
451 const pbc_simd_t * pbc_simd,
452 real * gmx_restrict vbuf1,
453 real * gmx_restrict vbuf2,
454 rvec * gmx_restrict r,
455 real * gmx_restrict rhs,
456 real * gmx_restrict sol)
460 assert(b0 % GMX_SIMD_REAL_WIDTH == 0);
462 for (bs = b0; bs < b1; bs += GMX_SIMD_REAL_WIDTH)
464 gmx_simd_real_t rx_S, ry_S, rz_S, n2_S, il_S;
465 gmx_simd_real_t fx_S, fy_S, fz_S, ip_S, rhs_S;
467 rvec_dist_to_simd(x, bs, bla, vbuf1, &rx_S, &ry_S, &rz_S);
469 pbc_correct_dx_simd(&rx_S, &ry_S, &rz_S, pbc_simd);
471 n2_S = gmx_simd_norm2_r(rx_S, ry_S, rz_S);
472 il_S = gmx_simd_invsqrt_r(n2_S);
474 rx_S = gmx_simd_mul_r(rx_S, il_S);
475 ry_S = gmx_simd_mul_r(ry_S, il_S);
476 rz_S = gmx_simd_mul_r(rz_S, il_S);
478 copy_simd_vec_to_rvec(rx_S, ry_S, rz_S, vbuf1, r, bs);
480 rvec_dist_to_simd(f, bs, bla, vbuf2, &fx_S, &fy_S, &fz_S);
482 ip_S = gmx_simd_iprod_r(rx_S, ry_S, rz_S,
485 rhs_S = gmx_simd_mul_r(gmx_simd_load_r(blc + bs), ip_S);
487 gmx_simd_store_r(rhs + bs, rhs_S);
488 gmx_simd_store_r(sol + bs, rhs_S);
491 #endif /* LINCS_SIMD */
/* NOTE(review): fragmentary extraction -- openmp barriers, several
 * assignments, and brace lines are missing from this view. Comments
 * only; visible code bytes untouched. */
493 /* LINCS projection, works on derivatives of the coordinates */
494 static void do_lincsp(rvec *x, rvec *f, rvec *fp, t_pbc *pbc,
495 struct gmx_lincsdata *lincsd, int th,
497 int econq, gmx_bool bCalcDHDL,
498 gmx_bool bCalcVir, tensor rmdf)
501 int *bla, *blnr, *blbnb;
503 real *blc, *blmf, *blcc, *rhs1, *rhs2, *sol;
505 b0 = lincsd->th[th].b0;
506 b1 = lincsd->th[th].b1;
511 blbnb = lincsd->blbnb;
512 if (econq != econqForce)
514 /* Use mass-weighted parameters */
520 /* Use non mass-weighted parameters */
522 blmf = lincsd->blmf1;
524 blcc = lincsd->tmpncc;
531 /* This SIMD code does the same as the plain-C code after the #else.
532 * The only difference is that we always call pbc code, as with SIMD
533 * the overhead of pbc computation (when not needed) is small.
537 /* Convert the pbc struct for SIMD */
538 set_pbc_simd(pbc, &pbc_simd);
540 /* Compute normalized x i-j vectors, store in r.
541 * Compute the inner product of r and xp i-j and store in rhs1.
543 calc_dr_x_f_simd(b0, b1, bla, x, f, blc,
545 lincsd->th[th].simd_buf,
546 lincsd->th[th].simd_buf + GMX_SIMD_REAL_WIDTH*DIM,
549 #else /* LINCS_SIMD */
551 /* Compute normalized i-j vectors */
554 for (b = b0; b < b1; b++)
558 pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
564 for (b = b0; b < b1; b++)
568 rvec_sub(x[bla[2*b]], x[bla[2*b+1]], dx);
570 } /* 16 ncons flops */
573 for (b = b0; b < b1; b++)
580 mvb = blc[b]*(r[b][0]*(f[i][0] - f[j][0]) +
581 r[b][1]*(f[i][1] - f[j][1]) +
582 r[b][2]*(f[i][2] - f[j][2]));
588 #endif /* LINCS_SIMD */
590 /* We need a barrier, since the matrix construction below
591 * can access entries in r of other threads.
595 /* Construct the (sparse) LINCS matrix */
596 for (b = b0; b < b1; b++)
600 for (n = blnr[b]; n < blnr[b+1]; n++)
602 blcc[n] = blmf[n]*iprod(r[b], r[blbnb[n]]);
605 /* Together: 23*ncons + 6*nrtot flops */
607 lincs_matrix_expand(lincsd, b0, b1, blcc, rhs1, rhs2, sol);
608 /* nrec*(ncons+2*nrtot) flops */
610 if (econq == econqDeriv_FlexCon)
612 /* We only want to constrain the flexible constraints,
613 * so we mask out the normal ones by setting sol to 0.
615 for (b = b0; b < b1; b++)
617 if (!(lincsd->bllen0[b] == 0 && lincsd->ddist[b] == 0))
624 /* We multiply sol by blc, so we can use lincs_update_atoms for OpenMP */
625 for (b = b0; b < b1; b++)
630 /* When constraining forces, we should not use mass weighting,
631 * so we pass invmass=NULL, which results in the use of 1 for all atoms.
633 lincs_update_atoms(lincsd, th, 1.0, sol, r,
634 (econq != econqForce) ? invmass : NULL, fp);
641 for (b = b0; b < b1; b++)
643 dhdlambda -= sol[b]*lincsd->ddist[b];
646 lincsd->th[th].dhdlambda = dhdlambda;
651 /* Constraint virial,
652 * determines sum r_bond x delta f,
653 * where delta f is the constraint correction
654 * of the quantity that is being constrained.
656 for (b = b0; b < b1; b++)
661 mvb = lincsd->bllen[b]*sol[b];
662 for (i = 0; i < DIM; i++)
665 for (j = 0; j < DIM; j++)
667 rmdf[i][j] += tmp1*r[b][j];
670 } /* 23 ncons flops */
/* NOTE(review): fragmentary extraction; some parameter lines and braces
 * are missing. Comments only; code bytes untouched. */
675 /* Calculate the constraint distance vectors r to project on from x.
676 * Determine the right-hand side of the matrix equation using coordinates xp.
678 static void gmx_simdcall
679 calc_dr_x_xp_simd(int b0,
682 const rvec * gmx_restrict x,
683 const rvec * gmx_restrict xp,
684 const real * gmx_restrict bllen,
685 const real * gmx_restrict blc,
686 const pbc_simd_t * pbc_simd,
687 real * gmx_restrict vbuf1,
688 real * gmx_restrict vbuf2,
689 rvec * gmx_restrict r,
690 real * gmx_restrict rhs,
691 real * gmx_restrict sol)
695 assert(b0 % GMX_SIMD_REAL_WIDTH == 0);
697 for (bs = b0; bs < b1; bs += GMX_SIMD_REAL_WIDTH)
699 gmx_simd_real_t rx_S, ry_S, rz_S, n2_S, il_S;
700 gmx_simd_real_t rxp_S, ryp_S, rzp_S, ip_S, rhs_S;
702 rvec_dist_to_simd(x, bs, bla, vbuf1, &rx_S, &ry_S, &rz_S);
704 pbc_correct_dx_simd(&rx_S, &ry_S, &rz_S, pbc_simd);
706 n2_S = gmx_simd_norm2_r(rx_S, ry_S, rz_S);
707 il_S = gmx_simd_invsqrt_r(n2_S);
709 rx_S = gmx_simd_mul_r(rx_S, il_S);
710 ry_S = gmx_simd_mul_r(ry_S, il_S);
711 rz_S = gmx_simd_mul_r(rz_S, il_S);
713 copy_simd_vec_to_rvec(rx_S, ry_S, rz_S, vbuf1, r, bs);
715 rvec_dist_to_simd(xp, bs, bla, vbuf2, &rxp_S, &ryp_S, &rzp_S);
717 pbc_correct_dx_simd(&rxp_S, &ryp_S, &rzp_S, pbc_simd);
719 ip_S = gmx_simd_iprod_r(rx_S, ry_S, rz_S,
720 rxp_S, ryp_S, rzp_S);
722 rhs_S = gmx_simd_mul_r(gmx_simd_load_r(blc + bs),
723 gmx_simd_sub_r(ip_S, gmx_simd_load_r(bllen + bs)));
725 gmx_simd_store_r(rhs + bs, rhs_S);
726 gmx_simd_store_r(sol + bs, rhs_S);
729 #endif /* LINCS_SIMD */
/* NOTE(review): fragmentary extraction; the pbc/no-pbc branch structure,
 * warning-flag assignment, and rhs/sol stores are partly on missing
 * lines. Comments only; code bytes untouched. */
731 /* Determine the distances and right-hand side for the next iteration */
732 static void calc_dist_iter(int b0,
735 const rvec * gmx_restrict xp,
736 const real * gmx_restrict bllen,
737 const real * gmx_restrict blc,
740 real * gmx_restrict rhs,
741 real * gmx_restrict sol,
746 for (b = b0; b < b1; b++)
748 real len, len2, dlen2, mvb;
754 pbc_dx_aiuc(pbc, xp[bla[2*b]], xp[bla[2*b+1]], dx);
758 rvec_sub(xp[bla[2*b]], xp[bla[2*b+1]], dx);
761 dlen2 = 2*len2 - norm2(dx);
762 if (dlen2 < wfac*len2)
764 /* not race free - see detailed comment in caller */
769 mvb = blc[b]*(len - dlen2*gmx_invsqrt(dlen2));
777 } /* 20*ncons flops */
/* NOTE(review): fragmentary extraction; some parameter lines, braces and
 * the final *bWarn store are missing. Comments only; code untouched. */
781 /* As the function above, but using SIMD intrinsics */
782 static void gmx_simdcall
783 calc_dist_iter_simd(int b0,
786 const rvec * gmx_restrict x,
787 const real * gmx_restrict bllen,
788 const real * gmx_restrict blc,
789 const pbc_simd_t * pbc_simd,
791 real * gmx_restrict vbuf,
792 real * gmx_restrict rhs,
793 real * gmx_restrict sol,
796 gmx_simd_real_t min_S = gmx_simd_set1_r(GMX_REAL_MIN);
797 gmx_simd_real_t two_S = gmx_simd_set1_r(2.0);
798 gmx_simd_real_t wfac_S = gmx_simd_set1_r(wfac);
799 gmx_simd_bool_t warn_S;
803 assert(b0 % GMX_SIMD_REAL_WIDTH == 0);
805 /* Initialize all to FALSE */
806 warn_S = gmx_simd_cmplt_r(two_S, gmx_simd_setzero_r());
808 for (bs = b0; bs < b1; bs += GMX_SIMD_REAL_WIDTH)
810 gmx_simd_real_t rx_S, ry_S, rz_S, n2_S;
811 gmx_simd_real_t len_S, len2_S, dlen2_S, lc_S, blc_S;
813 rvec_dist_to_simd(x, bs, bla, vbuf, &rx_S, &ry_S, &rz_S);
815 pbc_correct_dx_simd(&rx_S, &ry_S, &rz_S, pbc_simd);
817 n2_S = gmx_simd_norm2_r(rx_S, ry_S, rz_S);
819 len_S = gmx_simd_load_r(bllen + bs);
820 len2_S = gmx_simd_mul_r(len_S, len_S);
822 dlen2_S = gmx_simd_fmsub_r(two_S, len2_S, n2_S);
824 warn_S = gmx_simd_or_b(warn_S,
825 gmx_simd_cmplt_r(dlen2_S,
826 gmx_simd_mul_r(wfac_S, len2_S)));
828 /* Avoid 1/0 by taking the max with REAL_MIN.
829 * Note: when dlen2 is close to zero (90 degree constraint rotation),
830 * the accuracy of the algorithm is no longer relevant.
832 dlen2_S = gmx_simd_max_r(dlen2_S, min_S);
834 lc_S = gmx_simd_fnmadd_r(dlen2_S, gmx_simd_invsqrt_r(dlen2_S), len_S);
836 blc_S = gmx_simd_load_r(blc + bs);
838 lc_S = gmx_simd_mul_r(blc_S, lc_S);
840 gmx_simd_store_r(rhs + bs, lc_S);
841 gmx_simd_store_r(sol + bs, lc_S);
844 if (gmx_simd_anytrue_b(warn_S))
849 #endif /* LINCS_SIMD */
/* Core LINCS solve: project out constraint violations from xp, iterate
 * nIter times with the rotation (centripetal) correction, optionally
 * update velocities, dH/dlambda, and the constraint virial.
 * NOTE(review): fragmentary extraction -- barriers, brace lines and many
 * assignments are missing from this view. Comments only; code untouched. */
851 static void do_lincs(rvec *x, rvec *xp, matrix box, t_pbc *pbc,
852 struct gmx_lincsdata *lincsd, int th,
856 real wangle, gmx_bool *bWarn,
857 real invdt, rvec * gmx_restrict v,
858 gmx_bool bCalcVir, tensor vir_r_m_dr)
860 int b0, b1, b, i, j, n, iter;
861 int *bla, *blnr, *blbnb;
863 real *blc, *blmf, *bllen, *blcc, *rhs1, *rhs2, *sol, *blc_sol, *mlambda;
866 b0 = lincsd->th[th].b0;
867 b1 = lincsd->th[th].b1;
872 blbnb = lincsd->blbnb;
875 bllen = lincsd->bllen;
876 blcc = lincsd->tmpncc;
880 blc_sol = lincsd->tmp4;
881 mlambda = lincsd->mlambda;
883 if (DOMAINDECOMP(cr) && cr->dd->constraints)
885 nlocat = dd_constraints_nlocalatoms(cr->dd);
894 /* This SIMD code does the same as the plain-C code after the #else.
895 * The only difference is that we always call pbc code, as with SIMD
896 * the overhead of pbc computation (when not needed) is small.
900 /* Convert the pbc struct for SIMD */
901 set_pbc_simd(pbc, &pbc_simd);
903 /* Compute normalized x i-j vectors, store in r.
904 * Compute the inner product of r and xp i-j and store in rhs1.
906 calc_dr_x_xp_simd(b0, b1, bla, x, xp, bllen, blc,
908 lincsd->th[th].simd_buf,
909 lincsd->th[th].simd_buf + GMX_SIMD_REAL_WIDTH*DIM,
912 #else /* LINCS_SIMD */
916 /* Compute normalized i-j vectors */
917 for (b = b0; b < b1; b++)
922 pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
925 pbc_dx_aiuc(pbc, xp[bla[2*b]], xp[bla[2*b+1]], dx);
926 mvb = blc[b]*(iprod(r[b], dx) - bllen[b]);
933 /* Compute normalized i-j vectors */
934 for (b = b0; b < b1; b++)
936 real tmp0, tmp1, tmp2, rlen, mvb;
940 tmp0 = x[i][0] - x[j][0];
941 tmp1 = x[i][1] - x[j][1];
942 tmp2 = x[i][2] - x[j][2];
943 rlen = gmx_invsqrt(tmp0*tmp0 + tmp1*tmp1 + tmp2*tmp2);
951 mvb = blc[b]*(r[b][0]*(xp[i][0] - xp[j][0]) +
952 r[b][1]*(xp[i][1] - xp[j][1]) +
953 r[b][2]*(xp[i][2] - xp[j][2]) - bllen[b]);
958 /* Together: 26*ncons + 6*nrtot flops */
961 #endif /* LINCS_SIMD */
963 /* We need a barrier, since the matrix construction below
964 * can access entries in r of other threads.
968 /* Construct the (sparse) LINCS matrix */
969 for (b = b0; b < b1; b++)
971 for (n = blnr[b]; n < blnr[b+1]; n++)
973 blcc[n] = blmf[n]*iprod(r[b], r[blbnb[n]]);
976 /* Together: 26*ncons + 6*nrtot flops */
978 lincs_matrix_expand(lincsd, b0, b1, blcc, rhs1, rhs2, sol);
979 /* nrec*(ncons+2*nrtot) flops */
982 for (b = b0; b < b1; b++)
984 mlambda[b] = blc[b]*sol[b];
987 for (b = b0; b < b1; b += GMX_SIMD_REAL_WIDTH)
989 gmx_simd_store_r(mlambda + b,
990 gmx_simd_mul_r(gmx_simd_load_r(blc + b),
991 gmx_simd_load_r(sol + b)));
995 /* Update the coordinates */
996 lincs_update_atoms(lincsd, th, 1.0, mlambda, r, invmass, xp);
999 ******** Correction for centripetal effects ********
1004 wfac = cos(DEG2RAD*wangle);
1007 for (iter = 0; iter < lincsd->nIter; iter++)
1009 if ((lincsd->bCommIter && DOMAINDECOMP(cr) && cr->dd->constraints))
1014 /* Communicate the corrected non-local coordinates */
1015 if (DOMAINDECOMP(cr))
1017 dd_move_x_constraints(cr->dd, box, xp, NULL, FALSE);
1025 calc_dist_iter_simd(b0, b1, bla, xp, bllen, blc, &pbc_simd, wfac,
1026 lincsd->th[th].simd_buf, rhs1, sol, bWarn);
1028 calc_dist_iter(b0, b1, bla, xp, bllen, blc, pbc, wfac,
1030 /* 20*ncons flops */
1033 lincs_matrix_expand(lincsd, b0, b1, blcc, rhs1, rhs2, sol);
1034 /* nrec*(ncons+2*nrtot) flops */
1037 for (b = b0; b < b1; b++)
1041 mvb = blc[b]*sol[b];
1046 for (b = b0; b < b1; b += GMX_SIMD_REAL_WIDTH)
1048 gmx_simd_real_t mvb;
1050 mvb = gmx_simd_mul_r(gmx_simd_load_r(blc + b),
1051 gmx_simd_load_r(sol + b));
1052 gmx_simd_store_r(blc_sol + b, mvb);
1053 gmx_simd_store_r(mlambda + b,
1054 gmx_simd_add_r(gmx_simd_load_r(mlambda + b), mvb));
1058 /* Update the coordinates */
1059 lincs_update_atoms(lincsd, th, 1.0, blc_sol, r, invmass, xp);
1061 /* nit*ncons*(37+9*nrec) flops */
1065 /* Update the velocities */
1066 lincs_update_atoms(lincsd, th, invdt, mlambda, r, invmass, v);
1067 /* 16 ncons flops */
1070 if (nlocat != NULL && (bCalcDHDL || bCalcVir))
1072 /* In lincs_update_atoms threads might cross-read mlambda */
1075 /* Only account for local atoms */
1076 for (b = b0; b < b1; b++)
1078 mlambda[b] *= 0.5*nlocat[b];
1087 for (b = b0; b < b1; b++)
1089 /* Note that this is dhdl*dt^2, the dt^2 factor is corrected
1090 * later after the contributions are reduced over the threads.
1092 dhdl -= lincsd->mlambda[b]*lincsd->ddist[b];
1094 lincsd->th[th].dhdlambda = dhdl;
1099 /* Constraint virial */
1100 for (b = b0; b < b1; b++)
1104 tmp0 = -bllen[b]*mlambda[b];
1105 for (i = 0; i < DIM; i++)
1107 tmp1 = tmp0*r[b][i];
1108 for (j = 0; j < DIM; j++)
1110 vir_r_m_dr[i][j] -= tmp1*r[b][j];
1113 } /* 22 ncons flops */
1117 * 26*ncons + 6*nrtot + nrec*(ncons+2*nrtot)
1118 * + nit * (20*ncons + nrec*(ncons+2*nrtot) + 17 ncons)
1120 * (26+nrec)*ncons + (6+2*nrec)*nrtot
1121 * + nit * ((37+nrec)*ncons + 2*nrec*nrtot)
1123 * (63+nrec)*ncons + (6+4*nrec)*nrtot
/* Fill the mass-dependent arrays (blc, blc1) and the coupling coefficient
 * matrices (blmf, blmf1) from invmass, and detect constraint triangles.
 * Stores lambda in matlam so callers can tell which FE lambda the masses
 * correspond to.
 * NOTE(review): fragmentary extraction -- sign/center selection logic and
 * several brace lines are missing. Comments only; code bytes untouched. */
1127 void set_lincs_matrix(struct gmx_lincsdata *li, real *invmass, real lambda)
1129 int i, a1, a2, n, k, sign, center;
1131 const real invsqrt2 = 0.7071067811865475244;
1133 for (i = 0; (i < li->nc); i++)
1136 a2 = li->bla[2*i+1];
1137 li->blc[i] = gmx_invsqrt(invmass[a1] + invmass[a2]);
1138 li->blc1[i] = invsqrt2;
1141 /* Construct the coupling coefficient matrix blmf */
1143 li->ncc_triangle = 0;
1144 for (i = 0; (i < li->nc); i++)
1147 a2 = li->bla[2*i+1];
1148 for (n = li->blnr[i]; (n < li->blnr[i+1]); n++)
1151 if (a1 == li->bla[2*k] || a2 == li->bla[2*k+1])
1159 if (a1 == li->bla[2*k] || a1 == li->bla[2*k+1])
1169 li->blmf[n] = sign*invmass[center]*li->blc[i]*li->blc[k];
1170 li->blmf1[n] = sign*0.5;
1171 if (li->ncg_triangle > 0)
1173 /* Look for constraint triangles */
1174 for (nk = li->blnr[k]; (nk < li->blnr[k+1]); nk++)
1177 if (kk != i && kk != k &&
1178 (li->bla[2*kk] == end || li->bla[2*kk+1] == end))
1180 if (li->ntriangle == 0 ||
1181 li->triangle[li->ntriangle-1] < i)
1183 /* Add this constraint to the triangle list */
1184 li->triangle[li->ntriangle] = i;
1185 li->tri_bits[li->ntriangle] = 0;
1187 if (li->blnr[i+1] - li->blnr[i] > static_cast<int>(sizeof(li->tri_bits[0])*8 - 1))
1189 gmx_fatal(FARGS, "A constraint is connected to %d constraints, this is more than the %d allowed for constraints participating in triangles",
1190 li->blnr[i+1] - li->blnr[i],
1191 sizeof(li->tri_bits[0])*8-1);
1194 li->tri_bits[li->ntriangle-1] |= (1<<(n-li->blnr[i]));
1204 fprintf(debug, "Of the %d constraints %d participate in triangles\n",
1205 li->nc, li->ntriangle);
1206 fprintf(debug, "There are %d couplings of which %d in triangles\n",
1207 li->ncc, li->ncc_triangle);
1211 * so we know with which lambda value the masses have been set.
1213 li->matlam = lambda;
/* Count the number of constraints that are part of a triangle of three
 * mutually connected constraints, scanning both F_CONSTR and F_CONSTRNC.
 * NOTE(review): fragmentary extraction; atom-index assignments (a00,
 * a01, ...) and the ncon_triangle accumulation are on missing lines. */
1216 static int count_triangle_constraints(t_ilist *ilist, t_blocka *at2con)
1218 int ncon1, ncon_tot;
1219 int c0, a00, a01, n1, c1, a10, a11, ac1, n2, c2, a20, a21;
1222 t_iatom *ia1, *ia2, *iap;
1224 ncon1 = ilist[F_CONSTR].nr/3;
1225 ncon_tot = ncon1 + ilist[F_CONSTRNC].nr/3;
1227 ia1 = ilist[F_CONSTR].iatoms;
1228 ia2 = ilist[F_CONSTRNC].iatoms;
1231 for (c0 = 0; c0 < ncon_tot; c0++)
1234 iap = constr_iatomptr(ncon1, ia1, ia2, c0);
1237 for (n1 = at2con->index[a01]; n1 < at2con->index[a01+1]; n1++)
1242 iap = constr_iatomptr(ncon1, ia1, ia2, c1);
1253 for (n2 = at2con->index[ac1]; n2 < at2con->index[ac1+1]; n2++)
1256 if (c2 != c0 && c2 != c1)
1258 iap = constr_iatomptr(ncon1, ia1, ia2, c2);
1261 if (a20 == a00 || a21 == a00)
1275 return ncon_triangle;
/* Return TRUE when any constraint has further constraints connected at
 * both of its atoms, i.e. a chain of more than two sequential
 * constraints exists (used to decide on per-iteration communication).
 * NOTE(review): fragmentary extraction; the a1/a2 assignments from iap
 * are on missing lines. Comments only; code bytes untouched. */
1278 static gmx_bool more_than_two_sequential_constraints(const t_ilist *ilist,
1279 const t_blocka *at2con)
1281 t_iatom *ia1, *ia2, *iap;
1282 int ncon1, ncon_tot, c;
1284 gmx_bool bMoreThanTwoSequentialConstraints;
1286 ncon1 = ilist[F_CONSTR].nr/3;
1287 ncon_tot = ncon1 + ilist[F_CONSTRNC].nr/3;
1289 ia1 = ilist[F_CONSTR].iatoms;
1290 ia2 = ilist[F_CONSTRNC].iatoms;
1292 bMoreThanTwoSequentialConstraints = FALSE;
1293 for (c = 0; c < ncon_tot && !bMoreThanTwoSequentialConstraints; c++)
1295 iap = constr_iatomptr(ncon1, ia1, ia2, c);
1298 /* Check if this constraint has constraints connected at both atoms */
1299 if (at2con->index[a1+1] - at2con->index[a1] > 1 &&
1300 at2con->index[a2+1] - at2con->index[a2] > 1)
1302 bMoreThanTwoSequentialConstraints = TRUE;
1306 return bMoreThanTwoSequentialConstraints;
/* qsort()-style comparator for ints, ascending order.
 *
 * Returns <0, 0 or >0 as *a is less than, equal to or greater than *b.
 * Implemented with comparisons rather than the classic subtraction
 * idiom, because (*a - *b) overflows (undefined behavior) whenever the
 * two values differ by more than INT_MAX.
 */
static int int_comp(const void *a, const void *b)
{
    const int ia = *(const int *)a;
    const int ib = *(const int *)b;

    /* (x > y) - (x < y) yields -1, 0 or 1 without any overflow risk */
    return (ia > ib) - (ia < ib);
}
/* Allocate and initialize the LINCS data structure from the global
 * topology: count constraints and triangles, decide whether parallel
 * LINCS must communicate each iteration, set up per-thread buffers, and
 * print citations/diagnostics to the log.
 * NOTE(review): fragmentary extraction -- snew(li), several assignments
 * and brace lines are missing from this view. Comments only. */
1314 gmx_lincsdata_t init_lincs(FILE *fplog, gmx_mtop_t *mtop,
1315 int nflexcon_global, t_blocka *at2con,
1316 gmx_bool bPLINCS, int nIter, int nProjOrder)
1318 struct gmx_lincsdata *li;
1320 gmx_moltype_t *molt;
1324 fprintf(fplog, "\nInitializing%s LINear Constraint Solver\n",
1325 bPLINCS ? " Parallel" : "");
1331 gmx_mtop_ftype_count(mtop, F_CONSTR) +
1332 gmx_mtop_ftype_count(mtop, F_CONSTRNC);
1333 li->ncg_flex = nflexcon_global;
1336 li->nOrder = nProjOrder;
1338 li->ncg_triangle = 0;
1339 li->bCommIter = FALSE;
1340 for (mb = 0; mb < mtop->nmolblock; mb++)
1342 molt = &mtop->moltype[mtop->molblock[mb].type];
1344 mtop->molblock[mb].nmol*
1345 count_triangle_constraints(molt->ilist,
1346 &at2con[mtop->molblock[mb].type]);
1347 if (bPLINCS && li->bCommIter == FALSE)
1349 /* Check if we need to communicate not only before LINCS,
1350 * but also before each iteration.
1351 * The check for only two sequential constraints is only
1352 * useful for the common case of H-bond only constraints.
1353 * With more effort we could also make it useful for small
1354 * molecules with nr. sequential constraints <= nOrder-1.
1356 li->bCommIter = (li->nOrder < 1 || more_than_two_sequential_constraints(molt->ilist, &at2con[mtop->molblock[mb].type]));
1359 if (debug && bPLINCS)
1361 fprintf(debug, "PLINCS communication before each iteration: %d\n",
1365 /* LINCS can run on any number of threads.
1366 * Currently the number is fixed for the whole simulation,
1367 * but it could be set in set_lincs().
1369 li->nth = gmx_omp_nthreads_get(emntLINCS);
1372 fprintf(debug, "LINCS: using %d threads\n", li->nth);
1380 /* Allocate an extra element for "thread-overlap" constraints */
1381 snew(li->th, li->nth+1);
1384 #pragma omp parallel for num_threads(li->nth)
1385 for (th = 0; th < li->nth; th++)
1387 /* Per thread SIMD load buffer for loading 2 simd_width rvecs */
1388 snew_aligned(li->th[th].simd_buf, 2*simd_width*DIM,
1392 if (bPLINCS || li->ncg_triangle > 0)
1394 please_cite(fplog, "Hess2008a");
1398 please_cite(fplog, "Hess97a");
1403 fprintf(fplog, "The number of constraints is %d\n", li->ncg);
1406 fprintf(fplog, "There are inter charge-group constraints,\n"
1407 "will communicate selected coordinates each lincs iteration\n");
1409 if (li->ncg_triangle > 0)
1412 "%d constraints are involved in constraint triangles,\n"
1413 "will apply an additional matrix expansion of order %d for couplings\n"
1414 "between constraints inside triangles\n",
1415 li->ncg_triangle, li->nOrder);
/* NOTE(review): fragmentary extraction -- brace lines, the atf alias
 * assignment and parts of the reduction loop are missing from this view.
 * Comments only; visible code bytes untouched. */
1422 /* Sets up the work division over the threads */
1423 static void lincs_thread_setup(struct gmx_lincsdata *li, int natoms)
1425 lincs_thread_t *li_m;
1430 if (natoms > li->atf_nalloc)
1432 li->atf_nalloc = over_alloc_large(natoms);
1433 srenew(li->atf, li->atf_nalloc);
1437 /* Clear the atom flags */
1438 for (a = 0; a < natoms; a++)
1440 bitmask_clear(&atf[a]);
1443 if (li->nth > BITMASK_SIZE)
1445 gmx_fatal(FARGS, "More than %d threads is not supported for LINCS.", BITMASK_SIZE);
1448 int block = simd_width;
1450 for (th = 0; th < li->nth; th++)
1452 lincs_thread_t *li_th;
1455 li_th = &li->th[th];
1457 /* The constraints are divided equally over the threads,
1458 * in chunks of size block to ensure alignment for SIMD.
1460 li_th->b0 = (((li->nc + block - 1)* th )/(block*li->nth))*block;
1461 li_th->b1 = (((li->nc + block - 1)*(th+1))/(block*li->nth))*block;
1462 li_th->b1 = std::min<int>(li_th->b1, li->nc);
1464 /* For each atom set a flag for constraints from each */
1465 for (b = li_th->b0; b < li_th->b1; b++)
1467 bitmask_set_bit(&atf[li->bla[b*2]], th);
1468 bitmask_set_bit(&atf[li->bla[b*2+1]], th);
1472 #pragma omp parallel for num_threads(li->nth) schedule(static)
1473 for (th = 0; th < li->nth; th++)
1475 lincs_thread_t *li_th;
1479 li_th = &li->th[th];
1481 if (li_th->b1 - li_th->b0 > li_th->ind_nalloc)
1483 li_th->ind_nalloc = over_alloc_large(li_th->b1-li_th->b0);
1484 srenew(li_th->ind, li_th->ind_nalloc);
1485 srenew(li_th->ind_r, li_th->ind_nalloc);
1488 bitmask_init_low_bits(&mask, th);
1492 for (b = li_th->b0; b < li_th->b1; b++)
1494 /* We let the constraint with the lowest thread index
1495 * operate on atoms with constraints from multiple threads.
1497 if (bitmask_is_disjoint(atf[li->bla[b*2]], mask) &&
1498 bitmask_is_disjoint(atf[li->bla[b*2+1]], mask))
1500 /* Add the constraint to the local atom update index */
1501 li_th->ind[li_th->nind++] = b;
1505 /* Add the constraint to the rest block */
1506 li_th->ind_r[li_th->nind_r++] = b;
1511 /* We need to copy all constraints which have not been assigned
1512 * to a thread to a separate list which will be handled by one thread.
1514 li_m = &li->th[li->nth];
1517 for (th = 0; th < li->nth; th++)
1519 lincs_thread_t *li_th;
1522 li_th = &li->th[th];
1524 if (li_m->nind + li_th->nind_r > li_m->ind_nalloc)
1526 li_m->ind_nalloc = over_alloc_large(li_m->nind+li_th->nind_r);
1527 srenew(li_m->ind, li_m->ind_nalloc);
1530 for (b = 0; b < li_th->nind_r; b++)
1532 li_m->ind[li_m->nind++] = li_th->ind_r[b];
1537 fprintf(debug, "LINCS thread %d: %d constraints\n",
1544 fprintf(debug, "LINCS thread r: %d constraints\n",
1549 /* There is no realloc with alignment, so here we make one for reals.
1550 * Note that this function does not preserve the contents of the memory.
1552 static void resize_real_aligned(real **ptr, int nelem)
1554 sfree_aligned(*ptr);
1555 snew_aligned(*ptr, nelem, align_bytes);
/* Initialize the LINCS data structures in li from the local topology idef.
 *
 * Fills the constraint atom pairs (bla), the reference lengths for
 * topology states A/B (bllen0, ddist, bllen), the constraint-connectivity
 * matrix (blbnb with row offsets blnr), pads the arrays up to a multiple
 * of simd_width, and finally sets up the per-thread constraint ranges and
 * the coupling matrix.  Must be called again whenever the local topology
 * changes (e.g. after domain decomposition repartitioning).
 *
 * NOTE(review): several structural lines (braces, declarations of
 * at2con/iatom/a1/a2/type, a few statements) are not visible in this
 * listing; comments below only describe what the visible code shows.
 */
void set_lincs(t_idef *idef, t_mdatoms *md,
               gmx_bool bDynamics, t_commrec *cr,
               struct gmx_lincsdata *li)
    int start, natoms, nflexcon;
    int i, k, ncc_alloc, ncon_tot, con, nconnect, concon;
    real lenA = 0, lenB;

    /* Zero the thread index ranges.
     * Otherwise without local constraints we could return with old ranges.
     */
    for (i = 0; i < li->nth; i++)
    /* Also clear the "leftover" entry th[nth] that holds constraints
     * not assigned to any thread (see lincs_thread_setup). */
    li->th[li->nth].nind = 0;

    /* This is the local topology, so there are only F_CONSTR constraints */
    if (idef->il[F_CONSTR].nr == 0)
    /* There are no constraints,
     * we do not need to fill any data structures.
     */
    fprintf(debug, "Building the LINCS connectivity\n");
    if (DOMAINDECOMP(cr))
    /* With DD and constraints that can connect atoms on different ranks,
     * the constrainable atom range comes from the DD constraint setup;
     * otherwise only home atoms carry constraints. */
    if (cr->dd->constraints)
    dd_get_constraint_range(cr->dd, &start, &natoms);
    natoms = cr->dd->nat_home;
    /* No DD: all home atoms are local */
    natoms = md->homenr;
    /* Build the atom -> constraint lookup used for the connectivity below */
    at2con = make_at2con(start, natoms, idef->il, idef->iparams, bDynamics,
    /* Each F_CONSTR entry in iatoms is 3 ints: type, atom1, atom2 */
    ncon_tot = idef->il[F_CONSTR].nr/3;

    /* Ensure we have enough space for aligned loads beyond ncon_tot */
    if (ncon_tot + simd_width > li->nc_alloc || li->nc_alloc == 0)
    li->nc_alloc = over_alloc_dd(ncon_tot + simd_width);
    /* Note: resize_real_aligned does NOT preserve contents; all these
     * arrays are refilled below or by set_lincs_matrix. */
    resize_real_aligned(&li->bllen0, li->nc_alloc);
    resize_real_aligned(&li->ddist, li->nc_alloc);
    srenew(li->bla, 2*li->nc_alloc);
    resize_real_aligned(&li->blc, li->nc_alloc);
    resize_real_aligned(&li->blc1, li->nc_alloc);
    srenew(li->blnr, li->nc_alloc+1);
    resize_real_aligned(&li->bllen, li->nc_alloc);
    srenew(li->tmpv, li->nc_alloc);
    resize_real_aligned(&li->tmp1, li->nc_alloc);
    resize_real_aligned(&li->tmp2, li->nc_alloc);
    resize_real_aligned(&li->tmp3, li->nc_alloc);
    resize_real_aligned(&li->tmp4, li->nc_alloc);
    resize_real_aligned(&li->mlambda, li->nc_alloc);
    if (li->ncg_triangle > 0)
    /* This is allocating too much, but it is difficult to improve */
    srenew(li->triangle, li->nc_alloc);
    srenew(li->tri_bits, li->nc_alloc);
    iatom = idef->il[F_CONSTR].iatoms;
    ncc_alloc = li->ncc_alloc;
    li->blnr[con] = nconnect;
    /* Loop over all constraints in the local topology; flexible ones may
     * be skipped, so con (filled-in count) can lag behind i. */
    for (i = 0; i < ncon_tot; i++)
    /* type/a1/a2 are read from iatom in lines not shown here — TODO confirm */
    lenA = idef->iparams[type].constr.dA;
    lenB = idef->iparams[type].constr.dB;
    /* Skip the flexible constraints when not doing dynamics */
    if (bDynamics || lenA != 0 || lenB != 0)
    li->bllen0[con] = lenA;
    /* ddist holds the B-A length difference for free-energy interpolation */
    li->ddist[con] = lenB - lenA;
    /* Set the length to the topology A length */
    li->bllen[con] = li->bllen0[con];
    li->bla[2*con] = a1;
    li->bla[2*con+1] = a2;
    /* Construct the constraint connection matrix blbnb:
     * record every other constraint sharing atom a1 or a2. */
    for (k = at2con.index[a1-start]; k < at2con.index[a1-start+1]; k++)
    concon = at2con.a[k];
    if (nconnect >= ncc_alloc)
    ncc_alloc = over_alloc_small(nconnect+1);
    srenew(li->blbnb, ncc_alloc);
    li->blbnb[nconnect++] = concon;
    for (k = at2con.index[a2-start]; k < at2con.index[a2-start+1]; k++)
    concon = at2con.a[k];
    /* Equivalent to the nconnect >= ncc_alloc check above */
    if (nconnect+1 > ncc_alloc)
    ncc_alloc = over_alloc_small(nconnect+1);
    srenew(li->blbnb, ncc_alloc);
    li->blbnb[nconnect++] = concon;
    /* Close this constraint's row in the connectivity matrix */
    li->blnr[con+1] = nconnect;

    /* Order the blbnb matrix to optimize memory access */
    qsort(&(li->blbnb[li->blnr[con]]), li->blnr[con+1]-li->blnr[con],
          sizeof(li->blbnb[0]), int_comp);
    /* Increase the constraint count */

    /* Copy the last atom pair indices and lengths for constraints
     * up to a multiple of simd_width, such that we can do all
     * SIMD operations without have to worry about end effects.
     * The padded entries duplicate the last real constraint, so the
     * SIMD tail only touches valid memory.
     */
    con_roundup = ((con + simd_width - 1)/simd_width)*simd_width;
    for (i = con; i < con_roundup; i++)
    li->bla[i*2 ] = li->bla[(con - 1)*2 ];
    li->bla[i*2 + 1] = li->bla[(con - 1)*2 + 1];
    li->bllen[i] = li->bllen[con - 1];
    done_blocka(&at2con);

    /* This is the real number of constraints,
     * without dynamics the flexible constraints are not present.
     */
    /* Total number of constraint couplings = last row offset */
    li->ncc = li->blnr[con];

    /* Since the matrix is static, we can free some memory */
    ncc_alloc = li->ncc;
    srenew(li->blbnb, ncc_alloc);
    if (ncc_alloc > li->ncc_alloc)
    li->ncc_alloc = ncc_alloc;
    /* Coupling-matrix work arrays, sized per coupling entry */
    srenew(li->blmf, li->ncc_alloc);
    srenew(li->blmf1, li->ncc_alloc);
    srenew(li->tmpncc, li->ncc_alloc);
    fprintf(debug, "Number of constraints is %d, couplings %d\n",
    /* Single-thread case: one range covering all constraints —
     * the branch selecting this vs lincs_thread_setup is not shown. */
    li->th[0].b1 = li->nc;
    /* Divide the constraints over the threads (multi-thread case) */
    lincs_thread_setup(li, md->nr);
    /* Fill the coupling matrix from the (possibly perturbed) masses */
    set_lincs_matrix(li, md->invmass, md->lambda);
/* Print a warning for every constraint that rotated more than wangle
 * degrees during the step, listing old/new lengths and the reference
 * length.  Calls gmx_fatal() on a non-finite bond length and aborts via
 * too_many_constraint_warnings() once *warncount exceeds maxwarn.
 *
 * fplog may be NULL (the fplog guards are in lines not shown here).
 */
static void lincs_warning(FILE *fplog,
                          gmx_domdec_t *dd, rvec *x, rvec *xprime, t_pbc *pbc,
                          int ncons, int *bla, real *bllen, real wangle,
                          int maxwarn, int *warncount)
    real wfac, d0, d1, cosine;
    /* Cosine threshold: rotation > wangle <=> cosine < wfac */
    wfac = cos(DEG2RAD*wangle);
    sprintf(buf, "bonds that rotated more than %g degrees:\n"
            " atom 1 atom 2 angle previous, current, constraint length\n",
    fprintf(stderr, "%s", buf);
    fprintf(fplog, "%s", buf);
    for (b = 0; b < ncons; b++)
    /* i/j are presumably the two atoms of constraint b from bla — the
     * assignments are in lines not shown.  v0 = old displacement,
     * v1 = new displacement; the pbc branch selection is also not shown. */
    pbc_dx_aiuc(pbc, x[i], x[j], v0);
    pbc_dx_aiuc(pbc, xprime[i], xprime[j], v1);
    /* Non-PBC fallback */
    rvec_sub(x[i], x[j], v0);
    rvec_sub(xprime[i], xprime[j], v1);
    /* d0/d1 are presumably |v0| and |v1|, computed in lines not shown */
    cosine = iprod(v0, v1)/(d0*d1);
    sprintf(buf, " %6d %6d %5.1f %8.4f %8.4f %8.4f\n",
            /* ddglatnr maps local to global atom numbers under DD */
            ddglatnr(dd, i), ddglatnr(dd, j),
            RAD2DEG*acos(cosine), d0, d1, bllen[b]);
    fprintf(stderr, "%s", buf);
    fprintf(fplog, "%s", buf);
    if (!gmx_isfinite(d1))
    gmx_fatal(FARGS, "Bond length not finite.");
    /* *warncount is presumably incremented per warning in a line not shown */
    if (*warncount > maxwarn)
    too_many_constraint_warnings(econtLINCS, *warncount);
/* Measure constraint deviations for the ncons constraints in bla/bllen
 * at coordinates x.
 *
 * Outputs:
 *   *ncons_loc - (weighted) number of constraints counted locally
 *   *ssd       - (weighted) sum of squared relative deviations
 *   *max       - largest relative deviation |len/bllen - 1|
 *   *imax      - constraint index of that largest deviation
 *
 * With domain decomposition, nlocat[b] counts this rank's local atoms of
 * constraint b; since each constraint has 2 atoms and can be counted on
 * two ranks, the factor 0.5 below compensates the double counting.
 */
static void cconerr(gmx_domdec_t *dd,
                    int ncons, int *bla, real *bllen, rvec *x, t_pbc *pbc,
                    real *ncons_loc, real *ssd, real *max, int *imax)
    real len, d, ma, ssd2, r2;
    int *nlocat, count, b, im;
    if (dd && dd->constraints)
    nlocat = dd_constraints_nlocalatoms(dd);
    for (b = 0; b < ncons; b++)
    /* PBC-aware displacement of constraint b's atom pair */
    pbc_dx_aiuc(pbc, x[bla[2*b]], x[bla[2*b+1]], dx);
    /* Non-PBC fallback (branch selection lines not shown) */
    rvec_sub(x[bla[2*b]], x[bla[2*b+1]], dx);
    /* r2 is presumably norm2(dx), computed in a line not shown;
     * len = r2/sqrt(r2) = |dx| using the fast inverse square root */
    len = r2*gmx_invsqrt(r2);
    /* Relative deviation from the reference length */
    d = fabs(len/bllen[b]-1);
    /* Track the maximum only over constraints with local atoms */
    if (d > ma && (nlocat == NULL || nlocat[b]))
    /* Weight by the number of local atoms under DD */
    ssd2 += nlocat[b]*d*d;
    /* Halve to undo the per-atom (double) counting when weighted */
    *ncons_loc = (nlocat ? 0.5 : 1)*count;
    *ssd = (nlocat ? 0.5 : 1)*ssd2;
1887 gmx_bool constrain_lincs(FILE *fplog, gmx_bool bLog, gmx_bool bEner,
1890 struct gmx_lincsdata *lincsd, t_mdatoms *md,
1892 rvec *x, rvec *xprime, rvec *min_proj,
1893 matrix box, t_pbc *pbc,
1894 real lambda, real *dvdlambda,
1895 real invdt, rvec *v,
1896 gmx_bool bCalcVir, tensor vir_r_m_dr,
1899 int maxwarn, int *warncount)
1902 char buf[STRLEN], buf2[22], buf3[STRLEN];
1904 real ncons_loc, p_ssd, p_max = 0;
1906 gmx_bool bOK, bWarn;
1910 /* This boolean should be set by a flag passed to this routine.
1911 * We can also easily check if any constraint length is changed,
1912 * if not dH/dlambda=0 and we can also set the boolean to FALSE.
1914 bCalcDHDL = (ir->efep != efepNO && dvdlambda != NULL);
1916 if (lincsd->nc == 0 && cr->dd == NULL)
1920 lincsd->rmsd_data[0] = 0;
1921 if (ir->eI == eiSD2 && v == NULL)
1929 lincsd->rmsd_data[i] = 0;
1935 if (econq == econqCoord)
1937 /* We can't use bCalcDHDL here, since NULL can be passed for dvdlambda
1938 * also with efep!=fepNO.
1940 if (ir->efep != efepNO)
1942 if (md->nMassPerturbed && lincsd->matlam != md->lambda)
1944 set_lincs_matrix(lincsd, md->invmass, md->lambda);
1947 for (i = 0; i < lincsd->nc; i++)
1949 lincsd->bllen[i] = lincsd->bllen0[i] + lambda*lincsd->ddist[i];
1953 if (lincsd->ncg_flex)
1955 /* Set the flexible constraint lengths to the old lengths */
1958 for (i = 0; i < lincsd->nc; i++)
1960 if (lincsd->bllen[i] == 0)
1962 pbc_dx_aiuc(pbc, x[lincsd->bla[2*i]], x[lincsd->bla[2*i+1]], dx);
1963 lincsd->bllen[i] = norm(dx);
1969 for (i = 0; i < lincsd->nc; i++)
1971 if (lincsd->bllen[i] == 0)
1974 sqrt(distance2(x[lincsd->bla[2*i]],
1975 x[lincsd->bla[2*i+1]]));
1983 cconerr(cr->dd, lincsd->nc, lincsd->bla, lincsd->bllen, xprime, pbc,
1984 &ncons_loc, &p_ssd, &p_max, &p_imax);
1987 /* This bWarn var can be updated by multiple threads
1988 * at the same time. But as we only need to detect
1989 * if a warning occured or not, this is not an issue.
1993 /* The OpenMP parallel region of constrain_lincs for coords */
1994 #pragma omp parallel num_threads(lincsd->nth)
1996 int th = gmx_omp_get_thread_num();
1998 clear_mat(lincsd->th[th].vir_r_m_dr);
2000 do_lincs(x, xprime, box, pbc, lincsd, th,
2003 ir->LincsWarnAngle, &bWarn,
2005 th == 0 ? vir_r_m_dr : lincsd->th[th].vir_r_m_dr);
2008 if (bLog && fplog && lincsd->nc > 0)
2010 fprintf(fplog, " Rel. Constraint Deviation: RMS MAX between atoms\n");
2011 fprintf(fplog, " Before LINCS %.6f %.6f %6d %6d\n",
2012 sqrt(p_ssd/ncons_loc), p_max,
2013 ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
2014 ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));
2018 cconerr(cr->dd, lincsd->nc, lincsd->bla, lincsd->bllen, xprime, pbc,
2019 &ncons_loc, &p_ssd, &p_max, &p_imax);
2020 /* Check if we are doing the second part of SD */
2021 if (ir->eI == eiSD2 && v == NULL)
2029 lincsd->rmsd_data[0] = ncons_loc;
2030 lincsd->rmsd_data[i] = p_ssd;
2034 lincsd->rmsd_data[0] = 0;
2035 lincsd->rmsd_data[1] = 0;
2036 lincsd->rmsd_data[2] = 0;
2038 if (bLog && fplog && lincsd->nc > 0)
2041 " After LINCS %.6f %.6f %6d %6d\n\n",
2042 sqrt(p_ssd/ncons_loc), p_max,
2043 ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
2044 ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));
2051 cconerr(cr->dd, lincsd->nc, lincsd->bla, lincsd->bllen, xprime, pbc,
2052 &ncons_loc, &p_ssd, &p_max, &p_imax);
2055 sprintf(buf3, " in simulation %d", cr->ms->sim);
2061 sprintf(buf, "\nStep %s, time %g (ps) LINCS WARNING%s\n"
2062 "relative constraint deviation after LINCS:\n"
2063 "rms %.6f, max %.6f (between atoms %d and %d)\n",
2064 gmx_step_str(step, buf2), ir->init_t+step*ir->delta_t,
2066 sqrt(p_ssd/ncons_loc), p_max,
2067 ddglatnr(cr->dd, lincsd->bla[2*p_imax]),
2068 ddglatnr(cr->dd, lincsd->bla[2*p_imax+1]));
2071 fprintf(fplog, "%s", buf);
2073 fprintf(stderr, "%s", buf);
2074 lincs_warning(fplog, cr->dd, x, xprime, pbc,
2075 lincsd->nc, lincsd->bla, lincsd->bllen,
2076 ir->LincsWarnAngle, maxwarn, warncount);
2078 bOK = (p_max < 0.5);
2081 if (lincsd->ncg_flex)
2083 for (i = 0; (i < lincsd->nc); i++)
2085 if (lincsd->bllen0[i] == 0 && lincsd->ddist[i] == 0)
2087 lincsd->bllen[i] = 0;
2094 /* The OpenMP parallel region of constrain_lincs for derivatives */
2095 #pragma omp parallel num_threads(lincsd->nth)
2097 int th = gmx_omp_get_thread_num();
2099 do_lincsp(x, xprime, min_proj, pbc, lincsd, th,
2100 md->invmass, econq, bCalcDHDL,
2101 bCalcVir, th == 0 ? vir_r_m_dr : lincsd->th[th].vir_r_m_dr);
2107 /* Reduce the dH/dlambda contributions over the threads */
2112 for (th = 0; th < lincsd->nth; th++)
2114 dhdlambda += lincsd->th[th].dhdlambda;
2118 /* dhdlambda contains dH/dlambda*dt^2, correct for this */
2119 /* TODO This should probably use invdt, so that sd integrator scaling works properly */
2120 dhdlambda /= ir->delta_t*ir->delta_t;
2122 *dvdlambda += dhdlambda;
2125 if (bCalcVir && lincsd->nth > 1)
2127 for (i = 1; i < lincsd->nth; i++)
2129 m_add(vir_r_m_dr, lincsd->th[i].vir_r_m_dr, vir_r_m_dr);
2133 /* count assuming nit=1 */
2134 inc_nrnb(nrnb, eNR_LINCS, lincsd->nc);
2135 inc_nrnb(nrnb, eNR_LINCSMAT, (2+lincsd->nOrder)*lincsd->ncc);
2136 if (lincsd->ntriangle > 0)
2138 inc_nrnb(nrnb, eNR_LINCSMAT, lincsd->nOrder*lincsd->ncc_triangle);
2142 inc_nrnb(nrnb, eNR_CONSTR_V, lincsd->nc*2);
2146 inc_nrnb(nrnb, eNR_CONSTR_VIR, lincsd->nc);