1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
4 * This source code is part of
8 * GROningen MAchine for Chemical Simulations
11 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
12 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
13 * Copyright (c) 2001-2004, The GROMACS development team,
14 * check out http://www.gromacs.org for more information.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version 2
19 * of the License, or (at your option) any later version.
21 * If you want to redistribute modifications, please consider that
22 * scientific software is very special. Version control is crucial -
23 * bugs must be traceable. We will be happy to consider code for
24 * inclusion in the official distribution, but derived work must not
25 * be called official GROMACS. Details are found in the README & COPYING
26 * files - if they are missing, get the official version at www.gromacs.org.
28 * To help us fund GROMACS development, we humbly ask that you cite
29 * the papers on the package - you can find them in the top README file.
31 * For more info, check our website at http://www.gromacs.org
34 * GROwing Monsters And Cloning Shrimps
53 #include "mtop_util.h"
54 #include "gmx_omp_nthreads.h"
57 /* Routines to send/receive coordinates and forces
58  * of constructing atoms.
61 static void move_construct_x(t_comm_vsites *vsitecomm, rvec x[], t_commrec *cr)
67 sendbuf = vsitecomm->send_buf;
68 recvbuf = vsitecomm->recv_buf;
71 /* Prepare pulse left by copying to send buffer */
72 for(i=0;i<vsitecomm->left_export_nconstruct;i++)
74 ia = vsitecomm->left_export_construct[i];
75 copy_rvec(x[ia],sendbuf[i]);
78 /* Pulse coordinates left */
79 gmx_tx_rx_real(cr,GMX_LEFT,(real *)sendbuf,3*vsitecomm->left_export_nconstruct,GMX_RIGHT,(real *)recvbuf,3*vsitecomm->right_import_nconstruct);
81 /* Copy from receive buffer to coordinate array */
82 for(i=0;i<vsitecomm->right_import_nconstruct;i++)
84 ia = vsitecomm->right_import_construct[i];
85 copy_rvec(recvbuf[i],x[ia]);
88 /* Prepare pulse right by copying to send buffer */
89 for(i=0;i<vsitecomm->right_export_nconstruct;i++)
91 ia = vsitecomm->right_export_construct[i];
92 copy_rvec(x[ia],sendbuf[i]);
95 /* Pulse coordinates right */
96 gmx_tx_rx_real(cr,GMX_RIGHT,(real *)sendbuf,3*vsitecomm->right_export_nconstruct,GMX_LEFT,(real *)recvbuf,3*vsitecomm->left_import_nconstruct);
98 /* Copy from receive buffer to coordinate array */
99 for(i=0;i<vsitecomm->left_import_nconstruct;i++)
101 ia = vsitecomm->left_import_construct[i];
102 copy_rvec(recvbuf[i],x[ia]);
107 static void move_construct_f(t_comm_vsites *vsitecomm, rvec f[], t_commrec *cr)
113 sendbuf = vsitecomm->send_buf;
114 recvbuf = vsitecomm->recv_buf;
116 /* Prepare pulse right by copying to send buffer */
117 for(i=0;i<vsitecomm->right_import_nconstruct;i++)
119 ia = vsitecomm->right_import_construct[i];
120 copy_rvec(f[ia],sendbuf[i]);
121 clear_rvec(f[ia]); /* Zero it here after moving, just to simplify debug book-keeping... */
124 /* Pulse forces right */
125 gmx_tx_rx_real(cr,GMX_RIGHT,(real *)sendbuf,3*vsitecomm->right_import_nconstruct,GMX_LEFT,(real *)recvbuf,3*vsitecomm->left_export_nconstruct);
127 /* Copy from receive buffer to coordinate array */
128 for(i=0;i<vsitecomm->left_export_nconstruct;i++)
130 ia = vsitecomm->left_export_construct[i];
131 rvec_inc(f[ia],recvbuf[i]);
134 /* Prepare pulse left by copying to send buffer */
135 for(i=0;i<vsitecomm->left_import_nconstruct;i++)
137 ia = vsitecomm->left_import_construct[i];
138 copy_rvec(f[ia],sendbuf[i]);
139 clear_rvec(f[ia]); /* Zero it here after moving, just to simplify debug book-keeping... */
142 /* Pulse coordinates left */
143 gmx_tx_rx_real(cr,GMX_LEFT,(real *)sendbuf,3*vsitecomm->left_import_nconstruct,GMX_RIGHT,(real *)recvbuf,3*vsitecomm->right_export_nconstruct);
145 /* Copy from receive buffer to coordinate array */
146 for(i=0;i<vsitecomm->right_export_nconstruct;i++)
148 ia = vsitecomm->right_export_construct[i];
149 rvec_inc(f[ia],recvbuf[i]);
152 /* All forces are now on the home processors */
157 pd_clear_nonlocal_constructs(t_comm_vsites *vsitecomm, rvec f[])
161 for(i=0;i<vsitecomm->left_import_nconstruct;i++)
163 ia = vsitecomm->left_import_construct[i];
166 for(i=0;i<vsitecomm->right_import_nconstruct;i++)
168 ia = vsitecomm->right_import_construct[i];
175 static int pbc_rvec_sub(const t_pbc *pbc,const rvec xi,const rvec xj,rvec dx)
178 return pbc_dx_aiuc(pbc,xi,xj,dx);
186 /* Vsite construction routines */
188 static void constr_vsite2(rvec xi,rvec xj,rvec x,real a,t_pbc *pbc)
197 pbc_dx_aiuc(pbc,xj,xi,dx);
198 x[XX] = xi[XX] + a*dx[XX];
199 x[YY] = xi[YY] + a*dx[YY];
200 x[ZZ] = xi[ZZ] + a*dx[ZZ];
202 x[XX] = b*xi[XX] + a*xj[XX];
203 x[YY] = b*xi[YY] + a*xj[YY];
204 x[ZZ] = b*xi[ZZ] + a*xj[ZZ];
208 /* TOTAL: 10 flops */
211 static void constr_vsite3(rvec xi,rvec xj,rvec xk,rvec x,real a,real b,
221 pbc_dx_aiuc(pbc,xj,xi,dxj);
222 pbc_dx_aiuc(pbc,xk,xi,dxk);
223 x[XX] = xi[XX] + a*dxj[XX] + b*dxk[XX];
224 x[YY] = xi[YY] + a*dxj[YY] + b*dxk[YY];
225 x[ZZ] = xi[ZZ] + a*dxj[ZZ] + b*dxk[ZZ];
227 x[XX] = c*xi[XX] + a*xj[XX] + b*xk[XX];
228 x[YY] = c*xi[YY] + a*xj[YY] + b*xk[YY];
229 x[ZZ] = c*xi[ZZ] + a*xj[ZZ] + b*xk[ZZ];
233 /* TOTAL: 17 flops */
236 static void constr_vsite3FD(rvec xi,rvec xj,rvec xk,rvec x,real a,real b,
242 pbc_rvec_sub(pbc,xj,xi,xij);
243 pbc_rvec_sub(pbc,xk,xj,xjk);
246 /* temp goes from i to a point on the line jk */
247 temp[XX] = xij[XX] + a*xjk[XX];
248 temp[YY] = xij[YY] + a*xjk[YY];
249 temp[ZZ] = xij[ZZ] + a*xjk[ZZ];
252 c=b*gmx_invsqrt(iprod(temp,temp));
255 x[XX] = xi[XX] + c*temp[XX];
256 x[YY] = xi[YY] + c*temp[YY];
257 x[ZZ] = xi[ZZ] + c*temp[ZZ];
260 /* TOTAL: 34 flops */
263 static void constr_vsite3FAD(rvec xi,rvec xj,rvec xk,rvec x,real a,real b, t_pbc *pbc)
266 real a1,b1,c1,invdij;
268 pbc_rvec_sub(pbc,xj,xi,xij);
269 pbc_rvec_sub(pbc,xk,xj,xjk);
272 invdij = gmx_invsqrt(iprod(xij,xij));
273 c1 = invdij * invdij * iprod(xij,xjk);
274 xp[XX] = xjk[XX] - c1*xij[XX];
275 xp[YY] = xjk[YY] - c1*xij[YY];
276 xp[ZZ] = xjk[ZZ] - c1*xij[ZZ];
278 b1 = b*gmx_invsqrt(iprod(xp,xp));
281 x[XX] = xi[XX] + a1*xij[XX] + b1*xp[XX];
282 x[YY] = xi[YY] + a1*xij[YY] + b1*xp[YY];
283 x[ZZ] = xi[ZZ] + a1*xij[ZZ] + b1*xp[ZZ];
286 /* TOTAL: 63 flops */
289 static void constr_vsite3OUT(rvec xi,rvec xj,rvec xk,rvec x,
290 real a,real b,real c,t_pbc *pbc)
294 pbc_rvec_sub(pbc,xj,xi,xij);
295 pbc_rvec_sub(pbc,xk,xi,xik);
299 x[XX] = xi[XX] + a*xij[XX] + b*xik[XX] + c*temp[XX];
300 x[YY] = xi[YY] + a*xij[YY] + b*xik[YY] + c*temp[YY];
301 x[ZZ] = xi[ZZ] + a*xij[ZZ] + b*xik[ZZ] + c*temp[ZZ];
304 /* TOTAL: 33 flops */
307 static void constr_vsite4FD(rvec xi,rvec xj,rvec xk,rvec xl,rvec x,
308 real a,real b,real c,t_pbc *pbc)
310 rvec xij,xjk,xjl,temp;
313 pbc_rvec_sub(pbc,xj,xi,xij);
314 pbc_rvec_sub(pbc,xk,xj,xjk);
315 pbc_rvec_sub(pbc,xl,xj,xjl);
318 /* temp goes from i to a point on the plane jkl */
319 temp[XX] = xij[XX] + a*xjk[XX] + b*xjl[XX];
320 temp[YY] = xij[YY] + a*xjk[YY] + b*xjl[YY];
321 temp[ZZ] = xij[ZZ] + a*xjk[ZZ] + b*xjl[ZZ];
324 d=c*gmx_invsqrt(iprod(temp,temp));
327 x[XX] = xi[XX] + d*temp[XX];
328 x[YY] = xi[YY] + d*temp[YY];
329 x[ZZ] = xi[ZZ] + d*temp[ZZ];
332 /* TOTAL: 43 flops */
336 static void constr_vsite4FDN(rvec xi,rvec xj,rvec xk,rvec xl,rvec x,
337 real a,real b,real c,t_pbc *pbc)
339 rvec xij,xik,xil,ra,rb,rja,rjb,rm;
342 pbc_rvec_sub(pbc,xj,xi,xij);
343 pbc_rvec_sub(pbc,xk,xi,xik);
344 pbc_rvec_sub(pbc,xl,xi,xil);
357 rvec_sub(ra,xij,rja);
358 rvec_sub(rb,xij,rjb);
364 d=c*gmx_invsqrt(norm2(rm));
367 x[XX] = xi[XX] + d*rm[XX];
368 x[YY] = xi[YY] + d*rm[YY];
369 x[ZZ] = xi[ZZ] + d*rm[ZZ];
372 /* TOTAL: 47 flops */
376 static int constr_vsiten(t_iatom *ia, t_iparams ip[],
384 n3 = 3*ip[ia[0]].vsiten.n;
389 for(i=3; i<n3; i+=3) {
391 a = ip[ia[i]].vsiten.a;
393 pbc_dx_aiuc(pbc,x[ai],x1,dx);
395 rvec_sub(x[ai],x1,dx);
397 dsum[XX] += a*dx[XX];
398 dsum[YY] += a*dx[YY];
399 dsum[ZZ] += a*dx[ZZ];
403 x[av][XX] = x1[XX] + dsum[XX];
404 x[av][YY] = x1[YY] + dsum[YY];
405 x[av][ZZ] = x1[ZZ] + dsum[ZZ];
411 void construct_vsites_thread(gmx_vsite_t *vsite,
412 rvec x[],t_nrnb *nrnb,
414 t_iparams ip[],t_ilist ilist[],
419 real a1,b1,c1,inv_dt;
420 int i,inc,ii,nra,nr,tp,ftype;
421 t_iatom avsite,ai,aj,ak,al,pbc_atom;
424 int *vsite_pbc,ishift;
425 rvec reftmp,vtmp,rtmp;
436 bPBCAll = (pbc_null != NULL && !vsite->bHaveChargeGroups);
440 for(ftype=0; (ftype<F_NRE); ftype++)
442 if ((interaction_function[ftype].flags & IF_VSITE) &&
445 nra = interaction_function[ftype].nratoms;
447 nr = ilist[ftype].nr;
448 ia = ilist[ftype].iatoms;
452 pbc_null2 = pbc_null;
454 else if (pbc_null != NULL)
456 vsite_pbc = vsite->vsite_pbc_loc[ftype-F_VSITE2];
463 /* The vsite and constructing atoms */
468 /* Constants for constructing vsites */
470 /* Check what kind of pbc we need to use */
473 /* No charge groups, vsite follows its own pbc */
475 copy_rvec(x[avsite],xpbc);
477 else if (vsite_pbc != NULL)
479 pbc_atom = vsite_pbc[i/(1+nra)];
484 /* We need to copy the coordinates here,
485 * single for single atom cg's pbc_atom
486 * is the vsite itself.
488 copy_rvec(x[pbc_atom],xpbc);
490 pbc_null2 = pbc_null;
501 /* Copy the old position */
502 copy_rvec(x[avsite],xv);
504 /* Construct the vsite depending on type */
508 constr_vsite2(x[ai],x[aj],x[avsite],a1,pbc_null2);
513 constr_vsite3(x[ai],x[aj],x[ak],x[avsite],a1,b1,pbc_null2);
518 constr_vsite3FD(x[ai],x[aj],x[ak],x[avsite],a1,b1,pbc_null2);
523 constr_vsite3FAD(x[ai],x[aj],x[ak],x[avsite],a1,b1,pbc_null2);
529 constr_vsite3OUT(x[ai],x[aj],x[ak],x[avsite],a1,b1,c1,pbc_null2);
536 constr_vsite4FD(x[ai],x[aj],x[ak],x[al],x[avsite],a1,b1,c1,
544 constr_vsite4FDN(x[ai],x[aj],x[ak],x[al],x[avsite],a1,b1,c1,
548 inc = constr_vsiten(ia,ip,x,pbc_null2);
551 gmx_fatal(FARGS,"No such vsite type %d in %s, line %d",
552 ftype,__FILE__,__LINE__);
557 /* Match the pbc of this vsite to the rest of its charge group */
558 ishift = pbc_dx_aiuc(pbc_null,x[avsite],xpbc,dx);
559 if (ishift != CENTRAL)
561 rvec_add(xpbc,dx,x[avsite]);
566 /* Calculate velocity of vsite... */
567 rvec_sub(x[avsite],xv,vv);
568 svmul(inv_dt,vv,v[avsite]);
571 /* Increment loop variables */
579 void construct_vsites(FILE *log,gmx_vsite_t *vsite,
580 rvec x[],t_nrnb *nrnb,
582 t_iparams ip[],t_ilist ilist[],
583 int ePBC,gmx_bool bMolPBC,t_graph *graph,
584 t_commrec *cr,matrix box)
590 bDomDec = cr && DOMAINDECOMP(cr);
592 /* We only need to do pbc when we have inter-cg vsites */
593 if (ePBC != epbcNONE && (bDomDec || bMolPBC) && vsite->n_intercg_vsite)
595 /* This is wasting some CPU time as we now do this multiple times
596 * per MD step. But how often do we have vsites with full pbc?
598 pbc_null = set_pbc_dd(&pbc,ePBC,cr!=NULL ? cr->dd : NULL,FALSE,box);
609 dd_move_x_vsites(cr->dd,box,x);
611 else if (vsite->bPDvsitecomm)
613 /* I'm not sure whether the periodicity and shift are guaranteed
614 * to be consistent between different nodes when running e.g. polymers
615 * in parallel. In this special case we thus unshift/shift,
616 * but only when necessary. This is to make sure the coordinates
617 * we move don't end up a box away...
621 unshift_self(graph,box,x);
624 move_construct_x(vsite->vsitecomm,x,cr);
628 shift_self(graph,box,x);
633 if (vsite->nthreads == 1)
635 construct_vsites_thread(vsite,
642 #pragma omp parallel num_threads(vsite->nthreads)
644 construct_vsites_thread(vsite,
646 ip,vsite->tdata[gmx_omp_get_thread_num()].ilist,
649 /* Now we can construct the vsites that might depend on other vsites */
650 construct_vsites_thread(vsite,
652 ip,vsite->tdata[vsite->nthreads].ilist,
657 static void spread_vsite2(t_iatom ia[],real a,
658 rvec x[],rvec f[],rvec fshift[],
659 t_pbc *pbc,t_graph *g)
680 ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,av),di);
682 ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,aj),di);
685 siv = pbc_dx_aiuc(pbc,x[ai],x[av],dx);
686 sij = pbc_dx_aiuc(pbc,x[ai],x[aj],dx);
692 if (fshift && (siv != CENTRAL || sij != CENTRAL)) {
693 rvec_inc(fshift[siv],f[av]);
694 rvec_dec(fshift[CENTRAL],fi);
695 rvec_dec(fshift[sij],fj);
698 /* TOTAL: 13 flops */
701 void construct_vsites_mtop(FILE *log,gmx_vsite_t *vsite,
702 gmx_mtop_t *mtop,rvec x[])
705 gmx_molblock_t *molb;
709 for(mb=0; mb<mtop->nmolblock; mb++) {
710 molb = &mtop->molblock[mb];
711 molt = &mtop->moltype[molb->type];
712 for(mol=0; mol<molb->nmol; mol++) {
713 construct_vsites(log,vsite,x+as,NULL,0.0,NULL,
714 mtop->ffparams.iparams,molt->ilist,
715 epbcNONE,TRUE,NULL,NULL,NULL);
716 as += molt->atoms.nr;
721 static void spread_vsite3(t_iatom ia[],real a,real b,
722 rvec x[],rvec f[],rvec fshift[],
723 t_pbc *pbc,t_graph *g)
735 svmul(1-a-b,f[av],fi);
746 ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,ia[1]),di);
748 ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,aj),di);
750 ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,ak),di);
753 siv = pbc_dx_aiuc(pbc,x[ai],x[av],dx);
754 sij = pbc_dx_aiuc(pbc,x[ai],x[aj],dx);
755 sik = pbc_dx_aiuc(pbc,x[ai],x[ak],dx);
762 if (fshift && (siv!=CENTRAL || sij!=CENTRAL || sik!=CENTRAL)) {
763 rvec_inc(fshift[siv],f[av]);
764 rvec_dec(fshift[CENTRAL],fi);
765 rvec_dec(fshift[sij],fj);
766 rvec_dec(fshift[sik],fk);
769 /* TOTAL: 20 flops */
772 static void spread_vsite3FD(t_iatom ia[],real a,real b,
773 rvec x[],rvec f[],rvec fshift[],
774 gmx_bool VirCorr,matrix dxdf,
775 t_pbc *pbc,t_graph *g)
777 real fx,fy,fz,c,invl,fproj,a1;
778 rvec xvi,xij,xjk,xix,fv,temp;
789 sji = pbc_rvec_sub(pbc,x[aj],x[ai],xij);
790 skj = pbc_rvec_sub(pbc,x[ak],x[aj],xjk);
793 /* xix goes from i to point x on the line jk */
794 xix[XX]=xij[XX]+a*xjk[XX];
795 xix[YY]=xij[YY]+a*xjk[YY];
796 xix[ZZ]=xij[ZZ]+a*xjk[ZZ];
799 invl=gmx_invsqrt(iprod(xix,xix));
803 fproj=iprod(xix,fv)*invl*invl; /* = (xix . f)/(xix . xix) */
805 temp[XX]=c*(fv[XX]-fproj*xix[XX]);
806 temp[YY]=c*(fv[YY]-fproj*xix[YY]);
807 temp[ZZ]=c*(fv[ZZ]-fproj*xix[ZZ]);
810 /* c is already calculated in constr_vsite3FD
811 storing c somewhere will save 26 flops! */
814 f[ai][XX] += fv[XX] - temp[XX];
815 f[ai][YY] += fv[YY] - temp[YY];
816 f[ai][ZZ] += fv[ZZ] - temp[ZZ];
817 f[aj][XX] += a1*temp[XX];
818 f[aj][YY] += a1*temp[YY];
819 f[aj][ZZ] += a1*temp[ZZ];
820 f[ak][XX] += a*temp[XX];
821 f[ak][YY] += a*temp[YY];
822 f[ak][ZZ] += a*temp[ZZ];
826 ivec_sub(SHIFT_IVEC(g,ia[1]),SHIFT_IVEC(g,ai),di);
828 ivec_sub(SHIFT_IVEC(g,aj),SHIFT_IVEC(g,ai),di);
830 ivec_sub(SHIFT_IVEC(g,ak),SHIFT_IVEC(g,aj),di);
833 svi = pbc_rvec_sub(pbc,x[av],x[ai],xvi);
838 if (fshift && (svi!=CENTRAL || sji!=CENTRAL || skj!=CENTRAL)) {
839 rvec_dec(fshift[svi],fv);
840 fshift[CENTRAL][XX] += fv[XX] - (1 + a)*temp[XX];
841 fshift[CENTRAL][YY] += fv[YY] - (1 + a)*temp[YY];
842 fshift[CENTRAL][ZZ] += fv[ZZ] - (1 + a)*temp[ZZ];
843 fshift[ sji][XX] += temp[XX];
844 fshift[ sji][YY] += temp[YY];
845 fshift[ sji][ZZ] += temp[ZZ];
846 fshift[ skj][XX] += a*temp[XX];
847 fshift[ skj][YY] += a*temp[YY];
848 fshift[ skj][ZZ] += a*temp[ZZ];
853 /* When VirCorr=TRUE, the virial for the current forces is not
854 * calculated from the redistributed forces. This means that
855 * the effect of non-linear virtual site constructions on the virial
856 * needs to be added separately. This contribution can be calculated
857 * in many ways, but the simplest and cheapest way is to use
858 * the first constructing atom ai as a reference position in space:
859 * subtract (xv-xi)*fv and add (xj-xi)*fj + (xk-xi)*fk.
864 pbc_rvec_sub(pbc,x[av],x[ai],xiv);
870 /* As xix is a linear combination of j and k, use that here */
871 dxdf[i][j] += -xiv[i]*fv[j] + xix[i]*temp[j];
876 /* TOTAL: 61 flops */
879 static void spread_vsite3FAD(t_iatom ia[],real a,real b,
880 rvec x[],rvec f[],rvec fshift[],
881 gmx_bool VirCorr,matrix dxdf,
882 t_pbc *pbc,t_graph *g)
884 rvec xvi,xij,xjk,xperp,Fpij,Fppp,fv,f1,f2,f3;
885 real a1,b1,c1,c2,invdij,invdij2,invdp,fproj;
894 copy_rvec(f[ia[1]],fv);
896 sji = pbc_rvec_sub(pbc,x[aj],x[ai],xij);
897 skj = pbc_rvec_sub(pbc,x[ak],x[aj],xjk);
900 invdij = gmx_invsqrt(iprod(xij,xij));
901 invdij2 = invdij * invdij;
902 c1 = iprod(xij,xjk) * invdij2;
903 xperp[XX] = xjk[XX] - c1*xij[XX];
904 xperp[YY] = xjk[YY] - c1*xij[YY];
905 xperp[ZZ] = xjk[ZZ] - c1*xij[ZZ];
906 /* xperp in plane ijk, perp. to ij */
907 invdp = gmx_invsqrt(iprod(xperp,xperp));
912 /* a1, b1 and c1 are already calculated in constr_vsite3FAD
913 storing them somewhere will save 45 flops! */
915 fproj=iprod(xij ,fv)*invdij2;
916 svmul(fproj, xij, Fpij); /* proj. f on xij */
917 svmul(iprod(xperp,fv)*invdp*invdp,xperp,Fppp); /* proj. f on xperp */
918 svmul(b1*fproj, xperp,f3);
921 rvec_sub(fv,Fpij,f1); /* f1 = f - Fpij */
922 rvec_sub(f1,Fppp,f2); /* f2 = f - Fpij - Fppp */
923 for (d=0; (d<DIM); d++) {
930 f[ai][XX] += fv[XX] - f1[XX] + c1*f2[XX] + f3[XX];
931 f[ai][YY] += fv[YY] - f1[YY] + c1*f2[YY] + f3[YY];
932 f[ai][ZZ] += fv[ZZ] - f1[ZZ] + c1*f2[ZZ] + f3[ZZ];
933 f[aj][XX] += f1[XX] - c2*f2[XX] - f3[XX];
934 f[aj][YY] += f1[YY] - c2*f2[YY] - f3[YY];
935 f[aj][ZZ] += f1[ZZ] - c2*f2[ZZ] - f3[ZZ];
942 ivec_sub(SHIFT_IVEC(g,ia[1]),SHIFT_IVEC(g,ai),di);
944 ivec_sub(SHIFT_IVEC(g,aj),SHIFT_IVEC(g,ai),di);
946 ivec_sub(SHIFT_IVEC(g,ak),SHIFT_IVEC(g,aj),di);
949 svi = pbc_rvec_sub(pbc,x[av],x[ai],xvi);
954 if (fshift && (svi!=CENTRAL || sji!=CENTRAL || skj!=CENTRAL)) {
955 rvec_dec(fshift[svi],fv);
956 fshift[CENTRAL][XX] += fv[XX] - f1[XX] - (1-c1)*f2[XX] + f3[XX];
957 fshift[CENTRAL][YY] += fv[YY] - f1[YY] - (1-c1)*f2[YY] + f3[YY];
958 fshift[CENTRAL][ZZ] += fv[ZZ] - f1[ZZ] - (1-c1)*f2[ZZ] + f3[ZZ];
959 fshift[ sji][XX] += f1[XX] - c1 *f2[XX] - f3[XX];
960 fshift[ sji][YY] += f1[YY] - c1 *f2[YY] - f3[YY];
961 fshift[ sji][ZZ] += f1[ZZ] - c1 *f2[ZZ] - f3[ZZ];
962 fshift[ skj][XX] += f2[XX];
963 fshift[ skj][YY] += f2[YY];
964 fshift[ skj][ZZ] += f2[ZZ];
972 pbc_rvec_sub(pbc,x[av],x[ai],xiv);
978 /* Note that xik=xij+xjk, so we have to add xij*f2 */
981 + xij[i]*(f1[j] + (1 - c2)*f2[j] - f3[j])
987 /* TOTAL: 113 flops */
990 static void spread_vsite3OUT(t_iatom ia[],real a,real b,real c,
991 rvec x[],rvec f[],rvec fshift[],
992 gmx_bool VirCorr,matrix dxdf,
993 t_pbc *pbc,t_graph *g)
995 rvec xvi,xij,xik,fv,fj,fk;
1006 sji = pbc_rvec_sub(pbc,x[aj],x[ai],xij);
1007 ski = pbc_rvec_sub(pbc,x[ak],x[ai],xik);
1010 copy_rvec(f[av],fv);
1017 fj[XX] = a*fv[XX] - xik[ZZ]*cfy + xik[YY]*cfz;
1018 fj[YY] = xik[ZZ]*cfx + a*fv[YY] - xik[XX]*cfz;
1019 fj[ZZ] = -xik[YY]*cfx + xik[XX]*cfy + a*fv[ZZ];
1021 fk[XX] = b*fv[XX] + xij[ZZ]*cfy - xij[YY]*cfz;
1022 fk[YY] = -xij[ZZ]*cfx + b*fv[YY] + xij[XX]*cfz;
1023 fk[ZZ] = xij[YY]*cfx - xij[XX]*cfy + b*fv[ZZ];
1026 f[ai][XX] += fv[XX] - fj[XX] - fk[XX];
1027 f[ai][YY] += fv[YY] - fj[YY] - fk[YY];
1028 f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
1034 ivec_sub(SHIFT_IVEC(g,ia[1]),SHIFT_IVEC(g,ai),di);
1036 ivec_sub(SHIFT_IVEC(g,aj),SHIFT_IVEC(g,ai),di);
1038 ivec_sub(SHIFT_IVEC(g,ak),SHIFT_IVEC(g,ai),di);
1041 svi = pbc_rvec_sub(pbc,x[av],x[ai],xvi);
1046 if (fshift && (svi!=CENTRAL || sji!=CENTRAL || ski!=CENTRAL)) {
1047 rvec_dec(fshift[svi],fv);
1048 fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX];
1049 fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY];
1050 fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ];
1051 rvec_inc(fshift[sji],fj);
1052 rvec_inc(fshift[ski],fk);
1060 pbc_rvec_sub(pbc,x[av],x[ai],xiv);
1062 for(i=0; i<DIM; i++)
1064 for(j=0; j<DIM; j++)
1066 dxdf[i][j] += -xiv[i]*fv[j] + xij[i]*fj[j] + xik[i]*fk[j];
1071 /* TOTAL: 54 flops */
1074 static void spread_vsite4FD(t_iatom ia[],real a,real b,real c,
1075 rvec x[],rvec f[],rvec fshift[],
1076 gmx_bool VirCorr,matrix dxdf,
1077 t_pbc *pbc,t_graph *g)
1079 real d,invl,fproj,a1;
1080 rvec xvi,xij,xjk,xjl,xix,fv,temp;
1081 atom_id av,ai,aj,ak,al;
1083 int svi,sji,skj,slj,m;
1091 sji = pbc_rvec_sub(pbc,x[aj],x[ai],xij);
1092 skj = pbc_rvec_sub(pbc,x[ak],x[aj],xjk);
1093 slj = pbc_rvec_sub(pbc,x[al],x[aj],xjl);
1096 /* xix goes from i to point x on the plane jkl */
1097 for(m=0; m<DIM; m++)
1098 xix[m] = xij[m] + a*xjk[m] + b*xjl[m];
1101 invl=gmx_invsqrt(iprod(xix,xix));
1103 /* 4 + ?10? flops */
1105 copy_rvec(f[av],fv);
1107 fproj=iprod(xix,fv)*invl*invl; /* = (xix . f)/(xix . xix) */
1109 for(m=0; m<DIM; m++)
1110 temp[m] = d*(fv[m] - fproj*xix[m]);
1113 /* c is already calculated in constr_vsite3FD
1114 storing c somewhere will save 35 flops! */
1117 for(m=0; m<DIM; m++) {
1118 f[ai][m] += fv[m] - temp[m];
1119 f[aj][m] += a1*temp[m];
1120 f[ak][m] += a*temp[m];
1121 f[al][m] += b*temp[m];
1126 ivec_sub(SHIFT_IVEC(g,ia[1]),SHIFT_IVEC(g,ai),di);
1128 ivec_sub(SHIFT_IVEC(g,aj),SHIFT_IVEC(g,ai),di);
1130 ivec_sub(SHIFT_IVEC(g,ak),SHIFT_IVEC(g,aj),di);
1132 ivec_sub(SHIFT_IVEC(g,al),SHIFT_IVEC(g,aj),di);
1135 svi = pbc_rvec_sub(pbc,x[av],x[ai],xvi);
1141 (svi!=CENTRAL || sji!=CENTRAL || skj!=CENTRAL || slj!=CENTRAL)) {
1142 rvec_dec(fshift[svi],fv);
1143 for(m=0; m<DIM; m++) {
1144 fshift[CENTRAL][m] += fv[m] - (1 + a + b)*temp[m];
1145 fshift[ sji][m] += temp[m];
1146 fshift[ skj][m] += a*temp[m];
1147 fshift[ slj][m] += b*temp[m];
1156 pbc_rvec_sub(pbc,x[av],x[ai],xiv);
1158 for(i=0; i<DIM; i++)
1160 for(j=0; j<DIM; j++)
1162 dxdf[i][j] += -xiv[i]*fv[j] + xix[i]*temp[j];
1167 /* TOTAL: 77 flops */
1171 static void spread_vsite4FDN(t_iatom ia[],real a,real b,real c,
1172 rvec x[],rvec f[],rvec fshift[],
1173 gmx_bool VirCorr,matrix dxdf,
1174 t_pbc *pbc,t_graph *g)
1176 rvec xvi,xij,xik,xil,ra,rb,rja,rjb,rab,rm,rt;
1182 int svi,sij,sik,sil;
1184 /* DEBUG: check atom indices */
1191 copy_rvec(f[av],fv);
1193 sij = pbc_rvec_sub(pbc,x[aj],x[ai],xij);
1194 sik = pbc_rvec_sub(pbc,x[ak],x[ai],xik);
1195 sil = pbc_rvec_sub(pbc,x[al],x[ai],xil);
1208 rvec_sub(ra,xij,rja);
1209 rvec_sub(rb,xij,rjb);
1210 rvec_sub(rb,ra,rab);
1216 invrm=gmx_invsqrt(norm2(rm));
1220 cfx = c*invrm*fv[XX];
1221 cfy = c*invrm*fv[YY];
1222 cfz = c*invrm*fv[ZZ];
1233 fj[XX] = ( -rm[XX]*rt[XX]) * cfx + ( rab[ZZ]-rm[YY]*rt[XX]) * cfy + (-rab[YY]-rm[ZZ]*rt[XX]) * cfz;
1234 fj[YY] = (-rab[ZZ]-rm[XX]*rt[YY]) * cfx + ( -rm[YY]*rt[YY]) * cfy + ( rab[XX]-rm[ZZ]*rt[YY]) * cfz;
1235 fj[ZZ] = ( rab[YY]-rm[XX]*rt[ZZ]) * cfx + (-rab[XX]-rm[YY]*rt[ZZ]) * cfy + ( -rm[ZZ]*rt[ZZ]) * cfz;
1246 fk[XX] = ( -rm[XX]*rt[XX]) * cfx + (-a*rjb[ZZ]-rm[YY]*rt[XX]) * cfy + ( a*rjb[YY]-rm[ZZ]*rt[XX]) * cfz;
1247 fk[YY] = ( a*rjb[ZZ]-rm[XX]*rt[YY]) * cfx + ( -rm[YY]*rt[YY]) * cfy + (-a*rjb[XX]-rm[ZZ]*rt[YY]) * cfz;
1248 fk[ZZ] = (-a*rjb[YY]-rm[XX]*rt[ZZ]) * cfx + ( a*rjb[XX]-rm[YY]*rt[ZZ]) * cfy + ( -rm[ZZ]*rt[ZZ]) * cfz;
1259 fl[XX] = ( -rm[XX]*rt[XX]) * cfx + ( b*rja[ZZ]-rm[YY]*rt[XX]) * cfy + (-b*rja[YY]-rm[ZZ]*rt[XX]) * cfz;
1260 fl[YY] = (-b*rja[ZZ]-rm[XX]*rt[YY]) * cfx + ( -rm[YY]*rt[YY]) * cfy + ( b*rja[XX]-rm[ZZ]*rt[YY]) * cfz;
1261 fl[ZZ] = ( b*rja[YY]-rm[XX]*rt[ZZ]) * cfx + (-b*rja[XX]-rm[YY]*rt[ZZ]) * cfy + ( -rm[ZZ]*rt[ZZ]) * cfz;
1264 f[ai][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
1265 f[ai][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
1266 f[ai][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
1273 ivec_sub(SHIFT_IVEC(g,av),SHIFT_IVEC(g,ai),di);
1275 ivec_sub(SHIFT_IVEC(g,aj),SHIFT_IVEC(g,ai),di);
1277 ivec_sub(SHIFT_IVEC(g,ak),SHIFT_IVEC(g,ai),di);
1279 ivec_sub(SHIFT_IVEC(g,al),SHIFT_IVEC(g,ai),di);
1282 svi = pbc_rvec_sub(pbc,x[av],x[ai],xvi);
1287 if (fshift && (svi!=CENTRAL || sij!=CENTRAL || sik!=CENTRAL || sil!=CENTRAL)) {
1288 rvec_dec(fshift[svi],fv);
1289 fshift[CENTRAL][XX] += fv[XX] - fj[XX] - fk[XX] - fl[XX];
1290 fshift[CENTRAL][YY] += fv[YY] - fj[YY] - fk[YY] - fl[YY];
1291 fshift[CENTRAL][ZZ] += fv[ZZ] - fj[ZZ] - fk[ZZ] - fl[ZZ];
1292 rvec_inc(fshift[sij],fj);
1293 rvec_inc(fshift[sik],fk);
1294 rvec_inc(fshift[sil],fl);
1302 pbc_rvec_sub(pbc,x[av],x[ai],xiv);
1304 for(i=0; i<DIM; i++)
1306 for(j=0; j<DIM; j++)
1308 dxdf[i][j] += -xiv[i]*fv[j] + xij[i]*fj[j] + xik[i]*fk[j] + xil[i]*fl[j];
1313 /* Total: 207 flops (Yuck!) */
1317 static int spread_vsiten(t_iatom ia[],t_iparams ip[],
1318 rvec x[],rvec f[],rvec fshift[],
1319 t_pbc *pbc,t_graph *g)
1327 n3 = 3*ip[ia[0]].vsiten.n;
1329 copy_rvec(x[av],xv);
1331 for(i=0; i<n3; i+=3) {
1334 ivec_sub(SHIFT_IVEC(g,ai),SHIFT_IVEC(g,av),di);
1337 siv = pbc_dx_aiuc(pbc,x[ai],xv,dx);
1341 a = ip[ia[i]].vsiten.a;
1344 if (fshift && siv != CENTRAL) {
1345 rvec_inc(fshift[siv],fi);
1346 rvec_dec(fshift[CENTRAL],fi);
1355 static int vsite_count(const t_ilist *ilist,int ftype)
1357 if (ftype == F_VSITEN)
1359 return ilist[ftype].nr/3;
1363 return ilist[ftype].nr/(1 + interaction_function[ftype].nratoms);
1367 static void spread_vsite_f_thread(gmx_vsite_t *vsite,
1368 rvec x[],rvec f[],rvec *fshift,
1369 gmx_bool VirCorr,matrix dxdf,
1370 t_iparams ip[],t_ilist ilist[],
1371 t_graph *g,t_pbc *pbc_null)
1375 int i,inc,m,nra,nr,tp,ftype;
1385 bPBCAll = (pbc_null != NULL && !vsite->bHaveChargeGroups);
1387 /* this loop goes backwards to be able to build *
1388 * higher type vsites from lower types */
1391 for(ftype=F_NRE-1; (ftype>=0); ftype--)
1393 if ((interaction_function[ftype].flags & IF_VSITE) &&
1394 ilist[ftype].nr > 0)
1396 nra = interaction_function[ftype].nratoms;
1398 nr = ilist[ftype].nr;
1399 ia = ilist[ftype].iatoms;
1403 pbc_null2 = pbc_null;
1405 else if (pbc_null != NULL)
1407 vsite_pbc = vsite->vsite_pbc_loc[ftype-F_VSITE2];
1412 if (vsite_pbc != NULL)
1414 if (vsite_pbc[i/(1+nra)] > -2)
1416 pbc_null2 = pbc_null;
1426 /* Constants for constructing */
1427 a1 = ip[tp].vsite.a;
1428 /* Construct the vsite depending on type */
1432 spread_vsite2(ia,a1,x,f,fshift,pbc_null2,g);
1435 b1 = ip[tp].vsite.b;
1436 spread_vsite3(ia,a1,b1,x,f,fshift,pbc_null2,g);
1439 b1 = ip[tp].vsite.b;
1440 spread_vsite3FD(ia,a1,b1,x,f,fshift,VirCorr,dxdf,pbc_null2,g);
1443 b1 = ip[tp].vsite.b;
1444 spread_vsite3FAD(ia,a1,b1,x,f,fshift,VirCorr,dxdf,pbc_null2,g);
1447 b1 = ip[tp].vsite.b;
1448 c1 = ip[tp].vsite.c;
1449 spread_vsite3OUT(ia,a1,b1,c1,x,f,fshift,VirCorr,dxdf,pbc_null2,g);
1452 b1 = ip[tp].vsite.b;
1453 c1 = ip[tp].vsite.c;
1454 spread_vsite4FD(ia,a1,b1,c1,x,f,fshift,VirCorr,dxdf,pbc_null2,g);
1457 b1 = ip[tp].vsite.b;
1458 c1 = ip[tp].vsite.c;
1459 spread_vsite4FDN(ia,a1,b1,c1,x,f,fshift,VirCorr,dxdf,pbc_null2,g);
1462 inc = spread_vsiten(ia,ip,x,f,fshift,pbc_null2,g);
1465 gmx_fatal(FARGS,"No such vsite type %d in %s, line %d",
1466 ftype,__FILE__,__LINE__);
1468 clear_rvec(f[ia[1]]);
1470 /* Increment loop variables */
1478 void spread_vsite_f(FILE *log,gmx_vsite_t *vsite,
1479 rvec x[],rvec f[],rvec *fshift,
1480 gmx_bool VirCorr,matrix vir,
1481 t_nrnb *nrnb,t_idef *idef,
1482 int ePBC,gmx_bool bMolPBC,t_graph *g,matrix box,
1485 t_pbc pbc,*pbc_null;
1488 /* We only need to do pbc when we have inter-cg vsites */
1489 if ((DOMAINDECOMP(cr) || bMolPBC) && vsite->n_intercg_vsite)
1491 /* This is wasting some CPU time as we now do this multiple times
1492 * per MD step. But how often do we have vsites with full pbc?
1494 pbc_null = set_pbc_dd(&pbc,ePBC,cr->dd,FALSE,box);
1501 if (DOMAINDECOMP(cr))
1503 dd_clear_f_vsites(cr->dd,f);
1505 else if (PARTDECOMP(cr) && vsite->vsitecomm != NULL)
1507 pd_clear_nonlocal_constructs(vsite->vsitecomm,f);
1510 if (vsite->nthreads == 1)
1512 spread_vsite_f_thread(vsite,
1514 VirCorr,vsite->tdata[0].dxdf,
1515 idef->iparams,idef->il,
1520 /* First spread the vsites that might depend on other vsites */
1521 spread_vsite_f_thread(vsite,
1523 VirCorr,vsite->tdata[vsite->nthreads].dxdf,
1525 vsite->tdata[vsite->nthreads].ilist,
1528 #pragma omp parallel num_threads(vsite->nthreads)
1533 thread = gmx_omp_get_thread_num();
1535 if (thread == 0 || fshift == NULL)
1543 fshift_t = vsite->tdata[thread].fshift;
1545 for(i=0; i<SHIFTS; i++)
1547 clear_rvec(fshift_t[i]);
1551 spread_vsite_f_thread(vsite,
1553 VirCorr,vsite->tdata[thread].dxdf,
1555 vsite->tdata[thread].ilist,
1563 for(th=1; th<vsite->nthreads; th++)
1565 for(i=0; i<SHIFTS; i++)
1567 rvec_inc(fshift[i],vsite->tdata[th].fshift[i]);
1577 for(th=0; th<(vsite->nthreads==1 ? 1 : vsite->nthreads+1); th++)
1579 for(i=0; i<DIM; i++)
1581 for(j=0; j<DIM; j++)
1583 vir[i][j] += -0.5*vsite->tdata[th].dxdf[i][j];
1589 if (DOMAINDECOMP(cr))
1591 dd_move_f_vsites(cr->dd,f,fshift);
1593 else if (vsite->bPDvsitecomm)
1595 /* We only move forces here, and they are independent of shifts */
1596 move_construct_f(vsite->vsitecomm,f,cr);
1599 inc_nrnb(nrnb,eNR_VSITE2, vsite_count(idef->il,F_VSITE2));
1600 inc_nrnb(nrnb,eNR_VSITE3, vsite_count(idef->il,F_VSITE3));
1601 inc_nrnb(nrnb,eNR_VSITE3FD, vsite_count(idef->il,F_VSITE3FD));
1602 inc_nrnb(nrnb,eNR_VSITE3FAD,vsite_count(idef->il,F_VSITE3FAD));
1603 inc_nrnb(nrnb,eNR_VSITE3OUT,vsite_count(idef->il,F_VSITE3OUT));
1604 inc_nrnb(nrnb,eNR_VSITE4FD, vsite_count(idef->il,F_VSITE4FD));
1605 inc_nrnb(nrnb,eNR_VSITE4FDN,vsite_count(idef->il,F_VSITE4FDN));
1606 inc_nrnb(nrnb,eNR_VSITEN, vsite_count(idef->il,F_VSITEN));
1609 static int *atom2cg(t_block *cgs)
1613 snew(a2cg,cgs->index[cgs->nr]);
1614 for(cg=0; cg<cgs->nr; cg++) {
1615 for(i=cgs->index[cg]; i<cgs->index[cg+1]; i++)
1622 static int count_intercg_vsite(gmx_mtop_t *mtop,
1623 gmx_bool *bHaveChargeGroups)
1625 int mb,mt,ftype,nral,i,cg,a;
1626 gmx_molblock_t *molb;
1627 gmx_moltype_t *molt;
1631 int n_intercg_vsite;
1633 *bHaveChargeGroups = FALSE;
1635 n_intercg_vsite = 0;
1636 for(mb=0; mb<mtop->nmolblock; mb++)
1638 molb = &mtop->molblock[mb];
1639 molt = &mtop->moltype[molb->type];
1641 if (molt->cgs.nr < molt->atoms.nr)
1643 *bHaveChargeGroups = TRUE;
1646 a2cg = atom2cg(&molt->cgs);
1647 for(ftype=0; ftype<F_NRE; ftype++)
1649 if (interaction_function[ftype].flags & IF_VSITE)
1652 il = &molt->ilist[ftype];
1654 for(i=0; i<il->nr; i+=1+nral)
1657 for(a=1; a<nral; a++)
1659 if (a2cg[ia[1+a]] != cg) {
1660 n_intercg_vsite += molb->nmol;
1670 return n_intercg_vsite;
1673 static int **get_vsite_pbc(t_iparams *iparams,t_ilist *ilist,
1674 t_atom *atom,t_mdatoms *md,
1675 t_block *cgs,int *a2cg)
1677 int ftype,nral,i,j,vsi,vsite,cg_v,cg_c,a,nc3=0;
1680 int **vsite_pbc,*vsite_pbc_f;
1682 gmx_bool bViteOnlyCG_and_FirstAtom;
1684 /* Make an array that tells if the pbc of an atom is set */
1685 snew(pbc_set,cgs->index[cgs->nr]);
1686 /* PBC is set for all non vsites */
1687 for(a=0; a<cgs->index[cgs->nr]; a++) {
1688 if ((atom && atom[a].ptype != eptVSite) ||
1689 (md && md->ptype[a] != eptVSite)) {
1694 snew(vsite_pbc,F_VSITEN-F_VSITE2+1);
1696 for(ftype=0; ftype<F_NRE; ftype++) {
1697 if (interaction_function[ftype].flags & IF_VSITE) {
1702 snew(vsite_pbc[ftype-F_VSITE2],il->nr/(1+nral));
1703 vsite_pbc_f = vsite_pbc[ftype-F_VSITE2];
1706 while (i < il->nr) {
1710 /* A value of -2 signals that this vsite and its contructing
1711 * atoms are all within the same cg, so no pbc is required.
1713 vsite_pbc_f[vsi] = -2;
1714 /* Check if constructing atoms are outside the vsite's cg */
1716 if (ftype == F_VSITEN) {
1717 nc3 = 3*iparams[ia[i]].vsiten.n;
1718 for(j=0; j<nc3; j+=3) {
1719 if (a2cg[ia[i+j+2]] != cg_v)
1720 vsite_pbc_f[vsi] = -1;
1723 for(a=1; a<nral; a++) {
1724 if (a2cg[ia[i+1+a]] != cg_v)
1725 vsite_pbc_f[vsi] = -1;
1728 if (vsite_pbc_f[vsi] == -1) {
1729 /* Check if this is the first processed atom of a vsite only cg */
1730 bViteOnlyCG_and_FirstAtom = TRUE;
1731 for(a=cgs->index[cg_v]; a<cgs->index[cg_v+1]; a++) {
1732 /* Non-vsites already have pbc set, so simply check for pbc_set */
1734 bViteOnlyCG_and_FirstAtom = FALSE;
1738 if (bViteOnlyCG_and_FirstAtom) {
1739 /* First processed atom of a vsite only charge group.
1740 * The pbc of the input coordinates to construct_vsites
1741 * should be preserved.
1743 vsite_pbc_f[vsi] = vsite;
1744 } else if (cg_v != a2cg[ia[1+i+1]]) {
1745 /* This vsite has a different charge group index
1746 * than it's first constructing atom
1747 * and the charge group has more than one atom,
1748 * search for the first normal particle
1749 * or vsite that already had its pbc defined.
1750 * If nothing is found, use full pbc for this vsite.
1752 for(a=cgs->index[cg_v]; a<cgs->index[cg_v+1]; a++) {
1753 if (a != vsite && pbc_set[a]) {
1754 vsite_pbc_f[vsi] = a;
1756 fprintf(debug,"vsite %d match pbc with atom %d\n",
1762 fprintf(debug,"vsite atom %d cg %d - %d pbc atom %d\n",
1763 vsite+1,cgs->index[cg_v]+1,cgs->index[cg_v+1],
1764 vsite_pbc_f[vsi]+1);
1767 if (ftype == F_VSITEN) {
1768 /* The other entries in vsite_pbc_f are not used for center vsites */
1774 /* This vsite now has its pbc defined */
/* Allocate and initialize the gmx_vsite_t bookkeeping structure.
 * Counts the virtual-site interactions in the topology, counts the
 * inter-charge-group vsites, and (when charge groups are present and
 * domain decomposition is used) precomputes per-moleculetype vsite PBC
 * information via get_vsite_pbc().  Also sets up the per-thread work
 * structures used later by split_vsites_over_threads().
 * NOTE(review): this listing is elided; several lines (braces, snew of
 * vsite itself, return statement) are not visible here.
 */
1786 gmx_vsite_t *init_vsite(gmx_mtop_t *mtop,t_commrec *cr,
1787 gmx_bool bSerial_NoPBC)
1793 gmx_moltype_t *molt;
1796 /* check if there are vsites: sum the interaction counts of every
 * function type flagged IF_VSITE over the whole topology */
1798 for(i=0; i<F_NRE; i++)
1800 if (interaction_function[i].flags & IF_VSITE)
1802 nvsite += gmx_mtop_ftype_count(mtop,i);
1813 vsite->n_intercg_vsite = count_intercg_vsite(mtop,
1814 &vsite->bHaveChargeGroups);
1816 /* If we don't have charge groups, the vsite follows its own pbc */
1817 if (!bSerial_NoPBC &&
1818 vsite->bHaveChargeGroups &&
1819 vsite->n_intercg_vsite > 0 && DOMAINDECOMP(cr))
1821 vsite->nvsite_pbc_molt = mtop->nmoltype;
1822 snew(vsite->vsite_pbc_molt,vsite->nvsite_pbc_molt);
1823 for(mt=0; mt<mtop->nmoltype; mt++)
1825 molt = &mtop->moltype[mt];
1826 /* Make an atom to charge group index */
1827 a2cg = atom2cg(&molt->cgs);
1828 vsite->vsite_pbc_molt[mt] = get_vsite_pbc(mtop->ffparams.iparams,
1830 molt->atoms.atom,NULL,
/* Per-ftype local PBC arrays; one slot for each vsite function type
 * (F_VSITE2 .. F_VSITEN) */
1835 snew(vsite->vsite_pbc_loc_nalloc,F_VSITEN-F_VSITE2+1);
1836 snew(vsite->vsite_pbc_loc ,F_VSITEN-F_VSITE2+1);
/* Thread count: 1 in the serial/no-PBC case, otherwise taken from the
 * OpenMP module setting for the vsite section */
1841 vsite->nthreads = 1;
1845 vsite->nthreads = gmx_omp_nthreads_get(emntVSITE);
1849 /* We need one extra thread data structure for the overlap vsites */
1850 snew(vsite->tdata,vsite->nthreads+1);
/* Thread-index array is allocated lazily in split_vsites_over_threads */
1853 vsite->th_ind = NULL;
1854 vsite->th_ind_nalloc = 0;
/* Prepare one thread's local interaction lists for vsite assignment:
 * for every function type flagged IF_VSITE, grow the thread-local
 * iatoms buffer so it could hold the complete global list for that
 * type, and reset the thread-local count to zero so entries can be
 * appended by split_vsites_over_threads().
 */
1859 static void prepare_vsite_thread(const t_ilist *ilist,
1860 gmx_vsite_thread_t *vsite_th)
1864 for(ftype=0; ftype<F_NRE; ftype++)
1866 if (interaction_function[ftype].flags & IF_VSITE)
1868 if (ilist[ftype].nr > vsite_th->ilist[ftype].nalloc)
/* over_alloc_large gives headroom to avoid frequent reallocation */
1870 vsite_th->ilist[ftype].nalloc = over_alloc_large(ilist[ftype].nr);
1871 srenew(vsite_th->ilist[ftype].iatoms,vsite_th->ilist[ftype].nalloc);
1874 vsite_th->ilist[ftype].nr = 0;
/* Distribute the vsite interactions over the OpenMP threads.
 * Each thread gets a contiguous atom range; a vsite whose constructing
 * atoms all fall in one thread's range is assigned to that thread,
 * otherwise it goes into the extra "overlap" batch (index nthreads)
 * that the master thread processes separately.
 * NOTE(review): listing is elided; an early return for nthreads==1 and
 * several closing braces are not visible here.
 */
1879 void split_vsites_over_threads(const t_ilist *ilist,
1880 const t_mdatoms *mdatoms,
1881 gmx_bool bLimitRange,
1885 int vsite_atom_range,natperthread;
1892 if (vsite->nthreads == 1)
/* Size/reset each thread's local ilists in parallel */
1898 #pragma omp parallel for num_threads(vsite->nthreads) schedule(static)
1899 for(th=0; th<vsite->nthreads; th++)
1901 prepare_vsite_thread(ilist,&vsite->tdata[th]);
1903 /* Master thread does the (potential) overlap vsites */
1904 prepare_vsite_thread(ilist,&vsite->tdata[vsite->nthreads]);
1906 /* The current way of distributing the vsites over threads is primitive.
1907 * We divide the atom range 0 - natoms_in_vsite uniformly over threads,
1908 * without taking into account how the vsites are distributed.
1909 * Without domain decomposition we have bLimitRange=TRUE and we at least
1910 * tighten the upper bound of the range (useful for common systems
1911 * such as a vsite-protein in 3-site water).
/* Find the highest atom index that appears in any vsite interaction */
1915 vsite_atom_range = -1;
1916 for(ftype=0; ftype<F_NRE; ftype++)
1918 if ((interaction_function[ftype].flags & IF_VSITE) &&
1921 nral1 = 1 + NRAL(ftype);
1922 iat = ilist[ftype].iatoms;
1923 for(i=0; i<ilist[ftype].nr; i+=nral1)
1925 for(j=i+1; j<i+nral1; j++)
1927 vsite_atom_range = max(vsite_atom_range,iat[j]);
/* NOTE(review): the else-branch context is elided; without the range
 * limit the range falls back to all home atoms */
1936 vsite_atom_range = mdatoms->homenr;
/* Ceiling division: atoms per thread */
1938 natperthread = (vsite_atom_range + vsite->nthreads - 1)/vsite->nthreads;
1942 fprintf(debug,"virtual site thread dist: natoms %d, range %d, natperthread %d\n",mdatoms->nr,vsite_atom_range,natperthread);
1945 /* To simplify the vsite assignment, we make an index which tells us
1946 * to which thread particles, both non-vsites and vsites, are assigned.
1948 if (mdatoms->nr > vsite->th_ind_nalloc)
1950 vsite->th_ind_nalloc = over_alloc_large(mdatoms->nr);
1951 srenew(vsite->th_ind,vsite->th_ind_nalloc);
1953 th_ind = vsite->th_ind;
1955 for(i=0; i<mdatoms->nr; i++)
1957 if (mdatoms->ptype[i] == eptVSite)
1959 /* vsites are not assigned to a thread yet */
1964 /* assign non-vsite particles to thread th */
/* Advance to the next thread once its atom range is exhausted */
1967 if (i == (th + 1)*natperthread && th < vsite->nthreads)
1973 for(ftype=0; ftype<F_NRE; ftype++)
1975 if ((interaction_function[ftype].flags & IF_VSITE) &&
1978 nral1 = 1 + NRAL(ftype);
1980 iat = ilist[ftype].iatoms;
1981 for(i=0; i<ilist[ftype].nr; )
/* iat[1+i] is the vsite atom itself (entry after the parameter index);
 * its position determines the candidate thread */
1983 th = iat[1+i]/natperthread;
1984 /* We would like to assign this vsite the thread th,
1985 * but it might depend on atoms outside the atom range of th
1986 * or on another vsite not assigned to thread th.
1988 if (ftype != F_VSITEN)
1990 for(j=i+2; j<i+nral1; j++)
1992 if (th_ind[iat[j]] != th)
1994 /* Some constructing atoms are not assigned to
1995 * thread th, move this vsite to a separate batch.
1997 th = vsite->nthreads;
/* F_VSITEN: constructing atoms are every third entry (stride 3) */
2004 for(j=i+2; j<i+inc; j+=3)
2006 if (th_ind[iat[j]] != th)
2008 th = vsite->nthreads;
2012 /* Copy this vsite to the thread data struct of thread th */
2013 il_th = &vsite->tdata[th].ilist[ftype];
2014 for(j=i; j<i+inc; j++)
2016 il_th->iatoms[il_th->nr++] = iat[j];
2018 /* Update this vsite's thread index entry */
2019 th_ind[iat[1+i]] = th;
/* Debug output: per-ftype distribution of entries over the threads,
 * including the extra overlap batch (index nthreads) */
2028 for(ftype=0; ftype<F_NRE; ftype++)
2030 if ((interaction_function[ftype].flags & IF_VSITE) &&
2031 ilist[ftype].nr > 0)
2033 fprintf(debug,"%-20s thread dist:",
2034 interaction_function[ftype].longname);
2035 for(th=0; th<vsite->nthreads+1; th++)
2037 fprintf(debug," %4d",vsite->tdata[th].ilist[ftype].nr);
2039 fprintf(debug,"\n");
2045 void set_vsite_top(gmx_vsite_t *vsite,gmx_localtop_t *top,t_mdatoms *md,
2050 if (vsite->n_intercg_vsite > 0)
2052 if (vsite->bHaveChargeGroups)
2054 /* Make an atom to charge group index */
2055 a2cg = atom2cg(&top->cgs);
2056 vsite->vsite_pbc_loc = get_vsite_pbc(top->idef.iparams,
2057 top->idef.il,NULL,md,
2064 snew(vsite->vsitecomm,1);
2065 vsite->bPDvsitecomm =
2066 setup_parallel_vsites(&(top->idef),cr,vsite->vsitecomm);
2070 if (vsite->nthreads > 1)
2072 if (vsite->bHaveChargeGroups || PARTDECOMP(cr))
2074 gmx_incons("Can not use threads virtual sites combined with charge groups or particle decomposition");
2077 split_vsites_over_threads(top->idef.il,md,!DOMAINDECOMP(cr),vsite);