2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team,
6 * check out http://www.gromacs.org for more information.
7 * Copyright (c) 2012, by the GROMACS development team, led by
8 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
9 * others, as listed in the AUTHORS file in the top-level source
10 * directory and at http://www.gromacs.org.
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
52 #include "gmx_fatal.h"
53 #include "gmx_fatal_collective.h"
57 #include "nonbonded.h"
66 #include "md_support.h"
67 #include "md_logging.h"
72 #include "mtop_util.h"
73 #include "nbnxn_search.h"
74 #include "nbnxn_atomdata.h"
75 #include "nbnxn_consts.h"
77 #include "gmx_omp_nthreads.h"
80 /* MSVC definition for __cpuid() */
84 #include "types/nbnxn_cuda_types_ext.h"
85 #include "gpu_utils.h"
86 #include "nbnxn_cuda_data_mgmt.h"
87 #include "pmalloc_cuda.h"
/* Constructor for a t_forcerec.
 * NOTE(review): the function body is not visible in this extract; presumably
 * it snew-allocates the struct — confirm against the full source.
 */
89 t_forcerec *mk_forcerec(void)
/* Debug helper: print the nonbonded parameter matrix for all atom-type pairs.
 * With bBHAM set, Buckingham a/b/c values are printed; otherwise LJ c6/c12.
 * The divisions by 6.0/12.0 below undo the derivative prefactors that are
 * folded into nbfp (see mk_nbfp).
 * NOTE(review): several lines of this function are elided in this extract.
 */
99 static void pr_nbfp(FILE *fp,real *nbfp,gmx_bool bBHAM,int atnr)
103 for(i=0; (i<atnr); i++) {
104 for(j=0; (j<atnr); j++) {
105 fprintf(fp,"%2d - %2d",i,j);
/* Buckingham branch: c is stored with a 6.0 prefactor, divide it out */
107 fprintf(fp," a=%10g, b=%10g, c=%10g\n",BHAMA(nbfp,atnr,i,j),
108 BHAMB(nbfp,atnr,i,j),BHAMC(nbfp,atnr,i,j)/6.0);
/* LJ branch: c6/c12 are stored with 6.0/12.0 prefactors, divide them out */
110 fprintf(fp," c6=%10g, c12=%10g\n",C6(nbfp,atnr,i,j)/6.0,
111 C12(nbfp,atnr,i,j)/12.0);
/* Build the flat nonbonded parameter matrix from the force-field parameters.
 * Buckingham (bBHAM): 3 reals (a,b,c) per type pair; LJ: 2 reals (c6,c12).
 * The 6.0 / 12.0 derivative prefactors are folded into the stored values here
 * so the kernels can use them directly.
 * Returns the newly allocated matrix; caller owns it.
 * NOTE(review): several lines of this function are elided in this extract.
 */
117 static real *mk_nbfp(const gmx_ffparams_t *idef,gmx_bool bBHAM)
124 snew(nbfp,3*atnr*atnr);
125 for(i=k=0; (i<atnr); i++) {
126 for(j=0; (j<atnr); j++,k++) {
127 BHAMA(nbfp,atnr,i,j) = idef->iparams[k].bham.a;
128 BHAMB(nbfp,atnr,i,j) = idef->iparams[k].bham.b;
129 /* nbfp now includes the 6.0 derivative prefactor */
130 BHAMC(nbfp,atnr,i,j) = idef->iparams[k].bham.c*6.0;
135 snew(nbfp,2*atnr*atnr);
136 for(i=k=0; (i<atnr); i++) {
137 for(j=0; (j<atnr); j++,k++) {
138 /* nbfp now includes the 6.0/12.0 derivative prefactors */
139 C6(nbfp,atnr,i,j) = idef->iparams[k].lj.c6*6.0;
140 C12(nbfp,atnr,i,j) = idef->iparams[k].lj.c12*12.0;
148 /* This routine sets fr->solvent_opt to the most common solvent in the
149 * system, e.g. esolSPC or esolTIP4P. It will also mark each charge group in
150 * the fr->solvent_type array with the correct type (or esolNO).
152 * Charge groups that fulfill the conditions but are not identical to the
153 * most common one will be marked as esolNO in the solvent_type array.
155 * TIP3P is identical to SPC for these purposes, so we call it
156 * SPC in the arrays (Apologies to Bill Jorgensen ;-)
158 * NOTE: QM particles should not
159 * become an optimized solvent. Not even if there is only one charge
/* Per-model bookkeeping record used while scanning for solvent molecules.
 * NOTE(review): the struct members are elided in this extract; later code
 * accesses .model, .count, .vdwtype[] and .charge[].
 */
169 } solvent_parameters_t;
/* Examine one charge group of a molecule type and decide whether it can use
 * the SPC (3-atom) or TIP4P (4-atom) optimized water loops. Matching groups
 * are tallied in the shared solvent_parameters list (grown with srenew);
 * *cg_sp records which list entry this charge group matched.
 * QM atoms and free-energy-perturbed atoms disqualify a group.
 * NOTE(review): many lines of this function are elided in this extract.
 */
172 check_solvent_cg(const gmx_moltype_t *molt,
175 const unsigned char *qm_grpnr,
176 const t_grps *qm_grps,
178 int *n_solvent_parameters,
179 solvent_parameters_t **solvent_parameters_p,
183 const t_blocka * excl;
194 solvent_parameters_t *solvent_parameters;
196 /* We use a list with parameters for each solvent type.
197 * Every time we discover a new molecule that fulfills the basic
198 * conditions for a solvent we compare with the previous entries
199 * in these lists. If the parameters are the same we just increment
200 * the counter for that type, and otherwise we create a new type
201 * based on the current molecule.
203 * Once we've finished going through all molecules we check which
204 * solvent is most common, and mark all those molecules while we
205 * clear the flag on all others.
208 solvent_parameters = *solvent_parameters_p;
210 /* Mark the cg first as non optimized */
213 /* Check if this cg has no exclusions with atoms in other charge groups
214 * and all atoms inside the charge group excluded.
215 * We only have 3 or 4 atom solvent loops.
217 if (GET_CGINFO_EXCL_INTER(cginfo) ||
218 !GET_CGINFO_EXCL_INTRA(cginfo))
223 /* Get the indices of the first atom in this charge group */
224 j0 = molt->cgs.index[cg0];
225 j1 = molt->cgs.index[cg0+1];
227 /* Number of atoms in our molecule */
232 "Moltype '%s': there are %d atoms in this charge group\n",
236 /* Check if it could be an SPC (3 atoms) or TIP4p (4) water,
244 /* Check if we are doing QM on this group */
246 if (qm_grpnr != NULL)
248 for(j=j0 ; j<j1 && !qm; j++)
250 qm = (qm_grpnr[j] < qm_grps->nr - 1);
253 /* Cannot use solvent optimization with QM */
259 atom = molt->atoms.atom;
261 /* Still looks like a solvent, time to check parameters */
263 /* If it is perturbed (free energy) we can't use the solvent loops,
264 * so then we just skip to the next molecule.
268 for(j=j0; j<j1 && !perturbed; j++)
270 perturbed = PERTURBED(atom[j]);
278 /* Now it's only a question if the VdW and charge parameters
279 * are OK. Before doing the check we compare and see if they are
280 * identical to a possible previous solvent type.
281 * First we assign the current types and charges.
285 tmp_vdwtype[j] = atom[j0+j].type;
286 tmp_charge[j] = atom[j0+j].q;
289 /* Does it match any previous solvent type? */
290 for(k=0 ; k<*n_solvent_parameters; k++)
295 /* We can only match SPC with 3 atoms and TIP4p with 4 atoms */
296 if( (solvent_parameters[k].model==esolSPC && nj!=3) ||
297 (solvent_parameters[k].model==esolTIP4P && nj!=4) )
300 /* Check that types & charges match for all atoms in molecule */
301 for(j=0 ; j<nj && match==TRUE; j++)
303 if (tmp_vdwtype[j] != solvent_parameters[k].vdwtype[j])
307 if(tmp_charge[j] != solvent_parameters[k].charge[j])
314 /* Congratulations! We have a matched solvent.
315 * Flag it with this type for later processing.
318 solvent_parameters[k].count += nmol;
320 /* We are done with this charge group */
325 /* If we get here, we have a tentative new solvent type.
326 * Before we add it we must check that it fulfills the requirements
327 * of the solvent optimized loops. First determine which atoms have
333 tjA = tmp_vdwtype[j];
335 /* Go through all other types and see if any have non-zero
336 * VdW parameters when combined with this one.
338 for(k=0; k<fr->ntype && (has_vdw[j]==FALSE); k++)
340 /* We already checked that the atoms weren't perturbed,
341 * so we only need to check state A now.
345 has_vdw[j] = (has_vdw[j] ||
346 (BHAMA(fr->nbfp,fr->ntype,tjA,k) != 0.0) ||
347 (BHAMB(fr->nbfp,fr->ntype,tjA,k) != 0.0) ||
348 (BHAMC(fr->nbfp,fr->ntype,tjA,k) != 0.0));
353 has_vdw[j] = (has_vdw[j] ||
354 (C6(fr->nbfp,fr->ntype,tjA,k) != 0.0) ||
355 (C12(fr->nbfp,fr->ntype,tjA,k) != 0.0));
360 /* Now we know all we need to make the final check and assignment. */
364 * For this we require that all atoms have charge,
365 * the charges on atom 2 & 3 should be the same, and only
366 * atom 1 might have VdW.
368 if (has_vdw[1] == FALSE &&
369 has_vdw[2] == FALSE &&
370 tmp_charge[0] != 0 &&
371 tmp_charge[1] != 0 &&
372 tmp_charge[2] == tmp_charge[1])
374 srenew(solvent_parameters,*n_solvent_parameters+1);
375 solvent_parameters[*n_solvent_parameters].model = esolSPC;
376 solvent_parameters[*n_solvent_parameters].count = nmol;
379 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
380 solvent_parameters[*n_solvent_parameters].charge[k] = tmp_charge[k];
383 *cg_sp = *n_solvent_parameters;
384 (*n_solvent_parameters)++;
389 /* Or could it be a TIP4P?
390 * For this we require that atoms 2,3,4 have charge, but not atom 1.
391 * Only atom 1 might have VdW.
393 if(has_vdw[1] == FALSE &&
394 has_vdw[2] == FALSE &&
395 has_vdw[3] == FALSE &&
396 tmp_charge[0] == 0 &&
397 tmp_charge[1] != 0 &&
398 tmp_charge[2] == tmp_charge[1] &&
401 srenew(solvent_parameters,*n_solvent_parameters+1);
402 solvent_parameters[*n_solvent_parameters].model = esolTIP4P;
403 solvent_parameters[*n_solvent_parameters].count = nmol;
406 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
407 solvent_parameters[*n_solvent_parameters].charge[k] = tmp_charge[k];
410 *cg_sp = *n_solvent_parameters;
411 (*n_solvent_parameters)++;
/* Write back the (possibly reallocated) list pointer for the caller */
415 *solvent_parameters_p = solvent_parameters;
/* Scan all molecule blocks, classify every charge group via check_solvent_cg,
 * then pick the most common solvent model and set fr->solvent_opt to it.
 * Charge groups that matched the winning model get SET_CGINFO_SOLOPT with
 * that model; all others are marked esolNO.
 * NOTE(review): many lines of this function are elided in this extract.
 */
419 check_solvent(FILE * fp,
420 const gmx_mtop_t * mtop,
422 cginfo_mb_t *cginfo_mb)
425 const t_block * mols;
426 const gmx_moltype_t *molt;
427 int mb,mol,cg_mol,at_offset,cg_offset,am,cgm,i,nmol_ch,nmol;
428 int n_solvent_parameters;
429 solvent_parameters_t *solvent_parameters;
435 fprintf(debug,"Going to determine what solvent types we have.\n");
440 n_solvent_parameters = 0;
441 solvent_parameters = NULL;
442 /* Allocate temporary array for solvent type */
443 snew(cg_sp,mtop->nmolblock);
447 for(mb=0; mb<mtop->nmolblock; mb++)
449 molt = &mtop->moltype[mtop->molblock[mb].type];
451 /* Here we have to loop over all individual molecules
452 * because we need to check for QMMM particles.
454 snew(cg_sp[mb],cginfo_mb[mb].cg_mod);
455 nmol_ch = cginfo_mb[mb].cg_mod/cgs->nr;
456 nmol = mtop->molblock[mb].nmol/nmol_ch;
457 for(mol=0; mol<nmol_ch; mol++)
460 am = mol*cgs->index[cgs->nr];
461 for(cg_mol=0; cg_mol<cgs->nr; cg_mol++)
463 check_solvent_cg(molt,cg_mol,nmol,
464 mtop->groups.grpnr[egcQMMM] ?
465 mtop->groups.grpnr[egcQMMM]+at_offset+am : 0,
466 &mtop->groups.grps[egcQMMM],
468 &n_solvent_parameters,&solvent_parameters,
469 cginfo_mb[mb].cginfo[cgm+cg_mol],
470 &cg_sp[mb][cgm+cg_mol]);
473 cg_offset += cgs->nr;
474 at_offset += cgs->index[cgs->nr];
477 /* Puh! We finished going through all charge groups.
478 * Now find the most common solvent model.
481 /* Most common solvent this far */
483 for(i=0;i<n_solvent_parameters;i++)
486 solvent_parameters[i].count > solvent_parameters[bestsp].count)
494 bestsol = solvent_parameters[bestsp].model;
501 #ifdef DISABLE_WATER_NLIST
506 for(mb=0; mb<mtop->nmolblock; mb++)
508 cgs = &mtop->moltype[mtop->molblock[mb].type].cgs;
509 nmol = (mtop->molblock[mb].nmol*cgs->nr)/cginfo_mb[mb].cg_mod;
510 for(i=0; i<cginfo_mb[mb].cg_mod; i++)
512 if (cg_sp[mb][i] == bestsp)
514 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i],bestsol);
519 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i],esolNO);
526 if (bestsol != esolNO && fp!=NULL)
528 fprintf(fp,"\nEnabling %s-like water optimization for %d molecules.\n\n",
530 solvent_parameters[bestsp].count);
533 sfree(solvent_parameters);
534 fr->solvent_opt = bestsol;
/* Per-atom constraint status used when building cginfo:
 * not constrained, constrained by a regular constraint, or by SETTLE. */
537 enum { acNONE=0, acCONSTRAINT, acSETTLE };
/* Build the per-molecule-block charge-group info (cginfo_mb) for the whole
 * topology: energy-group id, constraint/SETTLE flags, intra/inter charge-group
 * exclusion flags, presence of VdW and charge, and charge-group size.
 * Also runs the solvent optimization check and honours GMX_NO_SOLV_OPT /
 * bNoSolvOpt. *bExcl_IntraCGAll_InterCGNone reports whether all intra-cg
 * pairs are excluded and no inter-cg pairs are.
 * NOTE(review): many lines of this function are elided in this extract.
 */
539 static cginfo_mb_t *init_cginfo_mb(FILE *fplog,const gmx_mtop_t *mtop,
540 t_forcerec *fr,gmx_bool bNoSolvOpt,
541 gmx_bool *bExcl_IntraCGAll_InterCGNone)
544 const t_blocka *excl;
545 const gmx_moltype_t *molt;
546 const gmx_molblock_t *molb;
547 cginfo_mb_t *cginfo_mb;
550 int cg_offset,a_offset,cgm,am;
551 int mb,m,ncg_tot,cg,a0,a1,gid,ai,j,aj,excl_nalloc;
555 gmx_bool bId,*bExcl,bExclIntraAll,bExclInter,bHaveVDW,bHaveQ;
557 ncg_tot = ncg_mtop(mtop);
558 snew(cginfo_mb,mtop->nmolblock);
/* type_VDW[t] is TRUE if type t has a non-zero LJ parameter with any type */
560 snew(type_VDW,fr->ntype);
561 for(ai=0; ai<fr->ntype; ai++)
563 type_VDW[ai] = FALSE;
564 for(j=0; j<fr->ntype; j++)
566 type_VDW[ai] = type_VDW[ai] ||
568 C6(fr->nbfp,fr->ntype,ai,j) != 0 ||
569 C12(fr->nbfp,fr->ntype,ai,j) != 0;
573 *bExcl_IntraCGAll_InterCGNone = TRUE;
576 snew(bExcl,excl_nalloc);
579 for(mb=0; mb<mtop->nmolblock; mb++)
581 molb = &mtop->molblock[mb];
582 molt = &mtop->moltype[molb->type];
586 /* Check if the cginfo is identical for all molecules in this block.
587 * If so, we only need an array of the size of one molecule.
588 * Otherwise we make an array of #mol times #cgs per molecule.
592 for(m=0; m<molb->nmol; m++)
594 am = m*cgs->index[cgs->nr];
595 for(cg=0; cg<cgs->nr; cg++)
598 a1 = cgs->index[cg+1];
599 if (ggrpnr(&mtop->groups,egcENER,a_offset+am+a0) !=
600 ggrpnr(&mtop->groups,egcENER,a_offset +a0))
604 if (mtop->groups.grpnr[egcQMMM] != NULL)
606 for(ai=a0; ai<a1; ai++)
608 if (mtop->groups.grpnr[egcQMMM][a_offset+am+ai] !=
609 mtop->groups.grpnr[egcQMMM][a_offset +ai])
618 cginfo_mb[mb].cg_start = cg_offset;
619 cginfo_mb[mb].cg_end = cg_offset + molb->nmol*cgs->nr;
620 cginfo_mb[mb].cg_mod = (bId ? 1 : molb->nmol)*cgs->nr;
621 snew(cginfo_mb[mb].cginfo,cginfo_mb[mb].cg_mod);
622 cginfo = cginfo_mb[mb].cginfo;
624 /* Set constraints flags for constrained atoms */
625 snew(a_con,molt->atoms.nr);
626 for(ftype=0; ftype<F_NRE; ftype++)
628 if (interaction_function[ftype].flags & IF_CONSTRAINT)
633 for(ia=0; ia<molt->ilist[ftype].nr; ia+=1+nral)
637 for(a=0; a<nral; a++)
639 a_con[molt->ilist[ftype].iatoms[ia+1+a]] =
640 (ftype == F_SETTLE ? acSETTLE : acCONSTRAINT);
646 for(m=0; m<(bId ? 1 : molb->nmol); m++)
649 am = m*cgs->index[cgs->nr];
650 for(cg=0; cg<cgs->nr; cg++)
653 a1 = cgs->index[cg+1];
655 /* Store the energy group in cginfo */
656 gid = ggrpnr(&mtop->groups,egcENER,a_offset+am+a0);
657 SET_CGINFO_GID(cginfo[cgm+cg],gid);
659 /* Check the intra/inter charge group exclusions */
660 if (a1-a0 > excl_nalloc) {
661 excl_nalloc = a1 - a0;
662 srenew(bExcl,excl_nalloc);
664 /* bExclIntraAll: all intra cg interactions excluded
665 * bExclInter: any inter cg interactions excluded
667 bExclIntraAll = TRUE;
671 for(ai=a0; ai<a1; ai++)
673 /* Check VDW and electrostatic interactions */
674 bHaveVDW = bHaveVDW || (type_VDW[molt->atoms.atom[ai].type] ||
675 type_VDW[molt->atoms.atom[ai].typeB]);
676 bHaveQ = bHaveQ || (molt->atoms.atom[ai].q != 0 ||
677 molt->atoms.atom[ai].qB != 0);
679 /* Clear the exclusion list for atom ai */
680 for(aj=a0; aj<a1; aj++)
682 bExcl[aj-a0] = FALSE;
684 /* Loop over all the exclusions of atom ai */
685 for(j=excl->index[ai]; j<excl->index[ai+1]; j++)
688 if (aj < a0 || aj >= a1)
697 /* Check if ai excludes a0 to a1 */
698 for(aj=a0; aj<a1; aj++)
702 bExclIntraAll = FALSE;
709 SET_CGINFO_CONSTR(cginfo[cgm+cg]);
712 SET_CGINFO_SETTLE(cginfo[cgm+cg]);
720 SET_CGINFO_EXCL_INTRA(cginfo[cgm+cg]);
724 SET_CGINFO_EXCL_INTER(cginfo[cgm+cg]);
726 if (a1 - a0 > MAX_CHARGEGROUP_SIZE)
728 /* The size in cginfo is currently only read with DD */
729 gmx_fatal(FARGS,"A charge group has size %d which is larger than the limit of %d atoms",a1-a0,MAX_CHARGEGROUP_SIZE);
733 SET_CGINFO_HAS_VDW(cginfo[cgm+cg]);
737 SET_CGINFO_HAS_Q(cginfo[cgm+cg]);
739 /* Store the charge group size */
740 SET_CGINFO_NATOMS(cginfo[cgm+cg],a1-a0);
742 if (!bExclIntraAll || bExclInter)
744 *bExcl_IntraCGAll_InterCGNone = FALSE;
751 cg_offset += molb->nmol*cgs->nr;
752 a_offset += molb->nmol*cgs->index[cgs->nr];
756 /* the solvent optimizer is called after the QM is initialized,
757 * because we don't want to have the QM subsystem to become an
761 check_solvent(fplog,mtop,fr,cginfo_mb);
763 if (getenv("GMX_NO_SOLV_OPT"))
767 fprintf(fplog,"Found environment variable GMX_NO_SOLV_OPT.\n"
768 "Disabling all solvent optimization\n");
770 fr->solvent_opt = esolNO;
774 fr->solvent_opt = esolNO;
776 if (!fr->solvent_opt)
778 for(mb=0; mb<mtop->nmolblock; mb++)
780 for(cg=0; cg<cginfo_mb[mb].cg_mod; cg++)
782 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[cg],esolNO);
/* Expand the compressed per-block cginfo arrays into one flat array with one
 * entry per global charge group. The modulo with cg_mod maps all identical
 * molecules in a block onto the single stored copy.
 * NOTE(review): several lines of this function are elided in this extract.
 */
790 static int *cginfo_expand(int nmb,cginfo_mb_t *cgi_mb)
795 ncg = cgi_mb[nmb-1].cg_end;
798 for(cg=0; cg<ncg; cg++)
/* Advance to the molecule block containing global charge group cg */
800 while (cg >= cgi_mb[mb].cg_end)
805 cgi_mb[mb].cginfo[(cg - cgi_mb[mb].cg_start) % cgi_mb[mb].cg_mod];
/* Compute the total charge and total squared charge of the system for both
 * topology states A and B (B only when free energy is enabled; otherwise B
 * copies A) and store them in fr->qsum[] / fr->q2sum[]. Logs the totals.
 * NOTE(review): several lines of this function are elided in this extract.
 */
811 static void set_chargesum(FILE *log,t_forcerec *fr,const gmx_mtop_t *mtop)
815 const t_atoms *atoms;
819 for(mb=0; mb<mtop->nmolblock; mb++)
821 nmol = mtop->molblock[mb].nmol;
822 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
823 for(i=0; i<atoms->nr; i++)
825 q = atoms->atom[i].q;
831 fr->q2sum[0] = q2sum;
832 if (fr->efep != efepNO)
/* State B: repeat the accumulation with the B-state charges */
836 for(mb=0; mb<mtop->nmolblock; mb++)
838 nmol = mtop->molblock[mb].nmol;
839 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
840 for(i=0; i<atoms->nr; i++)
842 q = atoms->atom[i].qB;
847 fr->q2sum[1] = q2sum;
/* No free energy: state B equals state A */
852 fr->qsum[1] = fr->qsum[0];
853 fr->q2sum[1] = fr->q2sum[0];
856 if (fr->efep == efepNO)
857 fprintf(log,"System total charge: %.3f\n",fr->qsum[0]);
859 fprintf(log,"System total charge, top. A: %.3f top. B: %.3f\n",
860 fr->qsum[0],fr->qsum[1]);
/* Refresh box-dependent force-record data: for generalized reaction-field
 * electrostatics, recompute kappa, k_rf and c_rf from the current box.
 */
864 void update_forcerec(FILE *log,t_forcerec *fr,matrix box)
866 if (fr->eeltype == eelGRF)
868 calc_rffac(NULL,fr->eeltype,fr->epsilon_r,fr->epsilon_rf,
869 fr->rcoulomb,fr->temp,fr->zsquare,box,
870 &fr->kappa,&fr->k_rf,&fr->c_rf);
/* Compute the system-average C6 and C12 parameters used for the long-range
 * dispersion correction and store them in fr->avcsix[q] / fr->avctwelve[q]
 * (q = 0 for state A, 1 for state B when free energy is on).
 * Pairs are counted per atom type to avoid O(natoms^2) work; excluded pairs
 * are subtracted, and test-particle-insertion (TPI) gets special handling so
 * only test-particle/system interactions are counted.
 * NOTE(review): many lines of this function are elided in this extract.
 */
874 void set_avcsixtwelve(FILE *fplog,t_forcerec *fr,const gmx_mtop_t *mtop)
876 const t_atoms *atoms,*atoms_tpi;
877 const t_blocka *excl;
878 int mb,nmol,nmolc,i,j,tpi,tpj,j1,j2,k,n,nexcl,q;
/* Use 64-bit integers for pair counts when available, double otherwise */
879 #if (defined SIZEOF_LONG_LONG_INT) && (SIZEOF_LONG_LONG_INT >= 8)
880 long long int npair,npair_ij,tmpi,tmpj;
882 double npair, npair_ij,tmpi,tmpj;
893 for(q=0; q<(fr->efep==efepNO ? 1 : 2); q++) {
899 /* Count the types so we avoid natoms^2 operations */
901 for(mb=0; mb<mtop->nmolblock; mb++) {
902 nmol = mtop->molblock[mb].nmol;
903 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
904 for(i=0; i<atoms->nr; i++) {
907 tpi = atoms->atom[i].type;
911 tpi = atoms->atom[i].typeB;
913 typecount[tpi] += nmol;
916 for(tpi=0; tpi<ntp; tpi++) {
917 for(tpj=tpi; tpj<ntp; tpj++) {
918 tmpi = typecount[tpi];
919 tmpj = typecount[tpj];
922 npair_ij = tmpi*tmpj;
/* Same type with itself: n*(n-1)/2 distinct pairs */
926 npair_ij = tmpi*(tmpi - 1)/2;
929 /* nbfp now includes the 6.0 derivative prefactor */
930 csix += npair_ij*BHAMC(nbfp,ntp,tpi,tpj)/6.0;
932 /* nbfp now includes the 6.0/12.0 derivative prefactors */
933 csix += npair_ij* C6(nbfp,ntp,tpi,tpj)/6.0;
934 ctwelve += npair_ij* C12(nbfp,ntp,tpi,tpj)/12.0;
940 /* Subtract the excluded pairs.
941 * The main reason for subtracting exclusions is that in some cases
942 * some combinations might never occur and the parameters could have
943 * any value. These unused values should not influence the dispersion
946 for(mb=0; mb<mtop->nmolblock; mb++) {
947 nmol = mtop->molblock[mb].nmol;
948 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
949 excl = &mtop->moltype[mtop->molblock[mb].type].excls;
950 for(i=0; (i<atoms->nr); i++) {
953 tpi = atoms->atom[i].type;
957 tpi = atoms->atom[i].typeB;
960 j2 = excl->index[i+1];
961 for(j=j1; j<j2; j++) {
967 tpj = atoms->atom[k].type;
971 tpj = atoms->atom[k].typeB;
974 /* nbfp now includes the 6.0 derivative prefactor */
975 csix -= nmol*BHAMC(nbfp,ntp,tpi,tpj)/6.0;
977 /* nbfp now includes the 6.0/12.0 derivative prefactors */
978 csix -= nmol*C6 (nbfp,ntp,tpi,tpj)/6.0;
979 ctwelve -= nmol*C12(nbfp,ntp,tpi,tpj)/12.0;
987 /* Only correct for the interaction of the test particle
988 * with the rest of the system.
991 &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].atoms;
994 for(mb=0; mb<mtop->nmolblock; mb++) {
995 nmol = mtop->molblock[mb].nmol;
996 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
997 for(j=0; j<atoms->nr; j++) {
999 /* Remove the interaction of the test charge group
1002 if (mb == mtop->nmolblock-1)
1006 if (mb == 0 && nmol == 1)
1008 gmx_fatal(FARGS,"Old format tpr with TPI, please generate a new tpr file");
1013 tpj = atoms->atom[j].type;
1017 tpj = atoms->atom[j].typeB;
1019 for(i=0; i<fr->n_tpi; i++)
1023 tpi = atoms_tpi->atom[i].type;
1027 tpi = atoms_tpi->atom[i].typeB;
1031 /* nbfp now includes the 6.0 derivative prefactor */
1032 csix += nmolc*BHAMC(nbfp,ntp,tpi,tpj)/6.0;
1036 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1037 csix += nmolc*C6 (nbfp,ntp,tpi,tpj)/6.0;
1038 ctwelve += nmolc*C12(nbfp,ntp,tpi,tpj)/12.0;
1045 if (npair - nexcl <= 0 && fplog) {
1046 fprintf(fplog,"\nWARNING: There are no atom pairs for dispersion correction\n\n");
/* Average over the non-excluded pair count */
1050 csix /= npair - nexcl;
1051 ctwelve /= npair - nexcl;
1054 fprintf(debug,"Counted %d exclusions\n",nexcl);
1055 fprintf(debug,"Average C6 parameter is: %10g\n",(double)csix);
1056 fprintf(debug,"Average C12 parameter is: %10g\n",(double)ctwelve);
1058 fr->avcsix[q] = csix;
1059 fr->avctwelve[q] = ctwelve;
1063 if (fr->eDispCorr == edispcAllEner ||
1064 fr->eDispCorr == edispcAllEnerPres)
1066 fprintf(fplog,"Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
1067 fr->avcsix[0],fr->avctwelve[0]);
1071 fprintf(fplog,"Long Range LJ corr.: <C6> %10.4e\n",fr->avcsix[0]);
/* Scan all molecule-type pairs and record the largest (and report the
 * smallest) Buckingham b parameter; the maximum is stored in fr->bham_b_max
 * for table generation. Fatal error on out-of-range atom types.
 * NOTE(review): several lines of this function are elided in this extract.
 */
1077 static void set_bham_b_max(FILE *fplog,t_forcerec *fr,
1078 const gmx_mtop_t *mtop)
1080 const t_atoms *at1,*at2;
1081 int mt1,mt2,i,j,tpi,tpj,ntypes;
1087 fprintf(fplog,"Determining largest Buckingham b parameter for table\n");
1094 for(mt1=0; mt1<mtop->nmoltype; mt1++)
1096 at1 = &mtop->moltype[mt1].atoms;
1097 for(i=0; (i<at1->nr); i++)
1099 tpi = at1->atom[i].type;
1101 gmx_fatal(FARGS,"Atomtype[%d] = %d, maximum = %d",i,tpi,ntypes);
1103 for(mt2=mt1; mt2<mtop->nmoltype; mt2++)
1105 at2 = &mtop->moltype[mt2].atoms;
1106 for(j=0; (j<at2->nr); j++) {
1107 tpj = at2->atom[j].type;
1110 gmx_fatal(FARGS,"Atomtype[%d] = %d, maximum = %d",j,tpj,ntypes);
1112 b = BHAMB(nbfp,ntypes,tpi,tpj);
1113 if (b > fr->bham_b_max)
1117 if ((b < bmin) || (bmin==-1))
1127 fprintf(fplog,"Buckingham b parameters, min: %g, max: %g\n",
1128 bmin,fr->bham_b_max);
/* Read the user-supplied nonbonded table for one energy-group pair: the table
 * file name is derived from tabfn by inserting "_<eg1>_<eg2>" before the
 * extension. The combined elec+vdw table is then split into separate aligned
 * electrostatics and VdW tables for better cache behaviour.
 * NOTE(review): several lines of this function are elided in this extract;
 * the 12/4/8 strides in the copy loop appear to be the per-point data widths
 * of the combined, elec-only and vdw-only tables — confirm in full source.
 */
1132 static void make_nbf_tables(FILE *fp,const output_env_t oenv,
1133 t_forcerec *fr,real rtab,
1134 const t_commrec *cr,
1135 const char *tabfn,char *eg1,char *eg2,
1141 if (tabfn == NULL) {
1143 fprintf(debug,"No table file name passed, can not read table, can not do non-bonded interactions\n");
1147 sprintf(buf,"%s",tabfn);
1149 /* Append the two energy group names */
1150 sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1,"_%s_%s.%s",
1151 eg1,eg2,ftp2ext(efXVG));
1152 nbl->table_elec_vdw = make_tables(fp,oenv,fr,MASTER(cr),buf,rtab,0);
1153 /* Copy the contents of the table to separate coulomb and LJ tables too,
1154 * to improve cache performance.
1156 /* For performance reasons we want
1157 * the table data to be aligned to 16-byte. The pointers could be freed
1158 * but currently aren't.
1160 nbl->table_elec.interaction = GMX_TABLE_INTERACTION_ELEC;
1161 nbl->table_elec.format = nbl->table_elec_vdw.format;
1162 nbl->table_elec.r = nbl->table_elec_vdw.r;
1163 nbl->table_elec.n = nbl->table_elec_vdw.n;
1164 nbl->table_elec.scale = nbl->table_elec_vdw.scale;
1165 nbl->table_elec.scale_exp = nbl->table_elec_vdw.scale_exp;
1166 nbl->table_elec.formatsize = nbl->table_elec_vdw.formatsize;
1167 nbl->table_elec.ninteractions = 1;
1168 nbl->table_elec.stride = nbl->table_elec.formatsize * nbl->table_elec.ninteractions;
1169 snew_aligned(nbl->table_elec.data,nbl->table_elec.stride*(nbl->table_elec.n+1),32);
1171 nbl->table_vdw.interaction = GMX_TABLE_INTERACTION_VDWREP_VDWDISP;
1172 nbl->table_vdw.format = nbl->table_elec_vdw.format;
1173 nbl->table_vdw.r = nbl->table_elec_vdw.r;
1174 nbl->table_vdw.n = nbl->table_elec_vdw.n;
1175 nbl->table_vdw.scale = nbl->table_elec_vdw.scale;
1176 nbl->table_vdw.scale_exp = nbl->table_elec_vdw.scale_exp;
1177 nbl->table_vdw.formatsize = nbl->table_elec_vdw.formatsize;
1178 nbl->table_vdw.ninteractions = 2;
1179 nbl->table_vdw.stride = nbl->table_vdw.formatsize * nbl->table_vdw.ninteractions;
1180 snew_aligned(nbl->table_vdw.data,nbl->table_vdw.stride*(nbl->table_vdw.n+1),32);
1182 for(i=0; i<=nbl->table_elec_vdw.n; i++)
1185 nbl->table_elec.data[4*i+j] = nbl->table_elec_vdw.data[12*i+j];
1187 nbl->table_vdw.data[8*i+j] = nbl->table_elec_vdw.data[12*i+4+j];
/* Count how many distinct bonded table numbers are referenced by interactions
 * of type ftype1 or ftype2 across all molecule types; grows *count (indexed
 * by table number) and updates *ncount to the highest table number + 1.
 * Fatal error on a negative table number.
 * NOTE(review): several lines of this function are elided in this extract.
 */
1191 static void count_tables(int ftype1,int ftype2,const gmx_mtop_t *mtop,
1192 int *ncount,int **count)
1194 const gmx_moltype_t *molt;
1196 int mt,ftype,stride,i,j,tabnr;
1198 for(mt=0; mt<mtop->nmoltype; mt++)
1200 molt = &mtop->moltype[mt];
1201 for(ftype=0; ftype<F_NRE; ftype++)
1203 if (ftype == ftype1 || ftype == ftype2) {
1204 il = &molt->ilist[ftype];
/* iatoms layout: one parameter index followed by NRAL(ftype) atoms */
1205 stride = 1 + NRAL(ftype);
1206 for(i=0; i<il->nr; i+=stride) {
1207 tabnr = mtop->ffparams.iparams[il->iatoms[i]].tab.table;
1209 gmx_fatal(FARGS,"A bonded table number is smaller than 0: %d\n",tabnr);
1210 if (tabnr >= *ncount) {
1211 srenew(*count,tabnr+1);
1212 for(j=*ncount; j<tabnr+1; j++)
/* Read all bonded tables referenced by interactions of type ftype1/ftype2.
 * File names are built from basefn by inserting "_<tabext><n>" before the
 * extension. Returns the array of tables (indexed by table number).
 * NOTE(review): several lines of this function are elided in this extract;
 * the meaning of NRAL(ftype1)-2 passed to make_bonded_table (an angle/dihedral
 * flag?) should be confirmed against the full source.
 */
1223 static bondedtable_t *make_bonded_tables(FILE *fplog,
1224 int ftype1,int ftype2,
1225 const gmx_mtop_t *mtop,
1226 const char *basefn,const char *tabext)
1228 int i,ncount,*count;
1236 count_tables(ftype1,ftype2,mtop,&ncount,&count);
1240 for(i=0; i<ncount; i++) {
1242 sprintf(tabfn,"%s",basefn);
1243 sprintf(tabfn + strlen(basefn) - strlen(ftp2ext(efXVG)) - 1,"_%s%d.%s",
1244 tabext,i,ftp2ext(efXVG));
1245 tab[i] = make_bonded_table(fplog,tabfn,NRAL(ftype1)-2);
/* Store the charge-group/atom ranges in the force record and (re)allocate the
 * force arrays when the constrained-atom count grows. Also sizes the
 * no-virial-sum force buffer when fr->bF_NoVirSum is set.
 * NOTE(review): several lines of this function are elided in this extract.
 */
1254 void forcerec_set_ranges(t_forcerec *fr,
1255 int ncg_home,int ncg_force,
1257 int natoms_force_constr,int natoms_f_novirsum)
1262 /* fr->ncg_force is unused in the standard code,
1263 * but it can be useful for modified code dealing with charge groups.
1265 fr->ncg_force = ncg_force;
1266 fr->natoms_force = natoms_force;
1267 fr->natoms_force_constr = natoms_force_constr;
1269 if (fr->natoms_force_constr > fr->nalloc_force)
1271 fr->nalloc_force = over_alloc_dd(fr->natoms_force_constr);
1275 srenew(fr->f_twin,fr->nalloc_force);
1279 if (fr->bF_NoVirSum)
1281 fr->f_novirsum_n = natoms_f_novirsum;
1282 if (fr->f_novirsum_n > fr->f_novirsum_nalloc)
1284 fr->f_novirsum_nalloc = over_alloc_dd(fr->f_novirsum_n);
1285 srenew(fr->f_novirsum_alloc,fr->f_novirsum_nalloc);
1290 fr->f_novirsum_n = 0;
/* Normalize a cut-off value: map the "no cut-off" sentinel to GMX_CUTOFF_INF
 * so comparisons treat it as infinite.
 * NOTE(review): the guarding condition and return are elided in this extract.
 */
1294 static real cutoff_inf(real cutoff)
1298 cutoff = GMX_CUTOFF_INF;
/* Load the AdResS thermodynamic-force table for each tf group; the file name
 * is built from tabfn by inserting "tf_<groupname>" before the extension.
 * Fatal error when no table file name was supplied.
 * NOTE(review): lines are elided in this extract. The printf below passes
 * ir->adress->tf_table_index[j], but j already holds the group index fetched
 * from that array — it likely should print j (or index with [i]); verify.
 */
1304 static void make_adress_tf_tables(FILE *fp,const output_env_t oenv,
1305 t_forcerec *fr,const t_inputrec *ir,
1306 const char *tabfn, const gmx_mtop_t *mtop,
1312 if (tabfn == NULL) {
1313 gmx_fatal(FARGS,"No thermoforce table file given. Use -tabletf to specify a file\n");
1317 snew(fr->atf_tabs, ir->adress->n_tf_grps);
1319 for (i=0; i<ir->adress->n_tf_grps; i++){
1320 j = ir->adress->tf_table_index[i]; /* get energy group index */
1321 sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1,"tf_%s.%s",
1322 *(mtop->groups.grpname[mtop->groups.grps[egcENER].nm_ind[j]]) ,ftp2ext(efXVG));
1323 printf("loading tf table for energygrp index %d from %s\n", ir->adress->tf_table_index[j], buf);
1324 fr->atf_tabs[i] = make_atf_table(fp,oenv,fr,buf, box);
/* Decide whether the accelerated all-vs-all kernels can be used: requires no
 * PBC, plain cut-off electrostatics and VdW, a supported (or no) implicit
 * solvent model, and no GMX_NO_ALLVSALL override. With multiple energy
 * monitor groups a performance note is printed and all-vs-all is declined.
 * NOTE(review): several lines of this function are elided in this extract.
 */
1329 gmx_bool can_use_allvsall(const t_inputrec *ir, const gmx_mtop_t *mtop,
1330 gmx_bool bPrintNote,t_commrec *cr,FILE *fp)
1339 ir->ePBC==epbcNONE &&
1340 ir->vdwtype==evdwCUT &&
1341 ir->coulombtype==eelCUT &&
1343 (ir->implicit_solvent == eisNO ||
1344 (ir->implicit_solvent==eisGBSA && (ir->gb_algorithm==egbSTILL ||
1345 ir->gb_algorithm==egbHCT ||
1346 ir->gb_algorithm==egbOBC))) &&
1347 getenv("GMX_NO_ALLVSALL") == NULL
1350 if (bAllvsAll && ir->opts.ngener > 1)
1352 const char *note="NOTE: Can not use all-vs-all force loops, because there are multiple energy monitor groups; you might get significantly higher performance when using only a single energy monitor group.\n";
1358 fprintf(stderr,"\n%s\n",note);
1362 fprintf(fp,"\n%s\n",note);
1368 if(bAllvsAll && fp && MASTER(cr))
1370 fprintf(fp,"\nUsing accelerated all-vs-all kernels.\n\n");
/* Allocate per-thread force/energy buffers for OpenMP bonded computation.
 * Thread 0 shares the global arrays, so only threads 1..nthreads-1 get their
 * own shift-force and energy-group buffers.
 * NOTE(review): several lines of this function are elided in this extract.
 */
1377 static void init_forcerec_f_threads(t_forcerec *fr,int nenergrp)
1381 /* These thread local data structures are used for bondeds only */
1382 fr->nthreads = gmx_omp_nthreads_get(emntBonded);
1384 if (fr->nthreads > 1)
1386 snew(fr->f_t,fr->nthreads);
1387 /* Thread 0 uses the global force and energy arrays */
1388 for(t=1; t<fr->nthreads; t++)
1390 fr->f_t[t].f = NULL;
1391 fr->f_t[t].f_nalloc = 0;
1392 snew(fr->f_t[t].fshift,SHIFTS);
1393 fr->f_t[t].grpp.nener = nenergrp*nenergrp;
1394 for(i=0; i<egNR; i++)
1396 snew(fr->f_t[t].grpp.ener[i],fr->f_t[t].grpp.nener);
/* Select the CPU nbnxn kernel flavour (plain C, SIMD 4xN or 2xNN) and the
 * Ewald exclusion treatment (table vs analytical), based on compile-time SIMD
 * support, the input record, and GMX_NBNXN_SIMD_* / GMX_NBNXN_EWALD_*
 * environment-variable overrides.
 * NOTE(review): several lines of this function are elided in this extract.
 */
1403 static void pick_nbnxn_kernel_cpu(FILE *fp,
1404 const t_commrec *cr,
1405 const gmx_cpuid_t cpuid_info,
1406 const t_inputrec *ir,
/* Defaults: plain C kernel with tabulated Ewald exclusion correction */
1410 *kernel_type = nbnxnk4x4_PlainC;
1411 *ewald_excl = ewaldexclTable;
1413 #ifdef GMX_NBNXN_SIMD
1415 #ifdef GMX_NBNXN_SIMD_4XN
1416 *kernel_type = nbnxnk4xN_SIMD_4xN;
1418 #ifdef GMX_NBNXN_SIMD_2XNN
1419 /* We expect the 2xNN kernels to be faster in most cases */
1420 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1423 #if defined GMX_NBNXN_SIMD_4XN && defined GMX_X86_AVX_256
1424 if (EEL_RF(ir->coulombtype) || ir->coulombtype == eelCUT)
1426 /* The raw pair rate of the 4x8 kernel is higher than 2x(4+4),
1427 * 10% with HT, 50% without HT, but extra zeros interactions
1428 * can compensate. As we currently don't detect the actual use
1429 * of HT, switch to 4x8 to avoid a potential performance hit.
1431 *kernel_type = nbnxnk4xN_SIMD_4xN;
1434 if (getenv("GMX_NBNXN_SIMD_4XN") != NULL)
1436 #ifdef GMX_NBNXN_SIMD_4XN
1437 *kernel_type = nbnxnk4xN_SIMD_4xN;
1439 gmx_fatal(FARGS,"SIMD 4xN kernels requested, but Gromacs has been compiled without support for these kernels");
1442 if (getenv("GMX_NBNXN_SIMD_2XNN") != NULL)
1444 #ifdef GMX_NBNXN_SIMD_2XNN
1445 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1447 gmx_fatal(FARGS,"SIMD 2x(N+N) kernels requested, but Gromacs has been compiled without support for these kernels");
1451 /* Analytical Ewald exclusion correction is only an option in the
1452 * x86 SIMD kernel. This is faster in single precision
1453 * on Bulldozer and slightly faster on Sandy Bridge.
1455 #if (defined GMX_X86_AVX_128_FMA || defined GMX_X86_AVX_256) && !defined GMX_DOUBLE
1456 *ewald_excl = ewaldexclAnalytical;
1458 if (getenv("GMX_NBNXN_EWALD_TABLE") != NULL)
1460 *ewald_excl = ewaldexclTable;
1462 if (getenv("GMX_NBNXN_EWALD_ANALYTICAL") != NULL)
1464 *ewald_excl = ewaldexclAnalytical;
1468 #endif /* GMX_X86_SSE2 */
/* Map a nbnxn kernel-type enum to a human-readable name for log output.
 * For SIMD kernels the name reflects the instruction set selected at compile
 * time (SSE2/SSE4.1/AVX-128/AVX-256) on x86, or generic "SIMD" elsewhere.
 * NOTE(review): several lines (switch header, default case) are elided in
 * this extract.
 */
1472 const char *lookup_nbnxn_kernel_name(int kernel_type)
1474 const char *returnvalue = NULL;
1477 case nbnxnkNotSet: returnvalue = "not set"; break;
1478 case nbnxnk4x4_PlainC: returnvalue = "plain C"; break;
1479 #ifndef GMX_NBNXN_SIMD
1480 case nbnxnk4xN_SIMD_4xN: returnvalue = "not available"; break;
1481 case nbnxnk4xN_SIMD_2xNN: returnvalue = "not available"; break;
1484 #if GMX_NBNXN_SIMD_BITWIDTH == 128
1485 /* x86 SIMD intrinsics can be converted to either SSE or AVX depending
1486 * on compiler flags. As we use nearly identical intrinsics, using an AVX
1487 * compiler flag without an AVX macro effectively results in AVX kernels.
1488 * For gcc we check for __AVX__
1489 * At least a check for icc should be added (if there is a macro)
1491 #if !(defined GMX_X86_AVX_128_FMA || defined __AVX__)
1492 #ifndef GMX_X86_SSE4_1
1493 case nbnxnk4xN_SIMD_4xN: returnvalue = "SSE2"; break;
1494 case nbnxnk4xN_SIMD_2xNN: returnvalue = "SSE2"; break;
1496 case nbnxnk4xN_SIMD_4xN: returnvalue = "SSE4.1"; break;
1497 case nbnxnk4xN_SIMD_2xNN: returnvalue = "SSE4.1"; break;
1500 case nbnxnk4xN_SIMD_4xN: returnvalue = "AVX-128"; break;
1501 case nbnxnk4xN_SIMD_2xNN: returnvalue = "AVX-128"; break;
1504 #if GMX_NBNXN_SIMD_BITWIDTH == 256
1505 case nbnxnk4xN_SIMD_4xN: returnvalue = "AVX-256"; break;
1506 case nbnxnk4xN_SIMD_2xNN: returnvalue = "AVX-256"; break;
1508 #else /* not GMX_X86_SSE2 */
1509 case nbnxnk4xN_SIMD_4xN: returnvalue = "SIMD"; break;
1510 case nbnxnk4xN_SIMD_2xNN: returnvalue = "SIMD"; break;
1513 case nbnxnk8x8x8_CUDA: returnvalue = "CUDA"; break;
1514 case nbnxnk8x8x8_PlainC: returnvalue = "plain C"; break;
1518 gmx_fatal(FARGS, "Illegal kernel type selected");
/* Choose the nbnxn kernel: GPU emulation (8x8x8 plain C) when requested,
 * CUDA when a GPU is used, otherwise delegate to pick_nbnxn_kernel_cpu (or
 * fall back to plain C when CPU acceleration is disabled). Logs the choice.
 * NOTE(review): several lines of this function are elided in this extract.
 */
1525 static void pick_nbnxn_kernel(FILE *fp,
1526 const t_commrec *cr,
1527 const gmx_hw_info_t *hwinfo,
1528 gmx_bool use_cpu_acceleration,
1530 gmx_bool bEmulateGPU,
1531 const t_inputrec *ir,
1534 gmx_bool bDoNonbonded)
1536 assert(kernel_type);
1538 *kernel_type = nbnxnkNotSet;
1539 *ewald_excl = ewaldexclTable;
1543 *kernel_type = nbnxnk8x8x8_PlainC;
1547 md_print_warn(cr, fp, "Emulating a GPU run on the CPU (slow)");
1552 *kernel_type = nbnxnk8x8x8_CUDA;
1555 if (*kernel_type == nbnxnkNotSet)
1557 if (use_cpu_acceleration)
1559 pick_nbnxn_kernel_cpu(fp,cr,hwinfo->cpuid_info,ir,
1560 kernel_type,ewald_excl);
1564 *kernel_type = nbnxnk4x4_PlainC;
1568 if (bDoNonbonded && fp != NULL)
1570 fprintf(fp,"\nUsing %s %dx%d non-bonded kernels\n\n",
1571 lookup_nbnxn_kernel_name(*kernel_type),
1572 nbnxn_kernel_pairlist_simple(*kernel_type) ? NBNXN_CPU_CLUSTER_I_SIZE : NBNXN_GPU_CLUSTER_SIZE,
1573 nbnxn_kernel_to_cj_size(*kernel_type));
/* Decide whether this rank runs non-bonded work on a real GPU or in GPU
 * emulation mode, and initialize the GPU device if one will be used.
 * NOTE(review): the listing is elided; the output flag for real GPU use
 * (set near the "turn on hardware GPU acceleration" comment) and some
 * braces are not visible here. */
1577 static void pick_nbnxn_resources(FILE *fp,
1578 const t_commrec *cr,
1579 const gmx_hw_info_t *hwinfo,
1580 gmx_bool bDoNonbonded,
1582 gmx_bool *bEmulateGPU)
1584 gmx_bool bEmulateGPUEnvVarSet;
1585 char gpu_err_str[STRLEN];
1589 bEmulateGPUEnvVarSet = (getenv("GMX_EMULATE_GPU") != NULL);
1591 /* Run GPU emulation mode if GMX_EMULATE_GPU is defined. Because
1592 * GPUs (currently) only handle non-bonded calculations, we will
1593 * automatically switch to emulation if non-bonded calculations are
1594 * turned off via GMX_NO_NONBONDED - this is the simple and elegant
1595 * way to turn off GPU initialization, data movement, and cleanup.
1597 * GPU emulation can be useful to assess the performance one can expect by
1598 * adding GPU(s) to the machine. The conditional below allows this even
1599 * if mdrun is compiled without GPU acceleration support.
1600 * Note that you should freeze the system as otherwise it will explode.
1602 *bEmulateGPU = (bEmulateGPUEnvVarSet ||
1603 (!bDoNonbonded && hwinfo->bCanUseGPU));
1605 /* Enable GPU mode when GPUs are available or no GPU emulation is requested.
1607 if (hwinfo->bCanUseGPU && !(*bEmulateGPU))
1609 /* Each PP node will use the intra-node id-th device from the
1610 * list of detected/selected GPUs. */
1611 if (!init_gpu(cr->rank_pp_intranode, gpu_err_str, &hwinfo->gpu_info))
1613 /* At this point the init should never fail as we made sure that
1614 * we have all the GPUs we need. If it still does, we'll bail. */
1615 gmx_fatal(FARGS, "On node %d failed to initialize GPU #%d: %s",
1617 get_gpu_device_id(&hwinfo->gpu_info, cr->rank_pp_intranode),
1621 /* Here we actually turn on hardware GPU acceleration */
/* Return whether the given cutoff scheme / kernel uses the "simple"
 * (CPU pair-list) table layout. With the Verlet scheme this depends on
 * the kernel chosen for the selected group (group < 0 selects group 0).
 * NOTE(review): the listing is elided; the third parameter (group), the
 * switch case labels and the declaration of grp_index are not visible. */
1626 gmx_bool uses_simple_tables(int cutoff_scheme,
1627 nonbonded_verlet_t *nbv,
1630 gmx_bool bUsesSimpleTables = TRUE;
1633 switch(cutoff_scheme)
/* group scheme: always simple tables */
1636 bUsesSimpleTables = TRUE;
/* verlet scheme: depends on the kernel type of the selected group */
1639 assert(NULL != nbv && NULL != nbv->grp);
1640 grp_index = (group < 0) ? 0 : (nbv->ngrp - 1);
1641 bUsesSimpleTables = nbnxn_kernel_pairlist_simple(nbv->grp[grp_index].kernel_type);
/* unknown cutoff scheme: internal inconsistency */
1644 gmx_incons("unimplemented");
1646 return bUsesSimpleTables;
/* (Re)create the tabulated Ewald correction force/potential tables in ic.
 * Simple (CPU) kernels get a spacing-based table sized out to
 * max(rtab, rcoulomb); GPU kernels use a fixed table size scaled to
 * rcoulomb. NOTE(review): the listing is elided (declarations of maxr
 * and rtab, plus some braces, are not visible). */
1649 static void init_ewald_f_table(interaction_const_t *ic,
1650 gmx_bool bUsesSimpleTables,
1655 if (bUsesSimpleTables)
1657 /* With a spacing of 0.0005 we are at the force summation accuracy
1658 * for the SSE kernels for "normal" atomistic simulations.
1660 ic->tabq_scale = ewald_spline3_table_scale(ic->ewaldcoeff,
/* The table must span at least out to the Coulomb cut-off */
1663 maxr = (rtab>ic->rcoulomb) ? rtab : ic->rcoulomb;
1664 ic->tabq_size = (int)(maxr*ic->tabq_scale) + 2;
/* GPU path: fixed table size, scale derived from rcoulomb */
1668 ic->tabq_size = GPU_EWALD_COULOMB_FORCE_TABLE_SIZE;
1669 /* Subtract 2 iso 1 to avoid access out of range due to rounding */
1670 ic->tabq_scale = (ic->tabq_size - 2)/ic->rcoulomb;
/* Free any previous tables before reallocating at the new size */
1673 sfree_aligned(ic->tabq_coul_FDV0);
1674 sfree_aligned(ic->tabq_coul_F);
1675 sfree_aligned(ic->tabq_coul_V);
1677 /* Create the original table data in FDV0 */
1678 snew_aligned(ic->tabq_coul_FDV0,ic->tabq_size*4,32);
1679 snew_aligned(ic->tabq_coul_F,ic->tabq_size,32);
1680 snew_aligned(ic->tabq_coul_V,ic->tabq_size,32);
1681 table_spline3_fill_ewald_lr(ic->tabq_coul_F,ic->tabq_coul_V,ic->tabq_coul_FDV0,
1682 ic->tabq_size,1/ic->tabq_scale,ic->ewaldcoeff);
/* Initialize interaction-constant tables; currently only the Ewald
 * correction tables, which are needed for plain Ewald and PME
 * electrostatics. */
1685 void init_interaction_const_tables(FILE *fp,
1686 interaction_const_t *ic,
1687 gmx_bool bUsesSimpleTables,
/* fourth parameter (rtab, table range) is on an elided line */
1692 if (ic->eeltype == eelEWALD || EEL_PME(ic->eeltype))
1694 init_ewald_f_table(ic,bUsesSimpleTables,rtab);
/* NOTE(review): an fp != NULL guard is presumably on an elided line
 * before this fprintf - confirm before relying on fp being non-NULL */
1698 fprintf(fp,"Initialized non-bonded Ewald correction tables, spacing: %.2e size: %d\n\n",
1699 1/ic->tabq_scale,ic->tabq_size);
/* Allocate and fill the interaction_const_t structure from the forcerec,
 * upload it to the GPU when one is in use, and build the Ewald tables.
 * On success *interaction_const points to the new structure (owned by the
 * caller/forcerec). NOTE(review): the listing is elided; the snew of ic,
 * the rtab parameter and several braces are not visible. */
1704 void init_interaction_const(FILE *fp,
1705 interaction_const_t **interaction_const,
1706 const t_forcerec *fr,
1709 interaction_const_t *ic;
1710 gmx_bool bUsesSimpleTables = TRUE;
1714 /* Just allocate something so we can free it */
1715 snew_aligned(ic->tabq_coul_FDV0,16,32);
1716 snew_aligned(ic->tabq_coul_F,16,32);
1717 snew_aligned(ic->tabq_coul_V,16,32);
/* Copy cut-off radii from the forcerec */
1719 ic->rlist = fr->rlist;
1720 ic->rlistlong = fr->rlistlong;
/* Van der Waals: potential-shift constant is r_c^-6 at the cut-off */
1723 ic->rvdw = fr->rvdw;
1724 if (fr->vdw_modifier==eintmodPOTSHIFT)
1726 ic->sh_invrc6 = pow(ic->rvdw,-6.0);
1733 /* Electrostatics */
1734 ic->eeltype = fr->eeltype;
1735 ic->rcoulomb = fr->rcoulomb;
1736 ic->epsilon_r = fr->epsilon_r;
1737 ic->epsfac = fr->epsfac;
/* Ewald: potential shift is erfc(beta*rc) at the cut-off */
1740 ic->ewaldcoeff = fr->ewaldcoeff;
1741 if (fr->coulomb_modifier==eintmodPOTSHIFT)
1743 ic->sh_ewald = gmx_erfc(ic->ewaldcoeff*ic->rcoulomb);
1750 /* Reaction-field */
1751 if (EEL_RF(ic->eeltype))
1753 ic->epsilon_rf = fr->epsilon_rf;
1754 ic->k_rf = fr->k_rf;
1755 ic->c_rf = fr->c_rf;
1759 /* For plain cut-off we might use the reaction-field kernels */
1760 ic->epsilon_rf = ic->epsilon_r;
/* with k_rf = 0 (elided), potential shift reduces to 1/rc */
1762 if (fr->coulomb_modifier==eintmodPOTSHIFT)
1764 ic->c_rf = 1/ic->rcoulomb;
/* Log the potential-shift constants (fp guard presumably on elided line) */
1774 fprintf(fp,"Potential shift: LJ r^-12: %.3f r^-6 %.3f",
1775 sqr(ic->sh_invrc6),ic->sh_invrc6);
1776 if (ic->eeltype == eelCUT)
1778 fprintf(fp,", Coulomb %.3f",ic->c_rf);
1780 else if (EEL_PME(ic->eeltype))
1782 fprintf(fp,", Ewald %.3e",ic->sh_ewald);
/* Hand the filled structure back to the caller */
1787 *interaction_const = ic;
/* Upload the constants to the GPU when non-bondeds run there */
1789 if (fr->nbv != NULL && fr->nbv->bUseGPU)
1791 nbnxn_cuda_init_const(fr->nbv->cu_nbv, ic, fr->nbv);
1794 bUsesSimpleTables = uses_simple_tables(fr->cutoff_scheme, fr->nbv, -1);
1795 init_interaction_const_tables(fp,ic,bUsesSimpleTables,rtab);
/* Allocate and set up the nonbonded_verlet_t data: pick GPU/CPU resources,
 * choose a kernel per group (local / non-local with domain decomposition),
 * initialize CUDA data, pair-list sets, atom data and the search grid.
 * NOTE(review): the listing is elided; the snew of nbv, the assignment of
 * nbv->bUseGPU, several arguments and braces are not visible. */
1798 static void init_nb_verlet(FILE *fp,
1799 nonbonded_verlet_t *nb_verlet,
1800 const t_inputrec *ir,
1801 const t_forcerec *fr,
1802 const t_commrec *cr,
1803 const char *nbpu_opt)
1805 nonbonded_verlet_t *nbv;
1808 gmx_bool bEmulateGPU, bHybridGPURun = FALSE;
1810 nbnxn_alloc_t *nb_alloc;
1811 nbnxn_free_t *nb_free;
/* Decide GPU vs emulation and init the device if used */
1815 pick_nbnxn_resources(fp, cr, fr->hwinfo,
/* Two groups with domain decomposition: local + non-local */
1822 nbv->ngrp = (DOMAINDECOMP(cr) ? 2 : 1);
1823 for(i=0; i<nbv->ngrp; i++)
1825 nbv->grp[i].nbl_lists.nnbl = 0;
1826 nbv->grp[i].nbat = NULL;
1827 nbv->grp[i].kernel_type = nbnxnkNotSet;
1829 if (i == 0) /* local */
1831 pick_nbnxn_kernel(fp, cr, fr->hwinfo, fr->use_cpu_acceleration,
1832 nbv->bUseGPU, bEmulateGPU,
1834 &nbv->grp[i].kernel_type,
1835 &nbv->grp[i].ewald_excl,
1838 else /* non-local */
1840 if (nbpu_opt != NULL && strcmp(nbpu_opt,"gpu_cpu") == 0)
1842 /* Use GPU for local, select a CPU kernel for non-local */
1843 pick_nbnxn_kernel(fp, cr, fr->hwinfo, fr->use_cpu_acceleration,
1846 &nbv->grp[i].kernel_type,
1847 &nbv->grp[i].ewald_excl,
1850 bHybridGPURun = TRUE;
1854 /* Use the same kernel for local and non-local interactions */
1855 nbv->grp[i].kernel_type = nbv->grp[0].kernel_type;
1856 nbv->grp[i].ewald_excl = nbv->grp[0].ewald_excl;
1863 /* init the NxN GPU data; the last argument tells whether we'll have
1864 * both local and non-local NB calculation on GPU */
1865 nbnxn_cuda_init(fp, &nbv->cu_nbv,
1866 &fr->hwinfo->gpu_info, cr->rank_pp_intranode,
1867 (nbv->ngrp > 1) && !bHybridGPURun);
/* Optional user override of the pair-list balancing parameter */
1869 if ((env = getenv("GMX_NB_MIN_CI")) != NULL)
1873 nbv->min_ci_balanced = strtol(env, &end, 10);
1874 if (!end || (*end != 0) || nbv->min_ci_balanced <= 0)
1876 gmx_fatal(FARGS, "Invalid value passed in GMX_NB_MIN_CI=%s, positive integer required", env);
1881 fprintf(debug, "Neighbor-list balancing parameter: %d (passed as env. var.)\n",
1882 nbv->min_ci_balanced);
/* Default: auto-tune to the GPU's multiprocessor count */
1887 nbv->min_ci_balanced = nbnxn_cuda_min_ci_balanced(nbv->cu_nbv);
1890 fprintf(debug, "Neighbor-list balancing parameter: %d (auto-adjusted to the number of GPU multi-processors)\n",
1891 nbv->min_ci_balanced);
/* No balancing when not running on a GPU */
1897 nbv->min_ci_balanced = 0;
/* Set up the pair search grid/machinery */
1902 nbnxn_init_search(&nbv->nbs,
1903 DOMAINDECOMP(cr) ? & cr->dd->nc : NULL,
1904 DOMAINDECOMP(cr) ? domdec_zones(cr->dd) : NULL,
1905 gmx_omp_nthreads_get(emntNonbonded));
1907 for(i=0; i<nbv->ngrp; i++)
/* CUDA kernels need pinned host memory for transfers */
1909 if (nbv->grp[0].kernel_type == nbnxnk8x8x8_CUDA)
1911 nb_alloc = &pmalloc;
1920 nbnxn_init_pairlist_set(&nbv->grp[i].nbl_lists,
1921 nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
1922 /* 8x8x8 "non-simple" lists are ATM always combined */
1923 !nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
/* Separate atom data only when the non-local kernel differs */
1927 nbv->grp[0].kernel_type != nbv->grp[i].kernel_type)
1929 snew(nbv->grp[i].nbat,1);
1930 nbnxn_atomdata_init(fp,
1932 nbv->grp[i].kernel_type,
1935 nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type) ? gmx_omp_nthreads_get(emntNonbonded) : 1,
/* Otherwise share the local group's atom data */
1940 nbv->grp[i].nbat = nbv->grp[0].nbat;
/* Initialize the force record fr from the input record, topology and box:
 * copies parameters (cut-offs, FEP soft-core, AdResS, user values), picks
 * kernel/table options, builds all lookup tables (non-bonded, 1-4, GB,
 * wall, bonded, AdResS TF), sets RF/Ewald constants and finally sets up
 * neighbor searching and, for the Verlet scheme, the nbnxn machinery.
 * NOTE(review): this numbered listing is heavily elided; many parameter
 * declarations (fr, box, tabfn, tabpfn, tabafn, tabbfn, fcd, print_force,
 * ...), guard conditions and braces are not visible here. */
1945 void init_forcerec(FILE *fp,
1946 const output_env_t oenv,
1949 const t_inputrec *ir,
1950 const gmx_mtop_t *mtop,
1951 const t_commrec *cr,
1958 const char *nbpu_opt,
1959 gmx_bool bNoSolvOpt,
1962 int i,j,m,natoms,ngrp,negp_pp,negptable,egi,egj;
1968 gmx_bool bGenericKernelOnly;
1969 gmx_bool bTab,bSep14tab,bNormalnblists;
1971 int *nm_ind,egp_flags;
1973 /* By default we turn acceleration on, but it might be turned off further down... */
1974 fr->use_cpu_acceleration = TRUE;
1976 fr->bDomDec = DOMAINDECOMP(cr);
1978 natoms = mtop->natoms;
1980 if (check_box(ir->ePBC,box))
/* NOTE(review): check_box's message is passed as the format string here;
 * if it ever contained '%' this would be a format-string bug - safer is
 * gmx_fatal(FARGS,"%s",check_box(...)). Left unchanged (elided context). */
1982 gmx_fatal(FARGS,check_box(ir->ePBC,box));
1985 /* Test particle insertion ? */
1986 if (EI_TPI(ir->eI)) {
1987 /* Set to the size of the molecule to be inserted (the last one) */
1988 /* Because of old style topologies, we have to use the last cg
1989 * instead of the last molecule type.
1991 cgs = &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].cgs;
1992 fr->n_tpi = cgs->index[cgs->nr] - cgs->index[cgs->nr-1];
1993 if (fr->n_tpi != mtop->mols.index[mtop->mols.nr] - mtop->mols.index[mtop->mols.nr-1]) {
1994 gmx_fatal(FARGS,"The molecule to insert can not consist of multiple charge groups.\nMake it a single charge group.");
2000 /* Copy AdResS parameters */
/* (guarded by an elided adress-enabled check; else-branch at 2024) */
2002 fr->adress_type = ir->adress->type;
2003 fr->adress_const_wf = ir->adress->const_wf;
2004 fr->adress_ex_width = ir->adress->ex_width;
2005 fr->adress_hy_width = ir->adress->hy_width;
2006 fr->adress_icor = ir->adress->icor;
2007 fr->adress_site = ir->adress->site;
2008 fr->adress_ex_forcecap = ir->adress->ex_forcecap;
2009 fr->adress_do_hybridpairs = ir->adress->do_hybridpairs;
2012 snew(fr->adress_group_explicit , ir->adress->n_energy_grps);
2013 for (i=0; i< ir->adress->n_energy_grps; i++){
2014 fr->adress_group_explicit[i]= ir->adress->group_explicit[i];
2017 fr->n_adress_tf_grps = ir->adress->n_tf_grps;
2018 snew(fr->adress_tf_table_index, fr->n_adress_tf_grps);
2019 for (i=0; i< fr->n_adress_tf_grps; i++){
2020 fr->adress_tf_table_index[i]= ir->adress->tf_table_index[i];
2022 copy_rvec(ir->adress->refs,fr->adress_refs);
/* AdResS disabled */
2024 fr->adress_type = eAdressOff;
2025 fr->adress_do_hybridpairs = FALSE;
2028 /* Copy the user determined parameters */
2029 fr->userint1 = ir->userint1;
2030 fr->userint2 = ir->userint2;
2031 fr->userint3 = ir->userint3;
2032 fr->userint4 = ir->userint4;
2033 fr->userreal1 = ir->userreal1;
2034 fr->userreal2 = ir->userreal2;
2035 fr->userreal3 = ir->userreal3;
2036 fr->userreal4 = ir->userreal4;
2039 fr->fc_stepsize = ir->fc_stepsize;
/* Free-energy / soft-core parameters */
2042 fr->efep = ir->efep;
2043 fr->sc_alphavdw = ir->fepvals->sc_alpha;
2044 if (ir->fepvals->bScCoul)
2046 fr->sc_alphacoul = ir->fepvals->sc_alpha;
2047 fr->sc_sigma6_min = pow(ir->fepvals->sc_sigma_min,6);
2051 fr->sc_alphacoul = 0;
2052 fr->sc_sigma6_min = 0; /* only needed when bScCoul is on */
2054 fr->sc_power = ir->fepvals->sc_power;
2055 fr->sc_r_power = ir->fepvals->sc_r_power;
2056 fr->sc_sigma6_def = pow(ir->fepvals->sc_sigma,6);
/* Optional environment override of the minimum soft-core sigma */
2058 env = getenv("GMX_SCSIGMA_MIN");
2062 sscanf(env,"%lf",&dbl);
2063 fr->sc_sigma6_min = pow(dbl,6);
2066 fprintf(fp,"Setting the minimum soft core sigma to %g nm\n",dbl);
2070 fr->bNonbonded = TRUE;
2071 if (getenv("GMX_NO_NONBONDED") != NULL)
2073 /* turn off non-bonded calculations */
2074 fr->bNonbonded = FALSE;
2075 md_print_warn(cr,fp,
2076 "Found environment variable GMX_NO_NONBONDED.\n"
2077 "Disabling nonbonded calculations.\n");
2080 bGenericKernelOnly = FALSE;
2082 /* We now check in the NS code whether a particular combination of interactions
2083 * can be used with water optimization, and disable it if that is not the case.
2086 if (getenv("GMX_NB_GENERIC") != NULL)
2091 "Found environment variable GMX_NB_GENERIC.\n"
2092 "Disabling all interaction-specific nonbonded kernels, will only\n"
2093 "use the slow generic ones in src/gmxlib/nonbonded/nb_generic.c\n\n");
2095 bGenericKernelOnly = TRUE;
/* Generic kernels imply no solvent optimization (elided assignment) */
2098 if (bGenericKernelOnly==TRUE)
2103 if( (getenv("GMX_DISABLE_CPU_ACCELERATION") != NULL) || (getenv("GMX_NOOPTIMIZEDKERNELS") != NULL) )
2105 fr->use_cpu_acceleration = FALSE;
2109 "\nFound environment variable GMX_DISABLE_CPU_ACCELERATION.\n"
2110 "Disabling all CPU architecture-specific (e.g. SSE2/SSE4/AVX) routines.\n\n");
/* Buckingham force field if the first function type is BHAM */
2114 fr->bBHAM = (mtop->ffparams.functype[0] == F_BHAM);
2116 /* Check if we can/should do all-vs-all kernels */
2117 fr->bAllvsAll = can_use_allvsall(ir,mtop,FALSE,NULL,NULL);
2118 fr->AllvsAll_work = NULL;
2119 fr->AllvsAll_workgb = NULL;
2122 /* Neighbour searching stuff */
2123 fr->cutoff_scheme = ir->cutoff_scheme;
2124 fr->bGrid = (ir->ns_type == ensGRID);
2125 fr->ePBC = ir->ePBC;
2127 /* Determine if we will do PBC for distances in bonded interactions */
2128 if (fr->ePBC == epbcNONE)
2130 fr->bMolPBC = FALSE;
2134 if (!DOMAINDECOMP(cr))
2136 /* The group cut-off scheme and SHAKE assume charge groups
2137 * are whole, but not using molpbc is faster in most cases.
2139 if (fr->cutoff_scheme == ecutsGROUP ||
2140 (ir->eConstrAlg == econtSHAKE &&
2141 (gmx_mtop_ftype_count(mtop,F_CONSTR) > 0 ||
2142 gmx_mtop_ftype_count(mtop,F_CONSTRNC) > 0)))
2144 fr->bMolPBC = ir->bPeriodicMols;
2149 if (getenv("GMX_USE_GRAPH") != NULL)
2151 fr->bMolPBC = FALSE;
2154 fprintf(fp,"\nGMX_MOLPBC is set, using the graph for bonded interactions\n\n");
/* With domain decomposition, DD decides whether bondeds need mol-PBC */
2161 fr->bMolPBC = dd_bonded_molpbc(cr->dd,fr->ePBC);
2164 fr->bGB = (ir->implicit_solvent == eisGBSA);
2166 fr->rc_scaling = ir->refcoord_scaling;
2167 copy_rvec(ir->posres_com,fr->posres_com);
2168 copy_rvec(ir->posres_comB,fr->posres_comB);
2169 fr->rlist = cutoff_inf(ir->rlist);
2170 fr->rlistlong = cutoff_inf(ir->rlistlong);
2171 fr->eeltype = ir->coulombtype;
2172 fr->vdwtype = ir->vdwtype;
2174 fr->coulomb_modifier = ir->coulomb_modifier;
2175 fr->vdw_modifier = ir->vdw_modifier;
2177 /* Electrostatics: Translate from interaction-setting-in-mdp-file to kernel interaction format */
/* (switch over fr->eeltype; case labels partly elided) */
2181 fr->nbkernel_elec_interaction = (fr->bGB) ? GMX_NBKERNEL_ELEC_GENERALIZEDBORN : GMX_NBKERNEL_ELEC_COULOMB;
2187 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
2191 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
2192 fr->coulomb_modifier = eintmodEXACTCUTOFF;
2201 case eelPMEUSERSWITCH:
2202 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
2207 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_EWALD;
2211 gmx_fatal(FARGS,"Unsupported electrostatic interaction: %s",eel_names[fr->eeltype]);
2215 /* Vdw: Translate from mdp settings to kernel format */
/* (switch over fr->vdwtype; case labels partly elided) */
2221 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_BUCKINGHAM;
2225 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LENNARDJONES;
2232 case evdwENCADSHIFT:
2233 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
2237 gmx_fatal(FARGS,"Unsupported vdw interaction: %s",evdw_names[fr->vdwtype]);
2241 /* These start out identical to ir, but might be altered if we e.g. tabulate the interaction in the kernel */
2242 fr->nbkernel_elec_modifier = fr->coulomb_modifier;
2243 fr->nbkernel_vdw_modifier = fr->vdw_modifier;
2245 fr->bTwinRange = fr->rlistlong > fr->rlist;
2246 fr->bEwald = (EEL_PME(fr->eeltype) || fr->eeltype==eelEWALD);
2248 fr->reppow = mtop->ffparams.reppow;
/* Group scheme: decide whether coulomb/vdw get tabulated kernels */
2250 if (ir->cutoff_scheme == ecutsGROUP)
2252 fr->bvdwtab = (fr->vdwtype != evdwCUT ||
2253 !gmx_within_tol(fr->reppow,12.0,10*GMX_DOUBLE_EPS));
2254 /* We have special kernels for standard Ewald and PME, but the pme-switch ones are tabulated above */
2255 fr->bcoultab = !(fr->eeltype == eelCUT ||
2256 fr->eeltype == eelEWALD ||
2257 fr->eeltype == eelPME ||
2258 fr->eeltype == eelRF ||
2259 fr->eeltype == eelRF_ZERO);
2261 /* If the user absolutely wants different switch/shift settings for coul/vdw, it is likely
2262 * going to be faster to tabulate the interaction than calling the generic kernel.
2264 if(fr->nbkernel_elec_modifier==eintmodPOTSWITCH && fr->nbkernel_vdw_modifier==eintmodPOTSWITCH)
2266 if((fr->rcoulomb_switch != fr->rvdw_switch) || (fr->rcoulomb != fr->rvdw))
2268 fr->bcoultab = TRUE;
2271 else if((fr->nbkernel_elec_modifier==eintmodPOTSHIFT && fr->nbkernel_vdw_modifier==eintmodPOTSHIFT) ||
2272 ((fr->nbkernel_elec_interaction == GMX_NBKERNEL_ELEC_REACTIONFIELD &&
2273 fr->nbkernel_elec_modifier==eintmodEXACTCUTOFF &&
2274 (fr->nbkernel_vdw_modifier==eintmodPOTSWITCH || fr->nbkernel_vdw_modifier==eintmodPOTSHIFT))))
2276 if(fr->rcoulomb != fr->rvdw)
2278 fr->bcoultab = TRUE;
/* Debug/testing override: force tabulated kernels */
2282 if (getenv("GMX_REQUIRE_TABLES"))
2285 fr->bcoultab = TRUE;
2290 fprintf(fp,"Table routines are used for coulomb: %s\n",bool_names[fr->bcoultab]);
2291 fprintf(fp,"Table routines are used for vdw: %s\n",bool_names[fr->bvdwtab ]);
/* Tabulated kernels replace the analytic interaction + modifier */
2294 if(fr->bvdwtab==TRUE)
2296 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
2297 fr->nbkernel_vdw_modifier = eintmodNONE;
2299 if(fr->bcoultab==TRUE)
2301 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
2302 fr->nbkernel_elec_modifier = eintmodNONE;
2306 if (ir->cutoff_scheme == ecutsVERLET)
2308 if (!gmx_within_tol(fr->reppow,12.0,10*GMX_DOUBLE_EPS))
/* NOTE(review): "%S" is not a standard printf conversion for a C string;
 * this almost certainly should be "%s". Left unchanged (doc-only pass). */
2310 gmx_fatal(FARGS,"Cut-off scheme %S only supports LJ repulsion power 12",ecutscheme_names[ir->cutoff_scheme]);
2312 fr->bvdwtab = FALSE;
2313 fr->bcoultab = FALSE;
2316 /* Tables are used for direct ewald sum */
2319 if (EEL_PME(ir->coulombtype))
2322 fprintf(fp,"Will do PME sum in reciprocal space.\n");
2323 if (ir->coulombtype == eelP3M_AD)
2325 please_cite(fp,"Hockney1988");
2326 please_cite(fp,"Ballenegger2012");
2330 please_cite(fp,"Essmann95a");
2333 if (ir->ewald_geometry == eewg3DC)
2337 fprintf(fp,"Using the Ewald3DC correction for systems with a slab geometry.\n");
2339 please_cite(fp,"In-Chul99a");
2342 fr->ewaldcoeff=calc_ewaldcoeff(ir->rcoulomb, ir->ewald_rtol);
2343 init_ewald_tab(&(fr->ewald_table), cr, ir, fp);
2346 fprintf(fp,"Using a Gaussian width (1/beta) of %g nm for Ewald\n",
2351 /* Electrostatics */
2352 fr->epsilon_r = ir->epsilon_r;
2353 fr->epsilon_rf = ir->epsilon_rf;
2354 fr->fudgeQQ = mtop->ffparams.fudgeQQ;
2355 fr->rcoulomb_switch = ir->rcoulomb_switch;
2356 fr->rcoulomb = cutoff_inf(ir->rcoulomb);
2358 /* Parameters for generalized RF */
2362 if (fr->eeltype == eelGRF)
2364 init_generalized_rf(fp,mtop,ir,fr);
2366 else if (fr->eeltype == eelSHIFT)
2368 for(m=0; (m<DIM); m++)
2369 box_size[m]=box[m][m];
2371 if ((fr->eeltype == eelSHIFT && fr->rcoulomb > fr->rcoulomb_switch))
2372 set_shift_consts(fp,fr->rcoulomb_switch,fr->rcoulomb,box_size,fr);
/* Forces without virial contribution: full electrostatics, position
 * restraints, electric fields or AdResS interface correction */
2375 fr->bF_NoVirSum = (EEL_FULL(fr->eeltype) ||
2376 gmx_mtop_ftype_count(mtop,F_POSRES) > 0 ||
2377 IR_ELEC_FIELD(*ir) ||
2378 (fr->adress_icor != eAdressICOff)
2381 if (fr->cutoff_scheme == ecutsGROUP &&
2382 ncg_mtop(mtop) > fr->cg_nalloc && !DOMAINDECOMP(cr)) {
2383 /* Count the total number of charge groups */
2384 fr->cg_nalloc = ncg_mtop(mtop);
2385 srenew(fr->cg_cm,fr->cg_nalloc);
2387 if (fr->shift_vec == NULL)
2388 snew(fr->shift_vec,SHIFTS);
2390 if (fr->fshift == NULL)
2391 snew(fr->fshift,SHIFTS);
/* Build the non-bonded force parameter matrix once */
2393 if (fr->nbfp == NULL) {
2394 fr->ntype = mtop->ffparams.atnr;
2395 fr->nbfp = mk_nbfp(&mtop->ffparams,fr->bBHAM);
2398 /* Copy the energy group exclusions */
2399 fr->egp_flags = ir->opts.egp_flags;
2401 /* Van der Waals stuff */
2402 fr->rvdw = cutoff_inf(ir->rvdw);
2403 fr->rvdw_switch = ir->rvdw_switch;
2404 if ((fr->vdwtype != evdwCUT) && (fr->vdwtype != evdwUSER) && !fr->bBHAM) {
2405 if (fr->rvdw_switch >= fr->rvdw)
2406 gmx_fatal(FARGS,"rvdw_switch (%f) must be < rvdw (%f)",
2407 fr->rvdw_switch,fr->rvdw);
2409 fprintf(fp,"Using %s Lennard-Jones, switch between %g and %g nm\n",
2410 (fr->eeltype==eelSWITCH) ? "switched":"shifted",
2411 fr->rvdw_switch,fr->rvdw);
2414 if (fr->bBHAM && (fr->vdwtype == evdwSHIFT || fr->vdwtype == evdwSWITCH))
2415 gmx_fatal(FARGS,"Switch/shift interaction not supported with Buckingham");
2418 fprintf(fp,"Cut-off's: NS: %g Coulomb: %g %s: %g\n",
2419 fr->rlist,fr->rcoulomb,fr->bBHAM ? "BHAM":"LJ",fr->rvdw);
2421 fr->eDispCorr = ir->eDispCorr;
2422 if (ir->eDispCorr != edispcNO)
2424 set_avcsixtwelve(fp,fr,mtop);
/* (guarded by an elided bBHAM check) */
2429 set_bham_b_max(fp,fr,mtop);
2432 fr->gb_epsilon_solvent = ir->gb_epsilon_solvent;
2434 /* Copy the GBSA data (radius, volume and surftens for each
2435 * atomtype) from the topology atomtype section to forcerec.
2437 snew(fr->atype_radius,fr->ntype);
2438 snew(fr->atype_vol,fr->ntype);
2439 snew(fr->atype_surftens,fr->ntype);
2440 snew(fr->atype_gb_radius,fr->ntype);
2441 snew(fr->atype_S_hct,fr->ntype);
2443 if (mtop->atomtypes.nr > 0)
2445 for(i=0;i<fr->ntype;i++)
2446 fr->atype_radius[i] =mtop->atomtypes.radius[i];
2447 for(i=0;i<fr->ntype;i++)
2448 fr->atype_vol[i] = mtop->atomtypes.vol[i];
2449 for(i=0;i<fr->ntype;i++)
2450 fr->atype_surftens[i] = mtop->atomtypes.surftens[i];
2451 for(i=0;i<fr->ntype;i++)
2452 fr->atype_gb_radius[i] = mtop->atomtypes.gb_radius[i];
2453 for(i=0;i<fr->ntype;i++)
2454 fr->atype_S_hct[i] = mtop->atomtypes.S_hct[i];
2457 /* Generate the GB table if needed */
2461 fr->gbtabscale=2000;
2467 fr->gbtab=make_gb_table(fp,oenv,fr,tabpfn,fr->gbtabscale);
2469 init_gb(&fr->born,cr,fr,ir,mtop,ir->rgbradii,ir->gb_algorithm);
2471 /* Copy local gb data (for dd, this is done in dd_partition_system) */
2472 if (!DOMAINDECOMP(cr))
2474 make_local_gb(cr,fr->born,ir->gb_algorithm);
2478 /* Set the charge scaling */
2479 if (fr->epsilon_r != 0)
2480 fr->epsfac = ONE_4PI_EPS0/fr->epsilon_r;
2482 /* eps = 0 is infinite dieletric: no coulomb interactions */
2485 /* Reaction field constants */
2486 if (EEL_RF(fr->eeltype))
2487 calc_rffac(fp,fr->eeltype,fr->epsilon_r,fr->epsilon_rf,
2488 fr->rcoulomb,fr->temp,fr->zsquare,box,
2489 &fr->kappa,&fr->k_rf,&fr->c_rf);
2491 set_chargesum(fp,fr,mtop);
2493 /* if we are using LR electrostatics, and they are tabulated,
2494 * the tables will contain modified coulomb interactions.
2495 * Since we want to use the non-shifted ones for 1-4
2496 * coulombic interactions, we must have an extra set of tables.
2499 /* Construct tables.
2500 * A little unnecessary to make both vdw and coul tables sometimes,
2501 * but what the heck... */
2503 bTab = fr->bcoultab || fr->bvdwtab || fr->bEwald;
2505 bSep14tab = ((!bTab || fr->eeltype!=eelCUT || fr->vdwtype!=evdwCUT ||
2506 fr->bBHAM || fr->bEwald) &&
2507 (gmx_mtop_ftype_count(mtop,F_LJ14) > 0 ||
2508 gmx_mtop_ftype_count(mtop,F_LJC14_Q) > 0 ||
2509 gmx_mtop_ftype_count(mtop,F_LJC_PAIRS_NB) > 0));
/* Count energy-group pairs that need their own user table */
2511 negp_pp = ir->opts.ngener - ir->nwall;
2514 bNormalnblists = TRUE;
2517 bNormalnblists = (ir->eDispCorr != edispcNO);
2518 for(egi=0; egi<negp_pp; egi++) {
2519 for(egj=egi; egj<negp_pp; egj++) {
2520 egp_flags = ir->opts.egp_flags[GID(egi,egj,ir->opts.ngener)];
2521 if (!(egp_flags & EGP_EXCL)) {
2522 if (egp_flags & EGP_TABLE) {
/* at least one non-excluded pair without its own table */
2525 bNormalnblists = TRUE;
2530 if (bNormalnblists) {
2531 fr->nnblists = negptable + 1;
2533 fr->nnblists = negptable;
2535 if (fr->nnblists > 1)
2536 snew(fr->gid2nblists,ir->opts.ngener*ir->opts.ngener);
2543 snew(fr->nblists,fr->nnblists);
2545 /* This code automatically gives table length tabext without cut-off's,
2546 * in that case grompp should already have checked that we do not need
2547 * normal tables and we only generate tables for 1-4 interactions.
2549 rtab = ir->rlistlong + ir->tabext;
2552 /* make tables for ordinary interactions */
2553 if (bNormalnblists) {
2554 make_nbf_tables(fp,oenv,fr,rtab,cr,tabfn,NULL,NULL,&fr->nblists[0]);
/* (elided: separate-1-4 and free-energy table variants) */
2556 make_nbf_tables(fp,oenv,fr,rtab,cr,tabfn,NULL,NULL,&fr->nblists[fr->nnblists/2]);
2559 fr->tab14 = fr->nblists[0].table_elec_vdw;
2564 if (negptable > 0) {
2565 /* Read the special tables for certain energy group pairs */
2566 nm_ind = mtop->groups.grps[egcENER].nm_ind;
2567 for(egi=0; egi<negp_pp; egi++) {
2568 for(egj=egi; egj<negp_pp; egj++) {
2569 egp_flags = ir->opts.egp_flags[GID(egi,egj,ir->opts.ngener)];
2570 if ((egp_flags & EGP_TABLE) && !(egp_flags & EGP_EXCL)) {
2571 nbl = &(fr->nblists[m]);
2572 if (fr->nnblists > 1) {
2573 fr->gid2nblists[GID(egi,egj,ir->opts.ngener)] = m;
2575 /* Read the table file with the two energy groups names appended */
2576 make_nbf_tables(fp,oenv,fr,rtab,cr,tabfn,
2577 *mtop->groups.grpname[nm_ind[egi]],
2578 *mtop->groups.grpname[nm_ind[egj]],
2581 make_nbf_tables(fp,oenv,fr,rtab,cr,tabfn,
2582 *mtop->groups.grpname[nm_ind[egi]],
2583 *mtop->groups.grpname[nm_ind[egj]],
2584 &fr->nblists[fr->nnblists/2+m]);
2587 } else if (fr->nnblists > 1) {
2588 fr->gid2nblists[GID(egi,egj,ir->opts.ngener)] = 0;
/* (guarded by an elided bSep14tab check) */
2596 /* generate extra tables with plain Coulomb for 1-4 interactions only */
2597 fr->tab14 = make_tables(fp,oenv,fr,MASTER(cr),tabpfn,rtab,
2598 GMX_MAKETABLES_14ONLY);
2601 /* Read AdResS Thermo Force table if needed */
2602 if(fr->adress_icor == eAdressICThermoForce)
2604 /* old todo replace */
2606 if (ir->adress->n_tf_grps > 0){
2607 make_adress_tf_tables(fp,oenv,fr,ir,tabfn, mtop, box);
2610 /* load the default table */
2611 snew(fr->atf_tabs, 1);
2612 fr->atf_tabs[DEFAULT_TF_TABLE] = make_atf_table(fp,oenv,fr,tabafn, box);
/* Wall tables */
2617 fr->nwall = ir->nwall;
2618 if (ir->nwall && ir->wall_type==ewtTABLE)
2620 make_wall_tables(fp,oenv,ir,tabfn,&mtop->groups,fr);
/* Tabulated bonded interactions */
2623 if (fcd && tabbfn) {
2624 fcd->bondtab = make_bonded_tables(fp,
2625 F_TABBONDS,F_TABBONDSNC,
2627 fcd->angletab = make_bonded_tables(fp,
2630 fcd->dihtab = make_bonded_tables(fp,
2635 fprintf(debug,"No fcdata or table file name passed, can not read table, can not do bonded interactions\n");
2638 /* QM/MM initialization if requested
2642 fprintf(stderr,"QM/MM calculation requested.\n");
2645 fr->bQMMM = ir->bQMMM;
2646 fr->qr = mk_QMMMrec();
2648 /* Set all the static charge group info */
2649 fr->cginfo_mb = init_cginfo_mb(fp,mtop,fr,bNoSolvOpt,
2650 &fr->bExcl_IntraCGAll_InterCGNone);
2651 if (DOMAINDECOMP(cr)) {
2654 fr->cginfo = cginfo_expand(mtop->nmolblock,fr->cginfo_mb);
2657 if (!DOMAINDECOMP(cr))
2659 /* When using particle decomposition, the effect of the second argument,
2660 * which sets fr->hcg, is corrected later in do_md and init_em.
2662 forcerec_set_ranges(fr,ncg_mtop(mtop),ncg_mtop(mtop),
2663 mtop->natoms,mtop->natoms,mtop->natoms);
2666 fr->print_force = print_force;
2669 /* coarse load balancing vars */
2674 /* Initialize neighbor search */
2675 init_ns(fp,cr,&fr->ns,fr,mtop,box);
2677 if (cr->duty & DUTY_PP)
2679 gmx_nonbonded_setup(fp,fr,bGenericKernelOnly);
/* (guarded by an elided AdResS check) */
2683 gmx_setup_adress_kernels(fp,bGenericKernelOnly);
2688 /* Initialize the thread working data for bonded interactions */
2689 init_forcerec_f_threads(fr,mtop->groups.grps[egcENER].nr);
2691 snew(fr->excl_load,fr->nthreads+1);
/* Verlet scheme: set up the nbnxn machinery and interaction constants */
2693 if (fr->cutoff_scheme == ecutsVERLET)
2695 if (ir->rcoulomb != ir->rvdw)
2697 gmx_fatal(FARGS,"With Verlet lists rcoulomb and rvdw should be identical");
2700 init_nb_verlet(fp, &fr->nbv, ir, fr, cr, nbpu_opt);
2703 /* fr->ic is used both by verlet and group kernels (to some extent) now */
2704 init_interaction_const(fp, &fr->ic, fr, rtab);
2705 if (ir->eDispCorr != edispcNO)
2707 calc_enervirdiff(fp,ir->eDispCorr,fr);
/* Debug-print helpers: print "name: value" for a real, int or gmx_bool
 * expression; the macro argument is stringized (#r/#i/#b) as the label. */
2711 #define pr_real(fp,r) fprintf(fp,"%s: %e\n",#r,r)
2712 #define pr_int(fp,i) fprintf((fp),"%s: %d\n",#i,i)
2713 #define pr_bool(fp,b) fprintf((fp),"%s: %s\n",#b,bool_names[b])
/* Dump the key forcerec settings to fp for debugging.
 * NOTE(review): the listing is elided (declaration of i, closing brace). */
2715 void pr_forcerec(FILE *fp,t_forcerec *fr,t_commrec *cr)
2719 pr_real(fp,fr->rlist);
2720 pr_real(fp,fr->rcoulomb);
2721 pr_real(fp,fr->fudgeQQ);
2722 pr_bool(fp,fr->bGrid);
2723 pr_bool(fp,fr->bTwinRange);
2724 /*pr_int(fp,fr->cg0);
2725 pr_int(fp,fr->hcg);*/
/* table size per neighbor-list set */
2726 for(i=0; i<fr->nnblists; i++)
2727 pr_int(fp,fr->nblists[i].table_elec_vdw.n);
2728 pr_real(fp,fr->rcoulomb_switch);
/* NOTE(review): rcoulomb is already printed above (2720); this second
 * print looks redundant - possibly rvdw was intended. Confirm upstream. */
2729 pr_real(fp,fr->rcoulomb);
2734 void forcerec_set_excl_load(t_forcerec *fr,
2735 const gmx_localtop_t *top,const t_commrec *cr)
2738 int t,i,j,ntot,n,ntarget;
2740 if (cr != NULL && PARTDECOMP(cr))
2742 /* No OpenMP with particle decomposition */
2750 ind = top->excls.index;
2754 for(i=0; i<top->excls.nr; i++)
2756 for(j=ind[i]; j<ind[i+1]; j++)
2765 fr->excl_load[0] = 0;
2768 for(t=1; t<=fr->nthreads; t++)
2770 ntarget = (ntot*t)/fr->nthreads;
2771 while(i < top->excls.nr && n < ntarget)
2773 for(j=ind[i]; j<ind[i+1]; j++)
2782 fr->excl_load[t] = i;