1 /* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
4 * This source code is part of
8 * GROningen MAchine for Chemical Simulations
11 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
12 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
13 * Copyright (c) 2001-2004, The GROMACS development team,
14 * check out http://www.gromacs.org for more information.
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License
18 * as published by the Free Software Foundation; either version 2
19 * of the License, or (at your option) any later version.
21 * If you want to redistribute modifications, please consider that
22 * scientific software is very special. Version control is crucial -
23 * bugs must be traceable. We will be happy to consider code for
24 * inclusion in the official distribution, but derived work must not
25 * be called official GROMACS. Details are found in the README & COPYING
26 * files - if they are missing, get the official version at www.gromacs.org.
28 * To help us fund GROMACS development, we humbly ask that you cite
29 * the papers on the package - you can find them in the top README file.
31 * For more info, check our website at http://www.gromacs.org
34 * GROwing Monsters And Cloning Shrimps
50 #include "gmx_fatal.h"
51 #include "gmx_fatal_collective.h"
55 #include "nonbonded.h"
64 #include "md_support.h"
69 #include "mtop_util.h"
70 #include "nbnxn_search.h"
71 #include "nbnxn_atomdata.h"
72 #include "nbnxn_consts.h"
74 #include "gmx_omp_nthreads.h"
77 /* MSVC definition for __cpuid() */
81 #include "types/nbnxn_cuda_types_ext.h"
82 #include "gpu_utils.h"
83 #include "nbnxn_cuda_data_mgmt.h"
84 #include "pmalloc_cuda.h"
/* Allocate a fresh t_forcerec.
 * NOTE(review): the body is not visible in this excerpt; presumably it just
 * snew()s the struct and returns it -- confirm against the full file. */
86 t_forcerec *mk_forcerec(void)
/* Debug-print the full atnr x atnr non-bonded parameter matrix to fp.
 * For Buckingham (bBHAM) prints the a/b/c triplet per type pair, otherwise
 * the Lennard-Jones c6/c12 pair. (Excerpt: some original lines are elided.) */
96 static void pr_nbfp(FILE *fp,real *nbfp,gmx_bool bBHAM,int atnr)
100 for(i=0; (i<atnr); i++) {
101 for(j=0; (j<atnr); j++) {
102 fprintf(fp,"%2d - %2d",i,j);
/* Buckingham branch: three parameters per pair */
104 fprintf(fp," a=%10g, b=%10g, c=%10g\n",BHAMA(nbfp,atnr,i,j),
105 BHAMB(nbfp,atnr,i,j),BHAMC(nbfp,atnr,i,j));
/* LJ branch: two parameters per pair */
107 fprintf(fp," c6=%10g, c12=%10g\n",C6(nbfp,atnr,i,j),
/* Build the flat non-bonded parameter matrix from the force-field parameters.
 * Allocates 3*atnr*atnr reals for Buckingham (a,b,c per pair) or 2*atnr*atnr
 * for LJ (c6,c12 per pair) and fills it from idef->iparams in pair order.
 * Returns the newly allocated array; caller owns it.
 * (Excerpt: some original lines are elided.) */
114 static real *mk_nbfp(const gmx_ffparams_t *idef,gmx_bool bBHAM)
121 snew(nbfp,3*atnr*atnr);
122 for(i=k=0; (i<atnr); i++) {
123 for(j=0; (j<atnr); j++,k++) {
124 BHAMA(nbfp,atnr,i,j) = idef->iparams[k].bham.a;
125 BHAMB(nbfp,atnr,i,j) = idef->iparams[k].bham.b;
126 BHAMC(nbfp,atnr,i,j) = idef->iparams[k].bham.c;
131 snew(nbfp,2*atnr*atnr);
132 for(i=k=0; (i<atnr); i++) {
133 for(j=0; (j<atnr); j++,k++) {
134 C6(nbfp,atnr,i,j) = idef->iparams[k].lj.c6;
135 C12(nbfp,atnr,i,j) = idef->iparams[k].lj.c12;
142 /* This routine sets fr->solvent_opt to the most common solvent in the
143 * system, e.g. esolSPC or esolTIP4P. It will also mark each charge group in
144 * the fr->solvent_type array with the correct type (or esolNO).
146 * Charge groups that fulfill the conditions but are not identical to the
147 * most common one will be marked as esolNO in the solvent_type array.
149 * TIP3p is identical to SPC for these purposes, so we call it
150 * SPC in the arrays (Apologies to Bill Jorgensen ;-)
152 * NOTE: QM particle should not
153 * become an optimized solvent. Not even if there is only one charge
/* Per-solvent-model bookkeeping record (model id, per-atom vdW types and
 * charges, and a molecule count). Declaration head elided in this excerpt. */
163 } solvent_parameters_t;
/* Classify one charge group of a molecule type as a potential optimized
 * solvent (SPC-like 3-site or TIP4P-like 4-site water).
 * On a match, *cg_sp is set to the index of the matching entry in
 * *solvent_parameters_p (a growing list shared across calls); new solvent
 * types are appended and counted. QM groups and free-energy-perturbed
 * molecules are never accepted. (Excerpt: some original lines are elided.) */
166 check_solvent_cg(const gmx_moltype_t *molt,
169 const unsigned char *qm_grpnr,
170 const t_grps *qm_grps,
172 int *n_solvent_parameters,
173 solvent_parameters_t **solvent_parameters_p,
177 const t_blocka * excl;
188 solvent_parameters_t *solvent_parameters;
190 /* We use a list with parameters for each solvent type.
191 * Every time we discover a new molecule that fulfills the basic
192 * conditions for a solvent we compare with the previous entries
193 * in these lists. If the parameters are the same we just increment
194 * the counter for that type, and otherwise we create a new type
195 * based on the current molecule.
197 * Once we've finished going through all molecules we check which
198 * solvent is most common, and mark all those molecules while we
199 * clear the flag on all others.
202 solvent_parameters = *solvent_parameters_p;
204 /* Mark the cg first as non optimized */
207 /* Check if this cg has no exclusions with atoms in other charge groups
208 * and all atoms inside the charge group excluded.
209 * We only have 3 or 4 atom solvent loops.
211 if (GET_CGINFO_EXCL_INTER(cginfo) ||
212 !GET_CGINFO_EXCL_INTRA(cginfo))
217 /* Get the indices of the first atom in this charge group */
218 j0 = molt->cgs.index[cg0];
219 j1 = molt->cgs.index[cg0+1];
221 /* Number of atoms in our molecule */
226 "Moltype '%s': there are %d atoms in this charge group\n",
230 /* Check if it could be an SPC (3 atoms) or TIP4p (4) water,
238 /* Check if we are doing QM on this group */
240 if (qm_grpnr != NULL)
242 for(j=j0 ; j<j1 && !qm; j++)
244 qm = (qm_grpnr[j] < qm_grps->nr - 1);
247 /* Cannot use solvent optimization with QM */
253 atom = molt->atoms.atom;
255 /* Still looks like a solvent, time to check parameters */
257 /* If it is perturbed (free energy) we can't use the solvent loops,
258 * so then we just skip to the next molecule.
262 for(j=j0; j<j1 && !perturbed; j++)
264 perturbed = PERTURBED(atom[j]);
272 /* Now it's only a question if the VdW and charge parameters
273 * are OK. Before doing the check we compare and see if they are
274 * identical to a possible previous solvent type.
275 * First we assign the current types and charges.
279 tmp_vdwtype[j] = atom[j0+j].type;
280 tmp_charge[j] = atom[j0+j].q;
283 /* Does it match any previous solvent type? */
284 for(k=0 ; k<*n_solvent_parameters; k++)
289 /* We can only match SPC with 3 atoms and TIP4p with 4 atoms */
290 if( (solvent_parameters[k].model==esolSPC && nj!=3) ||
291 (solvent_parameters[k].model==esolTIP4P && nj!=4) )
294 /* Check that types & charges match for all atoms in molecule */
295 for(j=0 ; j<nj && match==TRUE; j++)
297 if (tmp_vdwtype[j] != solvent_parameters[k].vdwtype[j])
301 if(tmp_charge[j] != solvent_parameters[k].charge[j])
308 /* Congratulations! We have a matched solvent.
309 * Flag it with this type for later processing.
312 solvent_parameters[k].count += nmol;
314 /* We are done with this charge group */
319 /* If we get here, we have a tentative new solvent type.
320 * Before we add it we must check that it fulfills the requirements
321 * of the solvent optimized loops. First determine which atoms have
327 tjA = tmp_vdwtype[j];
329 /* Go through all other types and see if any have non-zero
330 * VdW parameters when combined with this one.
332 for(k=0; k<fr->ntype && (has_vdw[j]==FALSE); k++)
334 /* We already checked that the atoms weren't perturbed,
335 * so we only need to check state A now.
339 has_vdw[j] = (has_vdw[j] ||
340 (BHAMA(fr->nbfp,fr->ntype,tjA,k) != 0.0) ||
341 (BHAMB(fr->nbfp,fr->ntype,tjA,k) != 0.0) ||
342 (BHAMC(fr->nbfp,fr->ntype,tjA,k) != 0.0));
347 has_vdw[j] = (has_vdw[j] ||
348 (C6(fr->nbfp,fr->ntype,tjA,k) != 0.0) ||
349 (C12(fr->nbfp,fr->ntype,tjA,k) != 0.0));
354 /* Now we know all we need to make the final check and assignment. */
358 * For this we require that all atoms have charge,
359 * the charges on atom 2 & 3 should be the same, and only
360 * atom 1 should have VdW.
362 if (has_vdw[0] == TRUE &&
363 has_vdw[1] == FALSE &&
364 has_vdw[2] == FALSE &&
365 tmp_charge[0] != 0 &&
366 tmp_charge[1] != 0 &&
367 tmp_charge[2] == tmp_charge[1])
369 srenew(solvent_parameters,*n_solvent_parameters+1);
370 solvent_parameters[*n_solvent_parameters].model = esolSPC;
371 solvent_parameters[*n_solvent_parameters].count = nmol;
374 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
375 solvent_parameters[*n_solvent_parameters].charge[k] = tmp_charge[k];
378 *cg_sp = *n_solvent_parameters;
379 (*n_solvent_parameters)++;
384 /* Or could it be a TIP4P?
385 * For this we require that atoms 2,3,4 have charge, but not atom 1.
386 * Only atom 1 should have VdW.
388 if(has_vdw[0] == TRUE &&
389 has_vdw[1] == FALSE &&
390 has_vdw[2] == FALSE &&
391 has_vdw[3] == FALSE &&
392 tmp_charge[0] == 0 &&
393 tmp_charge[1] != 0 &&
394 tmp_charge[2] == tmp_charge[1] &&
397 srenew(solvent_parameters,*n_solvent_parameters+1);
398 solvent_parameters[*n_solvent_parameters].model = esolTIP4P;
399 solvent_parameters[*n_solvent_parameters].count = nmol;
402 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
403 solvent_parameters[*n_solvent_parameters].charge[k] = tmp_charge[k];
406 *cg_sp = *n_solvent_parameters;
407 (*n_solvent_parameters)++;
/* Publish the (possibly reallocated) list back to the caller. */
411 *solvent_parameters_p = solvent_parameters;
/* Scan all molecule blocks, classify every charge group via
 * check_solvent_cg(), pick the most common solvent model, flag the matching
 * charge groups with SET_CGINFO_SOLOPT and set fr->solvent_opt to that model
 * (esolNO for everything else). (Excerpt: some original lines are elided.) */
415 check_solvent(FILE * fp,
416 const gmx_mtop_t * mtop,
418 cginfo_mb_t *cginfo_mb)
421 const t_block * mols;
422 const gmx_moltype_t *molt;
423 int mb,mol,cg_mol,at_offset,cg_offset,am,cgm,i,nmol_ch,nmol;
424 int n_solvent_parameters;
425 solvent_parameters_t *solvent_parameters;
431 fprintf(debug,"Going to determine what solvent types we have.\n");
436 n_solvent_parameters = 0;
437 solvent_parameters = NULL;
438 /* Allocate temporary array for solvent type */
439 snew(cg_sp,mtop->nmolblock);
443 for(mb=0; mb<mtop->nmolblock; mb++)
445 molt = &mtop->moltype[mtop->molblock[mb].type];
447 /* Here we have to loop over all individual molecules
448 * because we need to check for QMMM particles.
450 snew(cg_sp[mb],cginfo_mb[mb].cg_mod);
451 nmol_ch = cginfo_mb[mb].cg_mod/cgs->nr;
452 nmol = mtop->molblock[mb].nmol/nmol_ch;
453 for(mol=0; mol<nmol_ch; mol++)
456 am = mol*cgs->index[cgs->nr];
457 for(cg_mol=0; cg_mol<cgs->nr; cg_mol++)
459 check_solvent_cg(molt,cg_mol,nmol,
460 mtop->groups.grpnr[egcQMMM] ?
461 mtop->groups.grpnr[egcQMMM]+at_offset+am : 0,
462 &mtop->groups.grps[egcQMMM],
464 &n_solvent_parameters,&solvent_parameters,
465 cginfo_mb[mb].cginfo[cgm+cg_mol],
466 &cg_sp[mb][cgm+cg_mol]);
469 cg_offset += cgs->nr;
470 at_offset += cgs->index[cgs->nr];
473 /* Puh! We finished going through all charge groups.
474 * Now find the most common solvent model.
477 /* Most common solvent this far */
479 for(i=0;i<n_solvent_parameters;i++)
482 solvent_parameters[i].count > solvent_parameters[bestsp].count)
490 bestsol = solvent_parameters[bestsp].model;
497 #ifdef DISABLE_WATER_NLIST
/* Second pass: tag every charge group with the winning model or esolNO. */
502 for(mb=0; mb<mtop->nmolblock; mb++)
504 cgs = &mtop->moltype[mtop->molblock[mb].type].cgs;
505 nmol = (mtop->molblock[mb].nmol*cgs->nr)/cginfo_mb[mb].cg_mod;
506 for(i=0; i<cginfo_mb[mb].cg_mod; i++)
508 if (cg_sp[mb][i] == bestsp)
510 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i],bestsol);
515 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i],esolNO);
522 if (bestsol != esolNO && fp!=NULL)
524 fprintf(fp,"\nEnabling %s-like water optimization for %d molecules.\n\n",
526 solvent_parameters[bestsp].count);
529 sfree(solvent_parameters);
530 fr->solvent_opt = bestsol;
/* Per-atom constraint classification used while building cginfo:
 * unconstrained, regular constraint, or SETTLE. */
533 enum { acNONE=0, acCONSTRAINT, acSETTLE };
/* Build the per-molblock charge-group info arrays (cginfo_mb): energy-group
 * id, constraint/SETTLE flags, intra/inter-CG exclusion flags, presence of
 * VdW and charge, and charge-group size. Also runs the solvent optimizer
 * and reports through *bExcl_IntraCGAll_InterCGNone whether all intra-CG
 * pairs are excluded and no inter-CG pairs are.
 * (Excerpt: some original lines are elided.) */
535 static cginfo_mb_t *init_cginfo_mb(FILE *fplog,const gmx_mtop_t *mtop,
536 t_forcerec *fr,gmx_bool bNoSolvOpt,
537 gmx_bool *bExcl_IntraCGAll_InterCGNone)
540 const t_blocka *excl;
541 const gmx_moltype_t *molt;
542 const gmx_molblock_t *molb;
543 cginfo_mb_t *cginfo_mb;
546 int cg_offset,a_offset,cgm,am;
547 int mb,m,ncg_tot,cg,a0,a1,gid,ai,j,aj,excl_nalloc;
551 gmx_bool bId,*bExcl,bExclIntraAll,bExclInter,bHaveVDW,bHaveQ;
553 ncg_tot = ncg_mtop(mtop);
554 snew(cginfo_mb,mtop->nmolblock);
/* Precompute, per atom type, whether any pairing with another type yields
 * non-zero VdW parameters. */
556 snew(type_VDW,fr->ntype);
557 for(ai=0; ai<fr->ntype; ai++)
559 type_VDW[ai] = FALSE;
560 for(j=0; j<fr->ntype; j++)
562 type_VDW[ai] = type_VDW[ai] ||
564 C6(fr->nbfp,fr->ntype,ai,j) != 0 ||
565 C12(fr->nbfp,fr->ntype,ai,j) != 0;
569 *bExcl_IntraCGAll_InterCGNone = TRUE;
572 snew(bExcl,excl_nalloc);
575 for(mb=0; mb<mtop->nmolblock; mb++)
577 molb = &mtop->molblock[mb];
578 molt = &mtop->moltype[molb->type];
582 /* Check if the cginfo is identical for all molecules in this block.
583 * If so, we only need an array of the size of one molecule.
584 * Otherwise we make an array of #mol times #cgs per molecule.
588 for(m=0; m<molb->nmol; m++)
590 am = m*cgs->index[cgs->nr];
591 for(cg=0; cg<cgs->nr; cg++)
594 a1 = cgs->index[cg+1];
595 if (ggrpnr(&mtop->groups,egcENER,a_offset+am+a0) !=
596 ggrpnr(&mtop->groups,egcENER,a_offset +a0))
600 if (mtop->groups.grpnr[egcQMMM] != NULL)
602 for(ai=a0; ai<a1; ai++)
604 if (mtop->groups.grpnr[egcQMMM][a_offset+am+ai] !=
605 mtop->groups.grpnr[egcQMMM][a_offset +ai])
614 cginfo_mb[mb].cg_start = cg_offset;
615 cginfo_mb[mb].cg_end = cg_offset + molb->nmol*cgs->nr;
616 cginfo_mb[mb].cg_mod = (bId ? 1 : molb->nmol)*cgs->nr;
617 snew(cginfo_mb[mb].cginfo,cginfo_mb[mb].cg_mod);
618 cginfo = cginfo_mb[mb].cginfo;
620 /* Set constraints flags for constrained atoms */
621 snew(a_con,molt->atoms.nr);
622 for(ftype=0; ftype<F_NRE; ftype++)
624 if (interaction_function[ftype].flags & IF_CONSTRAINT)
629 for(ia=0; ia<molt->ilist[ftype].nr; ia+=1+nral)
633 for(a=0; a<nral; a++)
635 a_con[molt->ilist[ftype].iatoms[ia+1+a]] =
636 (ftype == F_SETTLE ? acSETTLE : acCONSTRAINT);
642 for(m=0; m<(bId ? 1 : molb->nmol); m++)
645 am = m*cgs->index[cgs->nr];
646 for(cg=0; cg<cgs->nr; cg++)
649 a1 = cgs->index[cg+1];
651 /* Store the energy group in cginfo */
652 gid = ggrpnr(&mtop->groups,egcENER,a_offset+am+a0);
653 SET_CGINFO_GID(cginfo[cgm+cg],gid);
655 /* Check the intra/inter charge group exclusions */
656 if (a1-a0 > excl_nalloc) {
657 excl_nalloc = a1 - a0;
658 srenew(bExcl,excl_nalloc);
660 /* bExclIntraAll: all intra cg interactions excluded
661 * bExclInter: any inter cg interactions excluded
663 bExclIntraAll = TRUE;
667 for(ai=a0; ai<a1; ai++)
669 /* Check VDW and electrostatic interactions */
670 bHaveVDW = bHaveVDW || (type_VDW[molt->atoms.atom[ai].type] ||
671 type_VDW[molt->atoms.atom[ai].typeB]);
672 bHaveQ = bHaveQ || (molt->atoms.atom[ai].q != 0 ||
673 molt->atoms.atom[ai].qB != 0);
675 /* Clear the exclusion list for atom ai */
676 for(aj=a0; aj<a1; aj++)
678 bExcl[aj-a0] = FALSE;
680 /* Loop over all the exclusions of atom ai */
681 for(j=excl->index[ai]; j<excl->index[ai+1]; j++)
684 if (aj < a0 || aj >= a1)
693 /* Check if ai excludes a0 to a1 */
694 for(aj=a0; aj<a1; aj++)
698 bExclIntraAll = FALSE;
705 SET_CGINFO_CONSTR(cginfo[cgm+cg]);
708 SET_CGINFO_SETTLE(cginfo[cgm+cg]);
716 SET_CGINFO_EXCL_INTRA(cginfo[cgm+cg]);
720 SET_CGINFO_EXCL_INTER(cginfo[cgm+cg]);
722 if (a1 - a0 > MAX_CHARGEGROUP_SIZE)
724 /* The size in cginfo is currently only read with DD */
725 gmx_fatal(FARGS,"A charge group has size %d which is larger than the limit of %d atoms",a1-a0,MAX_CHARGEGROUP_SIZE);
729 SET_CGINFO_HAS_VDW(cginfo[cgm+cg]);
733 SET_CGINFO_HAS_Q(cginfo[cgm+cg]);
735 /* Store the charge group size */
736 SET_CGINFO_NATOMS(cginfo[cgm+cg],a1-a0);
738 if (!bExclIntraAll || bExclInter)
740 *bExcl_IntraCGAll_InterCGNone = FALSE;
747 cg_offset += molb->nmol*cgs->nr;
748 a_offset += molb->nmol*cgs->index[cgs->nr];
752 /* the solvent optimizer is called after the QM is initialized,
753 * because we don't want to have the QM subsystem become an
757 check_solvent(fplog,mtop,fr,cginfo_mb);
759 if (getenv("GMX_NO_SOLV_OPT"))
763 fprintf(fplog,"Found environment variable GMX_NO_SOLV_OPT.\n"
764 "Disabling all solvent optimization\n");
766 fr->solvent_opt = esolNO;
770 fr->solvent_opt = esolNO;
772 if (!fr->solvent_opt)
774 for(mb=0; mb<mtop->nmolblock; mb++)
776 for(cg=0; cg<cginfo_mb[mb].cg_mod; cg++)
778 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[cg],esolNO);
/* Expand the compressed per-molblock cginfo into one flat array with one
 * entry per global charge group; the modulo maps identical molecules in a
 * block back onto the single stored copy.
 * (Excerpt: some original lines are elided.) */
786 static int *cginfo_expand(int nmb,cginfo_mb_t *cgi_mb)
791 ncg = cgi_mb[nmb-1].cg_end;
794 for(cg=0; cg<ncg; cg++)
/* Advance to the molblock containing charge group cg. */
796 while (cg >= cgi_mb[mb].cg_end)
801 cgi_mb[mb].cginfo[(cg - cgi_mb[mb].cg_start) % cgi_mb[mb].cg_mod];
/* Accumulate the total charge (qsum) and sum of squared charges (q2sum) of
 * the whole system for topology A, and for topology B when free-energy
 * perturbation is on (otherwise B mirrors A). Results are stored in
 * fr->qsum[0..1] / fr->q2sum[0..1] and logged.
 * (Excerpt: some original lines are elided.) */
807 static void set_chargesum(FILE *log,t_forcerec *fr,const gmx_mtop_t *mtop)
811 const t_atoms *atoms;
815 for(mb=0; mb<mtop->nmolblock; mb++)
817 nmol = mtop->molblock[mb].nmol;
818 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
819 for(i=0; i<atoms->nr; i++)
821 q = atoms->atom[i].q;
827 fr->q2sum[0] = q2sum;
828 if (fr->efep != efepNO)
/* Second pass over the B-state charges for free-energy runs. */
832 for(mb=0; mb<mtop->nmolblock; mb++)
834 nmol = mtop->molblock[mb].nmol;
835 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
836 for(i=0; i<atoms->nr; i++)
838 q = atoms->atom[i].qB;
843 fr->q2sum[1] = q2sum;
/* No perturbation: state B equals state A. */
848 fr->qsum[1] = fr->qsum[0];
849 fr->q2sum[1] = fr->q2sum[0];
852 if (fr->efep == efepNO)
853 fprintf(log,"System total charge: %.3f\n",fr->qsum[0]);
855 fprintf(log,"System total charge, top. A: %.3f top. B: %.3f\n",
856 fr->qsum[0],fr->qsum[1]);
/* Refresh box-dependent force-field constants; for generalized reaction
 * field this recomputes kappa, k_rf and c_rf from the current box. */
860 void update_forcerec(FILE *log,t_forcerec *fr,matrix box)
862 if (fr->eeltype == eelGRF)
864 calc_rffac(NULL,fr->eeltype,fr->epsilon_r,fr->epsilon_rf,
865 fr->rcoulomb,fr->temp,fr->zsquare,box,
866 &fr->kappa,&fr->k_rf,&fr->c_rf);
/* Compute the system-average C6 and C12 parameters (fr->avcsix/avctwelve,
 * per free-energy state) used for the long-range dispersion correction:
 * sum pair parameters over all type pairs weighted by type counts, subtract
 * excluded pairs, and handle the test-particle-insertion special case.
 * (Excerpt: some original lines are elided.) */
870 void set_avcsixtwelve(FILE *fplog,t_forcerec *fr,const gmx_mtop_t *mtop)
872 const t_atoms *atoms,*atoms_tpi;
873 const t_blocka *excl;
874 int mb,nmol,nmolc,i,j,tpi,tpj,j1,j2,k,n,nexcl,q;
/* Use 64-bit integers for pair counts when available; the pair count can
 * overflow 32 bits for large systems. */
875 #if (defined SIZEOF_LONG_LONG_INT) && (SIZEOF_LONG_LONG_INT >= 8)
876 long long int npair,npair_ij,tmpi,tmpj;
878 double npair, npair_ij,tmpi,tmpj;
/* q = 0: topology A; q = 1: topology B (free-energy runs only). */
889 for(q=0; q<(fr->efep==efepNO ? 1 : 2); q++) {
895 /* Count the types so we avoid natoms^2 operations */
897 for(mb=0; mb<mtop->nmolblock; mb++) {
898 nmol = mtop->molblock[mb].nmol;
899 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
900 for(i=0; i<atoms->nr; i++) {
903 tpi = atoms->atom[i].type;
907 tpi = atoms->atom[i].typeB;
909 typecount[tpi] += nmol;
912 for(tpi=0; tpi<ntp; tpi++) {
913 for(tpj=tpi; tpj<ntp; tpj++) {
914 tmpi = typecount[tpi];
915 tmpj = typecount[tpj];
918 npair_ij = tmpi*tmpj;
/* Same-type pairs: n*(n-1)/2 distinct pairs. */
922 npair_ij = tmpi*(tmpi - 1)/2;
925 csix += npair_ij*BHAMC(nbfp,ntp,tpi,tpj);
927 csix += npair_ij* C6(nbfp,ntp,tpi,tpj);
928 ctwelve += npair_ij* C12(nbfp,ntp,tpi,tpj);
934 /* Subtract the excluded pairs.
935 * The main reason for subtracting exclusions is that in some cases
936 * some combinations might never occur and the parameters could have
937 * any value. These unused values should not influence the dispersion
940 for(mb=0; mb<mtop->nmolblock; mb++) {
941 nmol = mtop->molblock[mb].nmol;
942 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
943 excl = &mtop->moltype[mtop->molblock[mb].type].excls;
944 for(i=0; (i<atoms->nr); i++) {
947 tpi = atoms->atom[i].type;
951 tpi = atoms->atom[i].typeB;
954 j2 = excl->index[i+1];
955 for(j=j1; j<j2; j++) {
961 tpj = atoms->atom[k].type;
965 tpj = atoms->atom[k].typeB;
968 csix -= nmol*BHAMC(nbfp,ntp,tpi,tpj);
970 csix -= nmol*C6 (nbfp,ntp,tpi,tpj);
971 ctwelve -= nmol*C12(nbfp,ntp,tpi,tpj);
979 /* Only correct for the interaction of the test particle
980 * with the rest of the system.
983 &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].atoms;
986 for(mb=0; mb<mtop->nmolblock; mb++) {
987 nmol = mtop->molblock[mb].nmol;
988 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
989 for(j=0; j<atoms->nr; j++) {
991 /* Remove the interaction of the test charge group
994 if (mb == mtop->nmolblock-1)
998 if (mb == 0 && nmol == 1)
1000 gmx_fatal(FARGS,"Old format tpr with TPI, please generate a new tpr file");
1005 tpj = atoms->atom[j].type;
1009 tpj = atoms->atom[j].typeB;
1011 for(i=0; i<fr->n_tpi; i++)
1015 tpi = atoms_tpi->atom[i].type;
1019 tpi = atoms_tpi->atom[i].typeB;
1023 csix += nmolc*BHAMC(nbfp,ntp,tpi,tpj);
1027 csix += nmolc*C6 (nbfp,ntp,tpi,tpj);
1028 ctwelve += nmolc*C12(nbfp,ntp,tpi,tpj);
1035 if (npair - nexcl <= 0 && fplog) {
1036 fprintf(fplog,"\nWARNING: There are no atom pairs for dispersion correction\n\n");
/* Normalize to per-pair averages. */
1040 csix /= npair - nexcl;
1041 ctwelve /= npair - nexcl;
1044 fprintf(debug,"Counted %d exclusions\n",nexcl);
1045 fprintf(debug,"Average C6 parameter is: %10g\n",(double)csix);
1046 fprintf(debug,"Average C12 parameter is: %10g\n",(double)ctwelve);
1048 fr->avcsix[q] = csix;
1049 fr->avctwelve[q] = ctwelve;
1053 if (fr->eDispCorr == edispcAllEner ||
1054 fr->eDispCorr == edispcAllEnerPres)
1056 fprintf(fplog,"Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
1057 fr->avcsix[0],fr->avctwelve[0]);
1061 fprintf(fplog,"Long Range LJ corr.: <C6> %10.4e\n",fr->avcsix[0]);
/* Scan all atom-type pairs actually present in the topology and record the
 * largest Buckingham b parameter in fr->bham_b_max (used to size tables);
 * also tracks and logs the minimum. Fatals on out-of-range type indices.
 * (Excerpt: some original lines are elided.) */
1067 static void set_bham_b_max(FILE *fplog,t_forcerec *fr,
1068 const gmx_mtop_t *mtop)
1070 const t_atoms *at1,*at2;
1071 int mt1,mt2,i,j,tpi,tpj,ntypes;
1077 fprintf(fplog,"Determining largest Buckingham b parameter for table\n");
1084 for(mt1=0; mt1<mtop->nmoltype; mt1++)
1086 at1 = &mtop->moltype[mt1].atoms;
1087 for(i=0; (i<at1->nr); i++)
1089 tpi = at1->atom[i].type;
1091 gmx_fatal(FARGS,"Atomtype[%d] = %d, maximum = %d",i,tpi,ntypes);
1093 for(mt2=mt1; mt2<mtop->nmoltype; mt2++)
1095 at2 = &mtop->moltype[mt2].atoms;
1096 for(j=0; (j<at2->nr); j++) {
1097 tpj = at2->atom[j].type;
1100 gmx_fatal(FARGS,"Atomtype[%d] = %d, maximum = %d",j,tpj,ntypes);
1102 b = BHAMB(nbfp,ntypes,tpi,tpj);
1103 if (b > fr->bham_b_max)
/* bmin starts at -1 as "unset" sentinel. */
1107 if ((b < bmin) || (bmin==-1))
1117 fprintf(fplog,"Buckingham b parameters, min: %g, max: %g\n",
1118 bmin,fr->bham_b_max);
/* Read a non-bonded interaction table for one energy-group pair: the file
 * name is derived from tabfn by inserting "_<eg1>_<eg2>" before the .xvg
 * extension. The combined table is then split into separate 16-byte-aligned
 * Coulomb (4 values/point) and VdW (8 values/point) tables for cache
 * efficiency. (Excerpt: some original lines are elided.) */
1122 static void make_nbf_tables(FILE *fp,const output_env_t oenv,
1123 t_forcerec *fr,real rtab,
1124 const t_commrec *cr,
1125 const char *tabfn,char *eg1,char *eg2,
1131 if (tabfn == NULL) {
1133 fprintf(debug,"No table file name passed, can not read table, can not do non-bonded interactions\n");
1137 sprintf(buf,"%s",tabfn);
1139 /* Append the two energy group names */
1140 sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1,"_%s_%s.%s",
1141 eg1,eg2,ftp2ext(efXVG));
1142 nbl->tab = make_tables(fp,oenv,fr,MASTER(cr),buf,rtab,0);
1143 /* Copy the contents of the table to separate coulomb and LJ tables too,
1144 * to improve cache performance.
1147 /* For performance reasons we want
1148 * the table data to be aligned to 16-byte. The pointer could be freed
1149 * but currently isn't.
1151 snew_aligned(nbl->vdwtab,8*(nbl->tab.n+1),16);
1152 snew_aligned(nbl->coultab,4*(nbl->tab.n+1),16);
/* Source table packs 12 values per point: 4 Coulomb then 8 VdW. */
1154 for(i=0; i<=nbl->tab.n; i++) {
1156 nbl->coultab[4*i+j] = nbl->tab.tab[12*i+j];
1158 nbl->vdwtab [8*i+j] = nbl->tab.tab[12*i+4+j];
/* Count, over all molecule types, how many distinct bonded table numbers
 * are referenced by interactions of ftype1 or ftype2, growing *count (usage
 * counter per table number) and *ncount (highest table number + 1).
 * (Excerpt: some original lines are elided.) */
1162 static void count_tables(int ftype1,int ftype2,const gmx_mtop_t *mtop,
1163 int *ncount,int **count)
1165 const gmx_moltype_t *molt;
1167 int mt,ftype,stride,i,j,tabnr;
1169 for(mt=0; mt<mtop->nmoltype; mt++)
1171 molt = &mtop->moltype[mt];
1172 for(ftype=0; ftype<F_NRE; ftype++)
1174 if (ftype == ftype1 || ftype == ftype2) {
1175 il = &molt->ilist[ftype];
/* iatoms layout: one parameter index followed by NRAL(ftype) atoms. */
1176 stride = 1 + NRAL(ftype);
1177 for(i=0; i<il->nr; i+=stride) {
1178 tabnr = mtop->ffparams.iparams[il->iatoms[i]].tab.table;
1180 gmx_fatal(FARGS,"A bonded table number is smaller than 0: %d\n",tabnr);
1181 if (tabnr >= *ncount) {
1182 srenew(*count,tabnr+1);
/* Zero the newly exposed counter slots. */
1183 for(j=*ncount; j<tabnr+1; j++)
/* Load all bonded tables used by interactions ftype1/ftype2: file names are
 * derived from basefn by inserting "_<tabext><i>" before the .xvg extension.
 * Returns a newly allocated array of ncount bondedtable_t.
 * (Excerpt: some original lines are elided.) */
1194 static bondedtable_t *make_bonded_tables(FILE *fplog,
1195 int ftype1,int ftype2,
1196 const gmx_mtop_t *mtop,
1197 const char *basefn,const char *tabext)
1199 int i,ncount,*count;
1207 count_tables(ftype1,ftype2,mtop,&ncount,&count);
1211 for(i=0; i<ncount; i++) {
1213 sprintf(tabfn,"%s",basefn);
1214 sprintf(tabfn + strlen(basefn) - strlen(ftp2ext(efXVG)) - 1,"_%s%d.%s",
1215 tabext,i,ftp2ext(efXVG));
1216 tab[i] = make_bonded_table(fplog,tabfn,NRAL(ftype1)-2);
/* Record the charge-group/atom ranges the force calculation operates on and
 * (re)size the force buffers accordingly, including the separate
 * no-virial-summation force array when fr->bF_NoVirSum is set.
 * (Excerpt: some original lines are elided.) */
1225 void forcerec_set_ranges(t_forcerec *fr,
1226 int ncg_home,int ncg_force,
1228 int natoms_force_constr,int natoms_f_novirsum)
1233 /* fr->ncg_force is unused in the standard code,
1234 * but it can be useful for modified code dealing with charge groups.
1236 fr->ncg_force = ncg_force;
1237 fr->natoms_force = natoms_force;
1238 fr->natoms_force_constr = natoms_force_constr;
1240 if (fr->natoms_force_constr > fr->nalloc_force)
1242 fr->nalloc_force = over_alloc_dd(fr->natoms_force_constr);
1246 srenew(fr->f_twin,fr->nalloc_force);
1250 if (fr->bF_NoVirSum)
1252 fr->f_novirsum_n = natoms_f_novirsum;
1253 if (fr->f_novirsum_n > fr->f_novirsum_nalloc)
1255 fr->f_novirsum_nalloc = over_alloc_dd(fr->f_novirsum_n);
1256 srenew(fr->f_novirsum_alloc,fr->f_novirsum_nalloc);
1261 fr->f_novirsum_n = 0;
/* Map a zero (i.e. "no cut-off") value to the sentinel GMX_CUTOFF_INF so
 * later comparisons can treat it as infinitely large. */
1265 static real cutoff_inf(real cutoff)
1269 cutoff = GMX_CUTOFF_INF;
/* Load one AdResS thermodynamic-force table per TF energy group; file names
 * are built from tabfn with a "tf_<groupname>" suffix before the .xvg
 * extension. Fatals when no table file name was supplied.
 * (Excerpt: some original lines are elided.) */
1275 static void make_adress_tf_tables(FILE *fp,const output_env_t oenv,
1276 t_forcerec *fr,const t_inputrec *ir,
1277 const char *tabfn, const gmx_mtop_t *mtop,
1283 if (tabfn == NULL) {
1284 gmx_fatal(FARGS,"No thermoforce table file given. Use -tabletf to specify a file\n");
1288 snew(fr->atf_tabs, ir->adress->n_tf_grps);
1290 for (i=0; i<ir->adress->n_tf_grps; i++){
1291 j = ir->adress->tf_table_index[i]; /* get energy group index */
1292 sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1,"tf_%s.%s",
1293 *(mtop->groups.grpname[mtop->groups.grps[egcENER].nm_ind[j]]) ,ftp2ext(efXVG));
/* NOTE(review): this indexes tf_table_index with j (itself already a table
 * index) rather than i -- looks suspicious; verify against the full file. */
1294 printf("loading tf table for energygrp index %d from %s\n", ir->adress->tf_table_index[j], buf);
1295 fr->atf_tabs[i] = make_atf_table(fp,oenv,fr,buf, box);
/* Decide whether the accelerated all-vs-all kernels are usable for this run:
 * requires no PBC, plain cut-off electrostatics and VdW, and either no
 * implicit solvent or a supported GB algorithm; can be vetoed via the
 * GMX_NO_ALLVSALL environment variable. Prints a note when multiple energy
 * monitor groups prevent their use. (Excerpt: some original lines are elided.) */
1300 gmx_bool can_use_allvsall(const t_inputrec *ir, const gmx_mtop_t *mtop,
1301 gmx_bool bPrintNote,t_commrec *cr,FILE *fp)
1310 ir->ePBC==epbcNONE &&
1311 ir->vdwtype==evdwCUT &&
1312 ir->coulombtype==eelCUT &&
1314 (ir->implicit_solvent == eisNO ||
1315 (ir->implicit_solvent==eisGBSA && (ir->gb_algorithm==egbSTILL ||
1316 ir->gb_algorithm==egbHCT ||
1317 ir->gb_algorithm==egbOBC))) &&
1318 getenv("GMX_NO_ALLVSALL") == NULL
1321 if (bAllvsAll && ir->opts.ngener > 1)
1323 const char *note="NOTE: Can not use all-vs-all force loops, because there are multiple energy monitor groups; you might get significantly higher performance when using only a single energy monitor group.\n";
1329 fprintf(stderr,"\n%s\n",note);
1333 fprintf(fp,"\n%s\n",note);
1339 if(bAllvsAll && fp && MASTER(cr))
1341 fprintf(fp,"\nUsing accelerated all-vs-all kernels.\n\n");
/* Set up per-thread force/energy accumulation buffers for OpenMP bonded
 * threading. Thread 0 reuses the global arrays; threads 1..n-1 each get
 * their own shift-force and energy-group buffers.
 * (Excerpt: some original lines are elided.) */
1348 static void init_forcerec_f_threads(t_forcerec *fr,int grpp_nener)
1352 fr->nthreads = gmx_omp_nthreads_get(emntBonded);
1354 if (fr->nthreads > 1)
1356 snew(fr->f_t,fr->nthreads);
1357 /* Thread 0 uses the global force and energy arrays */
1358 for(t=1; t<fr->nthreads; t++)
1360 fr->f_t[t].f = NULL;
1361 fr->f_t[t].f_nalloc = 0;
1362 snew(fr->f_t[t].fshift,SHIFTS);
1363 /* snew(fr->f_t[t].ener,F_NRE); */
1364 fr->f_t[t].grpp.nener = grpp_nener;
1365 for(i=0; i<egNR; i++)
1367 snew(fr->f_t[t].grpp.ener[i],grpp_nener);
/* Select the CPU nbnxn kernel flavour: plain C fallback, or an x86 SIMD
 * kernel (128- or 256-bit) based on CPUID features and vendor, overridable
 * via GMX_NBNXN_AVX128 / GMX_NBNXN_AVX256.
 * (Excerpt: some original lines are elided.) */
1374 static void pick_nbnxn_kernel_cpu(FILE *fp,
1375 const t_commrec *cr,
1376 const gmx_cpuid_t cpuid_info,
1379 *kernel_type = nbk4x4_PlainC;
1383 /* On Intel Sandy-Bridge AVX-256 kernels are always faster.
1384 * On AMD Bulldozer AVX-256 is much slower than AVX-128.
1386 if(gmx_cpuid_feature(cpuid_info, GMX_CPUID_FEATURE_X86_AVX) == 1 &&
1387 gmx_cpuid_vendor(cpuid_info) != GMX_CPUID_VENDOR_AMD)
1389 #ifdef GMX_X86_AVX_256
1390 *kernel_type = nbk4xN_X86_SIMD256;
1392 *kernel_type = nbk4xN_X86_SIMD128;
1397 *kernel_type = nbk4xN_X86_SIMD128;
/* Environment-variable overrides for benchmarking/debugging. */
1400 if (getenv("GMX_NBNXN_AVX128") != NULL)
1402 *kernel_type = nbk4xN_X86_SIMD128;
1404 if (getenv("GMX_NBNXN_AVX256") != NULL)
1406 #ifdef GMX_X86_AVX_256
1407 *kernel_type = nbk4xN_X86_SIMD256;
1409 gmx_fatal(FARGS,"You requested AVX-256 nbnxn kernels, but GROMACS was built without AVX support");
1413 #endif /* GMX_X86_SSE2 */
1417 /* Note that _mm_... intrinsics can be converted to either SSE or AVX
1418 * depending on compiler flags.
1419 * For gcc we check for __AVX__
1420 * At least a check for icc should be added (if there is a macro)
/* Human-readable names indexed by the nbnxn kernel-type enum; the SIMD
 * entries between these lines are selected by preprocessor conditionals
 * (elided in this excerpt). */
1422 static const char *nbk_name[] =
1423 { "not set", "plain C 4x4",
1424 #if !(defined GMX_X86_AVX_256 || defined GMX_X86_AVX128_FMA || defined __AVX__)
1425 #ifndef GMX_X86_SSE4_1
1450 "CUDA 8x8x8", "plain C 8x8x8" };
/* Top-level nbnxn kernel selection: choose between the CUDA GPU kernel,
 * GPU emulation (plain C 8x8x8), or a CPU kernel via
 * pick_nbnxn_kernel_cpu(), and report the choice.
 * (Excerpt: some original lines are elided.) */
1452 static void pick_nbnxn_kernel(FILE *fp,
1453 const t_commrec *cr,
1454 const gmx_hw_info_t *hwinfo,
1455 gmx_bool use_cpu_acceleration,
1459 gmx_bool bEmulateGPU, bGPU;
1460 char gpu_err_str[STRLEN];
1462 assert(kernel_type);
1464 *kernel_type = nbkNotSet;
1465 /* if bUseGPU == NULL we don't want a GPU (e.g. hybrid mode kernel selection) */
1466 bGPU = (bUseGPU != NULL) && hwinfo->bCanUseGPU;
1468 /* Run GPU emulation mode if GMX_EMULATE_GPU is defined or in case if nonbonded
1469 calculations are turned off via GMX_NO_NONBONDED -- this is the simple way
1470 to turn off GPU/CUDA initializations as well.. */
1471 bEmulateGPU = ((getenv("GMX_EMULATE_GPU") != NULL) ||
1472 (getenv("GMX_NO_NONBONDED") != NULL));
1482 /* Each PP node will use the intra-node id-th device from the
1483 * list of detected/selected GPUs. */
1484 if (!init_gpu(cr->nodeid_group_intra, gpu_err_str, &hwinfo->gpu_info))
1486 /* At this point the init should never fail as we made sure that
1487 * we have all the GPUs we need. If it still does, we'll bail. */
1488 gmx_fatal(FARGS, "On node %d failed to initialize GPU #%d: %s",
1490 get_gpu_device_id(&hwinfo->gpu_info, cr->nodeid_group_intra),
1499 *kernel_type = nbk8x8x8_PlainC;
1501 md_print_warn(cr, fp, "Emulating a GPU run on the CPU (slow)");
1505 *kernel_type = nbk8x8x8_CUDA;
1508 if (*kernel_type == nbkNotSet)
1510 if (use_cpu_acceleration)
1512 pick_nbnxn_kernel_cpu(fp,cr,hwinfo->cpuid_info,kernel_type);
1516 *kernel_type = nbk4x4_PlainC;
1524 fprintf(stderr,"Using %s non-bonded kernels\n",
1525 nbk_name[*kernel_type]);
1527 fprintf(fp,"\nUsing %s non-bonded kernels\n\n",
1528 nbk_name[*kernel_type]);
/* Build the tabulated Ewald correction force/potential for the Verlet
 * scheme: choose table size/format depending on whether the kernel is a
 * simple (SIMD/plain-C) or GPU kernel, then fill the table(s) with the
 * cubic-spline Ewald long-range correction.
 * (Excerpt: some original lines are elided.) */
1533 static void init_verlet_ewald_f_table(interaction_const_t *ic,
1534 int verlet_kernel_type)
1536 if (nbnxn_kernel_pairlist_simple(verlet_kernel_type))
1538 /* With a spacing of 0.0005 we are at the force summation accuracy
1539 * for the SSE kernels for "normal" atomistic simulations.
1541 ic->tabq_scale = ewald_spline3_table_scale(ic->ewaldcoeff,
1543 ic->tabq_size = (int)(ic->rcoulomb*ic->tabq_scale) + 2;
1545 ic->tabq_format = tableformatFDV0;
1547 ic->tabq_format = tableformatF;
1552 ic->tabq_size = GPU_EWALD_COULOMB_FORCE_TABLE_SIZE;
1553 /* Subtract 2 iso 1 to avoid access out of range due to rounding */
1554 ic->tabq_scale = (ic->tabq_size - 2)/ic->rcoulomb;
1555 if (verlet_kernel_type == nbk8x8x8_CUDA)
1557 /* This case is handled in the nbnxn CUDA module */
1558 ic->tabq_format = tableformatNONE;
1562 ic->tabq_format = tableformatF;
1566 switch (ic->tabq_format)
1568 case tableformatNONE:
/* Re-allocate 16-byte-aligned tables and fill them. */
1571 sfree_aligned(ic->tabq_coul_F);
1572 sfree_aligned(ic->tabq_coul_V);
1573 snew_aligned(ic->tabq_coul_F,ic->tabq_size,16);
1574 snew_aligned(ic->tabq_coul_V,ic->tabq_size,16);
1575 table_spline3_fill_ewald_lr(ic->tabq_coul_F,ic->tabq_coul_V,
1576 ic->tabq_size,ic->tabq_format,
1577 1/ic->tabq_scale,ic->ewaldcoeff);
1579 case tableformatFDV0:
1580 sfree_aligned(ic->tabq_coul_F);
1581 snew_aligned(ic->tabq_coul_FDV0,ic->tabq_size*4,16);
1582 table_spline3_fill_ewald_lr(ic->tabq_coul_FDV0,NULL,
1583 ic->tabq_size,ic->tabq_format,
1584 1/ic->tabq_scale,ic->ewaldcoeff);
1587 gmx_incons("Unknown table format");
/* Initialize the Ewald correction tables in ic when Ewald/PME
 * electrostatics is in use, and log the resulting spacing and size. */
1591 void init_interaction_const_tables(FILE *fp,
1592 interaction_const_t *ic,
1593 int verlet_kernel_type)
1597 if (ic->eeltype == eelEWALD || EEL_PME(ic->eeltype))
1599 init_verlet_ewald_f_table(ic,verlet_kernel_type);
1603 fprintf(fp,"Initialized non-bonded Ewald correction tables, spacing: %.2e size: %d\n\n",
1604 1/ic->tabq_scale,ic->tabq_size);
/* Allocate and fill the interaction_const_t from the force record: cut-offs,
 * LJ/Coulomb potential-shift constants, Ewald coefficient, reaction-field
 * parameters (or RF-style constants for plain cut-off), then hand the
 * constants to the CUDA module and build the Ewald tables for the Verlet
 * scheme. (Excerpt: some original lines are elided.) */
1609 void init_interaction_const(FILE *fp,
1610 interaction_const_t **interaction_const,
1611 const t_forcerec *fr)
1613 interaction_const_t *ic;
1617 ic->rlist = fr->rlist;
1620 ic->rvdw = fr->rvdw;
1621 if (fr->vdw_pot_shift)
/* Shift constant so the LJ potential is zero at the cut-off: rc^-6. */
1623 ic->sh_invrc6 = pow(ic->rvdw,-6.0);
1630 /* Electrostatics */
1631 ic->eeltype = fr->eeltype;
1632 ic->rcoulomb = fr->rcoulomb;
1633 ic->epsilon_r = fr->epsilon_r;
1634 ic->epsfac = fr->epsfac;
1637 ic->ewaldcoeff = fr->ewaldcoeff;
1638 if (fr->coul_pot_shift)
1640 ic->sh_ewald = gmx_erfc(ic->ewaldcoeff*ic->rcoulomb);
1647 /* Reaction-field */
1648 if (EEL_RF(ic->eeltype))
1650 ic->epsilon_rf = fr->epsilon_rf;
1651 ic->k_rf = fr->k_rf;
1652 ic->c_rf = fr->c_rf;
1656 /* For plain cut-off we might use the reaction-field kernels */
1657 ic->epsilon_rf = ic->epsilon_r;
1659 if (fr->coul_pot_shift)
1661 ic->c_rf = 1/ic->rcoulomb;
1671 fprintf(fp,"Potential shift: LJ r^-12: %.3f r^-6 %.3f",
1672 sqr(ic->sh_invrc6),ic->sh_invrc6);
1673 if (ic->eeltype == eelCUT)
1675 fprintf(fp,", Coulomb %.3f",ic->c_rf);
1677 else if (EEL_PME(ic->eeltype))
1679 fprintf(fp,", Ewald %.3e",ic->sh_ewald);
1684 *interaction_const = ic;
1686 if (fr->nbv != NULL && fr->nbv->bUseGPU)
1688 nbnxn_cuda_init_const(fr->nbv->cu_nbv, ic, fr->nbv);
1691 if (fr->cutoff_scheme == ecutsVERLET)
1693 assert(fr->nbv != NULL && fr->nbv->grp != NULL);
1694 init_interaction_const_tables(fp,ic,fr->nbv->grp[fr->nbv->ngrp-1].kernel_type);
/* Set up the Verlet-scheme non-bonded data structure (*nb_verlet):
 * - pick a kernel type per group (group 0 = local, group 1 = non-local
 *   when domain decomposition is active),
 * - initialize CUDA data if a GPU kernel was selected,
 * - set the pair-list balancing parameter (env var GMX_NB_MIN_CI,
 *   GPU auto-tuned value, or 0),
 * - initialize the pair search, pair-list sets and atom-data buffers.
 * nbpu_opt is the user's mdrun -nb choice ("gpu_cpu" = GPU for local,
 * CPU for non-local).
 * NOTE(review): listing is elided; allocation of nbv and several
 * braces/else branches fall between the numbered lines.
 */
1698 static void init_nb_verlet(FILE *fp,
1699 nonbonded_verlet_t **nb_verlet,
1700 const t_inputrec *ir,
1701 const t_forcerec *fr,
1702 const t_commrec *cr,
1703 const char *nbpu_opt)
1705 nonbonded_verlet_t *nbv;
1708 gmx_bool bHybridGPURun = FALSE;
1710 nbnxn_alloc_t *nb_alloc;
1711 nbnxn_free_t *nb_free;
/* With domain decomposition we keep separate local/non-local groups */
1717 nbv->ngrp = (DOMAINDECOMP(cr) ? 2 : 1);
1718 for(i=0; i<nbv->ngrp; i++)
1720 nbv->grp[i].nbl_lists.nnbl = 0;
1721 nbv->grp[i].nbat = NULL;
1722 nbv->grp[i].kernel_type = nbkNotSet;
1724 if (i == 0) /* local */
1726 pick_nbnxn_kernel(fp, cr, fr->hwinfo, fr->use_cpu_acceleration,
1728 &nbv->grp[i].kernel_type);
1730 else /* non-local */
1732 if (nbpu_opt != NULL && strcmp(nbpu_opt,"gpu_cpu") == 0)
1734 /* Use GPU for local, select a CPU kernel for non-local */
1735 pick_nbnxn_kernel(fp, cr, fr->hwinfo, fr->use_cpu_acceleration,
1737 &nbv->grp[i].kernel_type);
1739 bHybridGPURun = TRUE;
1743 /* Use the same kernel for local and non-local interactions */
1744 nbv->grp[i].kernel_type = nbv->grp[0].kernel_type;
1751 /* init the NxN GPU data; the last argument tells whether we'll have
1752 * both local and non-local NB calculation on GPU */
1753 nbnxn_cuda_init(fp, &nbv->cu_nbv,
1754 &fr->hwinfo->gpu_info, cr->nodeid_group_intra,
1755 (nbv->ngrp > 1) && !bHybridGPURun);
/* Pair-list balancing: explicit override via environment variable... */
1757 if ((env = getenv("GMX_NB_MIN_CI")) != NULL)
1761 nbv->min_ci_balanced = strtol(env, &end, 10);
1762 if (!end || (*end != 0) || nbv->min_ci_balanced <= 0)
1764 gmx_fatal(FARGS, "Invalid value passed in GMX_NB_MIN_CI=%s, positive integer required", env);
1769 fprintf(debug, "Neighbor-list balancing parameter: %d (passed as env. var.)\n",
1770 nbv->min_ci_balanced);
/* ...otherwise auto-tune to the GPU's multiprocessor count... */
1775 nbv->min_ci_balanced = nbnxn_cuda_min_ci_balanced(nbv->cu_nbv);
1778 fprintf(debug, "Neighbor-list balancing parameter: %d (auto-adjusted to the number of GPU multi-processors)\n",
1779 nbv->min_ci_balanced);
/* ...or disable balancing (CPU-only run, presumably — elided branch) */
1785 nbv->min_ci_balanced = 0;
/* Pair search uses the DD cell/zone layout when decomposing */
1790 nbnxn_init_search(&nbv->nbs,
1791 DOMAINDECOMP(cr) ? & cr->dd->nc : NULL,
1792 DOMAINDECOMP(cr) ? domdec_zones(cr->dd) : NULL,
1793 gmx_omp_nthreads_get(emntNonbonded));
1795 for(i=0; i<nbv->ngrp; i++)
/* GPU kernels need page-locked (pinned) host memory for transfers */
1797 if (nbv->grp[0].kernel_type == nbk8x8x8_CUDA)
1799 nb_alloc = &pmalloc;
1808 nbnxn_init_pairlist_set(&nbv->grp[i].nbl_lists,
1809 nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
1810 /* 8x8x8 "non-simple" lists are ATM always combined */
1811 !nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
/* Only allocate separate atom data when this group's kernel differs
 * from group 0's; otherwise share group 0's buffer (line 1828). */
1815 nbv->grp[0].kernel_type != nbv->grp[i].kernel_type)
1817 snew(nbv->grp[i].nbat,1);
1818 nbnxn_atomdata_init(fp,
1820 nbv->grp[i].kernel_type,
1823 nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type) ? gmx_omp_nthreads_get(emntNonbonded) : 1,
1828 nbv->grp[i].nbat = nbv->grp[0].nbat;
/* Master initializer of the t_forcerec structure: copies run parameters
 * from the input record and topology into fr, builds all lookup tables
 * (non-bonded, 1-4, GB, AdResS thermo-force, walls, bonded), sets up
 * Ewald/PME and reaction-field constants, GBSA data, QM/MM, charge-group
 * info, neighbor searching, kernels and — for the Verlet scheme — the
 * nbnxn/GPU machinery and interaction constants.
 * NOTE(review): this listing is heavily elided; braces, else-branches and
 * some statements fall between the numbered lines and are not visible.
 */
1833 void init_forcerec(FILE *fp,
1834 const output_env_t oenv,
1837 const t_inputrec *ir,
1838 const gmx_mtop_t *mtop,
1839 const t_commrec *cr,
1846 const char *nbpu_opt,
1847 gmx_bool bNoSolvOpt,
1850 int i,j,m,natoms,ngrp,negp_pp,negptable,egi,egj;
1856 gmx_bool bGenericKernelOnly;
1857 gmx_bool bTab,bSep14tab,bNormalnblists;
1859 int *nm_ind,egp_flags;
1861 /* By default we turn acceleration on, but it might be turned off further down... */
1862 fr->use_cpu_acceleration = TRUE;
1864 fr->bDomDec = DOMAINDECOMP(cr);
1866 natoms = mtop->natoms;
/* check_box returns a diagnostic string (NULL when the box is valid) */
1868 if (check_box(ir->ePBC,box))
1870 gmx_fatal(FARGS,check_box(ir->ePBC,box));
1873 /* Test particle insertion ? */
1874 if (EI_TPI(ir->eI)) {
1875 /* Set to the size of the molecule to be inserted (the last one) */
1876 /* Because of old style topologies, we have to use the last cg
1877 * instead of the last molecule type.
1879 cgs = &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].cgs;
1880 fr->n_tpi = cgs->index[cgs->nr] - cgs->index[cgs->nr-1];
1881 if (fr->n_tpi != mtop->mols.index[mtop->mols.nr] - mtop->mols.index[mtop->mols.nr-1]) {
1882 gmx_fatal(FARGS,"The molecule to insert can not consist of multiple charge groups.\nMake it a single charge group.");
1888 /* Copy AdResS parameters */
1890 fr->adress_type = ir->adress->type;
1891 fr->adress_const_wf = ir->adress->const_wf;
1892 fr->adress_ex_width = ir->adress->ex_width;
1893 fr->adress_hy_width = ir->adress->hy_width;
1894 fr->adress_icor = ir->adress->icor;
1895 fr->adress_site = ir->adress->site;
1896 fr->adress_ex_forcecap = ir->adress->ex_forcecap;
1897 fr->adress_do_hybridpairs = ir->adress->do_hybridpairs;
1900 snew(fr->adress_group_explicit , ir->adress->n_energy_grps);
1901 for (i=0; i< ir->adress->n_energy_grps; i++){
1902 fr->adress_group_explicit[i]= ir->adress->group_explicit[i];
1905 fr->n_adress_tf_grps = ir->adress->n_tf_grps;
1906 snew(fr->adress_tf_table_index, fr->n_adress_tf_grps);
1907 for (i=0; i< fr->n_adress_tf_grps; i++){
1908 fr->adress_tf_table_index[i]= ir->adress->tf_table_index[i];
1910 copy_rvec(ir->adress->refs,fr->adress_refs);
/* AdResS disabled (elided else-branch) */
1912 fr->adress_type = eAdressOff;
1913 fr->adress_do_hybridpairs = FALSE;
1916 /* Copy the user determined parameters */
1917 fr->userint1 = ir->userint1;
1918 fr->userint2 = ir->userint2;
1919 fr->userint3 = ir->userint3;
1920 fr->userint4 = ir->userint4;
1921 fr->userreal1 = ir->userreal1;
1922 fr->userreal2 = ir->userreal2;
1923 fr->userreal3 = ir->userreal3;
1924 fr->userreal4 = ir->userreal4;
1927 fr->fc_stepsize = ir->fc_stepsize;
/* Free-energy / soft-core parameters */
1930 fr->efep = ir->efep;
1931 fr->sc_alphavdw = ir->fepvals->sc_alpha;
1932 if (ir->fepvals->bScCoul)
1934 fr->sc_alphacoul = ir->fepvals->sc_alpha;
1935 fr->sc_sigma6_min = pow(ir->fepvals->sc_sigma_min,6);
1939 fr->sc_alphacoul = 0;
1940 fr->sc_sigma6_min = 0; /* only needed when bScCoul is on */
1942 fr->sc_power = ir->fepvals->sc_power;
1943 fr->sc_r_power = ir->fepvals->sc_r_power;
1944 fr->sc_sigma6_def = pow(ir->fepvals->sc_sigma,6);
/* Environment-variable overrides for debugging/benchmarking */
1946 env = getenv("GMX_SCSIGMA_MIN");
1950 sscanf(env,"%lf",&dbl);
1951 fr->sc_sigma6_min = pow(dbl,6);
1954 fprintf(fp,"Setting the minimum soft core sigma to %g nm\n",dbl);
1958 fr->bNonbonded = TRUE;
1959 if (getenv("GMX_NO_NONBONDED") != NULL)
1961 /* turn off non-bonded calculations */
1962 fr->bNonbonded = FALSE;
1963 md_print_warn(cr,fp,
1964 "Found environment variable GMX_NO_NONBONDED.\n"
1965 "Disabling nonbonded calculations.\n");
1968 bGenericKernelOnly = FALSE;
1969 if (getenv("GMX_NB_GENERIC") != NULL)
1974 "Found environment variable GMX_NB_GENERIC.\n"
1975 "Disabling interaction-specific nonbonded kernels.\n\n");
1977 bGenericKernelOnly = TRUE;
1981 if( (getenv("GMX_DISABLE_CPU_ACCELERATION") != NULL) || (getenv("GMX_NOOPTIMIZEDKERNELS") != NULL) )
1983 fr->use_cpu_acceleration = FALSE;
1987 "\nFound environment variable GMX_DISABLE_CPU_ACCELERATION.\n"
1988 "Disabling all CPU architecture-specific (e.g. SSE2/SSE4/AVX) routines.\n\n");
1992 /* Check if we can/should do all-vs-all kernels */
1993 fr->bAllvsAll = can_use_allvsall(ir,mtop,FALSE,NULL,NULL);
1994 fr->AllvsAll_work = NULL;
1995 fr->AllvsAll_workgb = NULL;
1998 /* Neighbour searching stuff */
1999 fr->cutoff_scheme = ir->cutoff_scheme;
2000 fr->bGrid = (ir->ns_type == ensGRID);
2001 fr->ePBC = ir->ePBC;
2003 /* Determine if we will do PBC for distances in bonded interactions */
2004 if (fr->ePBC == epbcNONE)
2006 fr->bMolPBC = FALSE;
2010 if (!DOMAINDECOMP(cr))
2012 /* The group cut-off scheme and SHAKE assume charge groups
2013 * are whole, but not using molpbc is faster in most cases.
2015 if (fr->cutoff_scheme == ecutsGROUP ||
2016 (ir->eConstrAlg == econtSHAKE &&
2017 (gmx_mtop_ftype_count(mtop,F_CONSTR) > 0 ||
2018 gmx_mtop_ftype_count(mtop,F_CONSTRNC) > 0)))
2020 fr->bMolPBC = ir->bPeriodicMols;
2025 if (getenv("GMX_USE_GRAPH") != NULL)
2027 fr->bMolPBC = FALSE;
2030 fprintf(fp,"\nGMX_MOLPBC is set, using the graph for bonded interactions\n\n");
/* With domain decomposition the DD code decides on molecule PBC */
2037 fr->bMolPBC = dd_bonded_molpbc(cr->dd,fr->ePBC);
2040 fr->rc_scaling = ir->refcoord_scaling;
2041 copy_rvec(ir->posres_com,fr->posres_com);
2042 copy_rvec(ir->posres_comB,fr->posres_comB);
/* cutoff_inf maps a zero cut-off to "infinite" (GMX_CUTOFF_INF) */
2043 fr->rlist = cutoff_inf(ir->rlist);
2044 fr->rlistlong = cutoff_inf(ir->rlistlong);
2045 fr->eeltype = ir->coulombtype;
2046 fr->vdwtype = ir->vdwtype;
2048 fr->coul_pot_shift = (ir->coulomb_modifier == eintmodPOTSHIFT);
2049 fr->vdw_pot_shift = (ir->vdw_modifier == eintmodPOTSHIFT);
2051 fr->bTwinRange = fr->rlistlong > fr->rlist;
2052 fr->bEwald = (EEL_PME(fr->eeltype) || fr->eeltype==eelEWALD);
2054 fr->reppow = mtop->ffparams.reppow;
/* Decide whether coulomb/vdw use table lookups (group scheme only) */
2056 if (ir->cutoff_scheme == ecutsGROUP)
2058 fr->bvdwtab = (fr->vdwtype != evdwCUT ||
2059 !gmx_within_tol(fr->reppow,12.0,10*GMX_DOUBLE_EPS));
2060 fr->bcoultab = (!(fr->eeltype == eelCUT || EEL_RF(fr->eeltype)) ||
2061 fr->eeltype == eelRF_ZERO);
2063 if (getenv("GMX_REQUIRE_TABLES"))
2066 fr->bcoultab = TRUE;
2071 fprintf(fp,"Table routines are used for coulomb: %s\n",bool_names[fr->bcoultab]);
2072 fprintf(fp,"Table routines are used for vdw: %s\n",bool_names[fr->bvdwtab ]);
2076 if (ir->cutoff_scheme == ecutsVERLET)
2078 if (!gmx_within_tol(fr->reppow,12.0,10*GMX_DOUBLE_EPS))
/* NOTE(review): "%S" looks like a typo for "%s" — verify against
 * gmx_fatal's format handling before changing. */
2080 gmx_fatal(FARGS,"Cut-off scheme %S only supports LJ repulsion power 12",ecutscheme_names[ir->cutoff_scheme]);
2082 fr->bvdwtab = FALSE;
2083 fr->bcoultab = FALSE;
2086 /* Tables are used for direct ewald sum */
2089 if (EEL_PME(ir->coulombtype))
2092 fprintf(fp,"Will do PME sum in reciprocal space.\n");
2093 if (ir->coulombtype == eelP3M_AD)
2095 please_cite(fp,"Hockney1988");
2096 please_cite(fp,"Ballenegger2012");
2100 please_cite(fp,"Essmann95a");
2103 if (ir->ewald_geometry == eewg3DC)
2107 fprintf(fp,"Using the Ewald3DC correction for systems with a slab geometry.\n");
2109 please_cite(fp,"In-Chul99a");
/* Ewald splitting parameter beta from the cut-off and requested rtol */
2112 fr->ewaldcoeff=calc_ewaldcoeff(ir->rcoulomb, ir->ewald_rtol);
2113 init_ewald_tab(&(fr->ewald_table), cr, ir, fp);
2116 fprintf(fp,"Using a Gaussian width (1/beta) of %g nm for Ewald\n",
2121 /* Electrostatics */
2122 fr->epsilon_r = ir->epsilon_r;
2123 fr->epsilon_rf = ir->epsilon_rf;
2124 fr->fudgeQQ = mtop->ffparams.fudgeQQ;
2125 fr->rcoulomb_switch = ir->rcoulomb_switch;
2126 fr->rcoulomb = cutoff_inf(ir->rcoulomb);
2128 /* Parameters for generalized RF */
2132 if (fr->eeltype == eelGRF)
2134 init_generalized_rf(fp,mtop,ir,fr);
2136 else if (fr->eeltype == eelSHIFT)
2138 for(m=0; (m<DIM); m++)
2139 box_size[m]=box[m][m];
2141 if ((fr->eeltype == eelSHIFT && fr->rcoulomb > fr->rcoulomb_switch))
2142 set_shift_consts(fp,fr->rcoulomb_switch,fr->rcoulomb,box_size,fr);
/* Forces that must be excluded from the virial summation */
2145 fr->bF_NoVirSum = (EEL_FULL(fr->eeltype) ||
2146 gmx_mtop_ftype_count(mtop,F_POSRES) > 0 ||
2147 gmx_mtop_ftype_count(mtop,F_FBPOSRES) > 0 ||
2148 IR_ELEC_FIELD(*ir) ||
2149 (fr->adress_icor != eAdressICOff)
2152 if (fr->cutoff_scheme == ecutsGROUP &&
2153 ncg_mtop(mtop) > fr->cg_nalloc && !DOMAINDECOMP(cr)) {
2154 /* Count the total number of charge groups */
2155 fr->cg_nalloc = ncg_mtop(mtop);
2156 srenew(fr->cg_cm,fr->cg_nalloc);
2158 if (fr->shift_vec == NULL)
2159 snew(fr->shift_vec,SHIFTS);
2161 if (fr->fshift == NULL)
2162 snew(fr->fshift,SHIFTS);
/* Build the non-bonded force parameter matrix on first call */
2164 if (fr->nbfp == NULL) {
2165 fr->ntype = mtop->ffparams.atnr;
2166 fr->bBHAM = (mtop->ffparams.functype[0] == F_BHAM);
2167 fr->nbfp = mk_nbfp(&mtop->ffparams,fr->bBHAM);
2170 /* Copy the energy group exclusions */
2171 fr->egp_flags = ir->opts.egp_flags;
2173 /* Van der Waals stuff */
2174 fr->rvdw = cutoff_inf(ir->rvdw);
2175 fr->rvdw_switch = ir->rvdw_switch;
2176 if ((fr->vdwtype != evdwCUT) && (fr->vdwtype != evdwUSER) && !fr->bBHAM) {
2177 if (fr->rvdw_switch >= fr->rvdw)
2178 gmx_fatal(FARGS,"rvdw_switch (%f) must be < rvdw (%f)",
2179 fr->rvdw_switch,fr->rvdw);
/* NOTE(review): this message is about Lennard-Jones, yet the ternary
 * tests the *electrostatics* type (eeltype==eelSWITCH); it likely should
 * test fr->vdwtype==evdwSWITCH — confirm against upstream history. */
2181 fprintf(fp,"Using %s Lennard-Jones, switch between %g and %g nm\n",
2182 (fr->eeltype==eelSWITCH) ? "switched":"shifted",
2183 fr->rvdw_switch,fr->rvdw);
2186 if (fr->bBHAM && (fr->vdwtype == evdwSHIFT || fr->vdwtype == evdwSWITCH))
2187 gmx_fatal(FARGS,"Switch/shift interaction not supported with Buckingham");
2190 fprintf(fp,"Cut-off's: NS: %g Coulomb: %g %s: %g\n",
2191 fr->rlist,fr->rcoulomb,fr->bBHAM ? "BHAM":"LJ",fr->rvdw);
2193 fr->eDispCorr = ir->eDispCorr;
2194 if (ir->eDispCorr != edispcNO)
2196 set_avcsixtwelve(fp,fr,mtop);
2201 set_bham_b_max(fp,fr,mtop);
/* Implicit solvent (GBSA) setup */
2204 fr->bGB = (ir->implicit_solvent == eisGBSA);
2205 fr->gb_epsilon_solvent = ir->gb_epsilon_solvent;
2207 /* Copy the GBSA data (radius, volume and surftens for each
2208 * atomtype) from the topology atomtype section to forcerec.
2210 snew(fr->atype_radius,fr->ntype);
2211 snew(fr->atype_vol,fr->ntype);
2212 snew(fr->atype_surftens,fr->ntype);
2213 snew(fr->atype_gb_radius,fr->ntype);
2214 snew(fr->atype_S_hct,fr->ntype);
2216 if (mtop->atomtypes.nr > 0)
2218 for(i=0;i<fr->ntype;i++)
2219 fr->atype_radius[i] =mtop->atomtypes.radius[i];
2220 for(i=0;i<fr->ntype;i++)
2221 fr->atype_vol[i] = mtop->atomtypes.vol[i];
2222 for(i=0;i<fr->ntype;i++)
2223 fr->atype_surftens[i] = mtop->atomtypes.surftens[i];
2224 for(i=0;i<fr->ntype;i++)
2225 fr->atype_gb_radius[i] = mtop->atomtypes.gb_radius[i];
2226 for(i=0;i<fr->ntype;i++)
2227 fr->atype_S_hct[i] = mtop->atomtypes.S_hct[i];
2230 /* Generate the GB table if needed */
2234 fr->gbtabscale=2000;
2240 fr->gbtab=make_gb_table(fp,oenv,fr,tabpfn,fr->gbtabscale);
2242 init_gb(&fr->born,cr,fr,ir,mtop,ir->rgbradii,ir->gb_algorithm);
2244 /* Copy local gb data (for dd, this is done in dd_partition_system) */
2245 if (!DOMAINDECOMP(cr))
2247 make_local_gb(cr,fr->born,ir->gb_algorithm);
2251 /* Set the charge scaling */
2252 if (fr->epsilon_r != 0)
2253 fr->epsfac = ONE_4PI_EPS0/fr->epsilon_r;
2255 /* eps = 0 is infinite dieletric: no coulomb interactions */
2258 /* Reaction field constants */
2259 if (EEL_RF(fr->eeltype))
2260 calc_rffac(fp,fr->eeltype,fr->epsilon_r,fr->epsilon_rf,
2261 fr->rcoulomb,fr->temp,fr->zsquare,box,
2262 &fr->kappa,&fr->k_rf,&fr->c_rf);
2264 set_chargesum(fp,fr,mtop);
2266 /* if we are using LR electrostatics, and they are tabulated,
2267 * the tables will contain modified coulomb interactions.
2268 * Since we want to use the non-shifted ones for 1-4
2269 * coulombic interactions, we must have an extra set of tables.
2272 /* Construct tables.
2273 * A little unnecessary to make both vdw and coul tables sometimes,
2274 * but what the heck... */
2276 bTab = fr->bcoultab || fr->bvdwtab;
2278 bSep14tab = ((!bTab || fr->eeltype!=eelCUT || fr->vdwtype!=evdwCUT ||
2280 (gmx_mtop_ftype_count(mtop,F_LJ14) > 0 ||
2281 gmx_mtop_ftype_count(mtop,F_LJC14_Q) > 0 ||
2282 gmx_mtop_ftype_count(mtop,F_LJC_PAIRS_NB) > 0));
/* Count energy-group pairs that need their own user table */
2284 negp_pp = ir->opts.ngener - ir->nwall;
2287 bNormalnblists = TRUE;
2290 bNormalnblists = (ir->eDispCorr != edispcNO);
2291 for(egi=0; egi<negp_pp; egi++) {
2292 for(egj=egi; egj<negp_pp; egj++) {
2293 egp_flags = ir->opts.egp_flags[GID(egi,egj,ir->opts.ngener)];
2294 if (!(egp_flags & EGP_EXCL)) {
2295 if (egp_flags & EGP_TABLE) {
2298 bNormalnblists = TRUE;
2303 if (bNormalnblists) {
2304 fr->nnblists = negptable + 1;
2306 fr->nnblists = negptable;
2308 if (fr->nnblists > 1)
2309 snew(fr->gid2nblists,ir->opts.ngener*ir->opts.ngener);
2311 snew(fr->nblists,fr->nnblists);
2313 /* This code automatically gives table length tabext without cut-off's,
2314 * in that case grompp should already have checked that we do not need
2315 * normal tables and we only generate tables for 1-4 interactions.
2317 rtab = ir->rlistlong + ir->tabext;
2320 /* make tables for ordinary interactions */
2321 if (bNormalnblists) {
2322 make_nbf_tables(fp,oenv,fr,rtab,cr,tabfn,NULL,NULL,&fr->nblists[0]);
2324 fr->tab14 = fr->nblists[0].tab;
2329 if (negptable > 0) {
2330 /* Read the special tables for certain energy group pairs */
2331 nm_ind = mtop->groups.grps[egcENER].nm_ind;
2332 for(egi=0; egi<negp_pp; egi++) {
2333 for(egj=egi; egj<negp_pp; egj++) {
2334 egp_flags = ir->opts.egp_flags[GID(egi,egj,ir->opts.ngener)];
2335 if ((egp_flags & EGP_TABLE) && !(egp_flags & EGP_EXCL)) {
2336 nbl = &(fr->nblists[m]);
2337 if (fr->nnblists > 1) {
2338 fr->gid2nblists[GID(egi,egj,ir->opts.ngener)] = m;
2340 /* Read the table file with the two energy groups names appended */
2341 make_nbf_tables(fp,oenv,fr,rtab,cr,tabfn,
2342 *mtop->groups.grpname[nm_ind[egi]],
2343 *mtop->groups.grpname[nm_ind[egj]],
2346 } else if (fr->nnblists > 1) {
2347 fr->gid2nblists[GID(egi,egj,ir->opts.ngener)] = 0;
2355 /* generate extra tables with plain Coulomb for 1-4 interactions only */
2356 fr->tab14 = make_tables(fp,oenv,fr,MASTER(cr),tabpfn,rtab,
2357 GMX_MAKETABLES_14ONLY);
2360 /* Read AdResS Thermo Force table if needed */
2361 if(fr->adress_icor == eAdressICThermoForce)
2363 /* old todo replace */
2365 if (ir->adress->n_tf_grps > 0){
2366 make_adress_tf_tables(fp,oenv,fr,ir,tabfn, mtop, box);
2369 /* load the default table */
2370 snew(fr->atf_tabs, 1);
2371 fr->atf_tabs[DEFAULT_TF_TABLE] = make_atf_table(fp,oenv,fr,tabafn, box);
/* Wall tables */
2376 fr->nwall = ir->nwall;
2377 if (ir->nwall && ir->wall_type==ewtTABLE)
2379 make_wall_tables(fp,oenv,ir,tabfn,&mtop->groups,fr);
/* Tabulated bonded interactions */
2382 if (fcd && tabbfn) {
2383 fcd->bondtab = make_bonded_tables(fp,
2384 F_TABBONDS,F_TABBONDSNC,
2386 fcd->angletab = make_bonded_tables(fp,
2389 fcd->dihtab = make_bonded_tables(fp,
2394 fprintf(debug,"No fcdata or table file name passed, can not read table, can not do bonded interactions\n");
2397 /* QM/MM initialization if requested
2401 fprintf(stderr,"QM/MM calculation requested.\n");
2404 fr->bQMMM = ir->bQMMM;
2405 fr->qr = mk_QMMMrec();
2407 /* Set all the static charge group info */
2408 fr->cginfo_mb = init_cginfo_mb(fp,mtop,fr,bNoSolvOpt,
2409 &fr->bExcl_IntraCGAll_InterCGNone);
2410 if (DOMAINDECOMP(cr)) {
2413 fr->cginfo = cginfo_expand(mtop->nmolblock,fr->cginfo_mb);
2416 if (!DOMAINDECOMP(cr))
2418 /* When using particle decomposition, the effect of the second argument,
2419 * which sets fr->hcg, is corrected later in do_md and init_em.
2421 forcerec_set_ranges(fr,ncg_mtop(mtop),ncg_mtop(mtop),
2422 mtop->natoms,mtop->natoms,mtop->natoms);
2425 fr->print_force = print_force;
2428 /* coarse load balancing vars */
2433 /* Initialize neighbor search */
2434 init_ns(fp,cr,&fr->ns,fr,mtop,box);
2436 if (cr->duty & DUTY_PP)
2438 gmx_setup_kernels(fp,fr,bGenericKernelOnly);
2441 gmx_setup_adress_kernels(fp,bGenericKernelOnly);
2445 /* Initialize the thread working data for bonded interactions */
2446 init_forcerec_f_threads(fr,mtop->groups.grps[egcENER].nr);
2448 snew(fr->excl_load,fr->nthreads+1);
/* Verlet scheme: requires matching cut-offs, then set up nbnxn + constants */
2450 if (fr->cutoff_scheme == ecutsVERLET)
2452 if (ir->rcoulomb != ir->rvdw)
2454 gmx_fatal(FARGS,"With Verlet lists rcoulomb and rvdw should be identical");
2457 init_nb_verlet(fp, &fr->nbv, ir, fr, cr, nbpu_opt);
2459 /* initialize interaction constants
2460 * TODO should be moved out during modularization.
2462 init_interaction_const(fp, &fr->ic, fr);
2465 if (ir->eDispCorr != edispcNO)
2467 calc_enervirdiff(fp,ir->eDispCorr,fr);
/* Debug helpers: print "name: value" for a real, int or boolean field.
 * The # stringizes the expression so the field name appears in the output. */
2471 #define pr_real(fp,r) fprintf(fp,"%s: %e\n",#r,r)
2472 #define pr_int(fp,i) fprintf((fp),"%s: %d\n",#i,i)
2473 #define pr_bool(fp,b) fprintf((fp),"%s: %s\n",#b,bool_names[b])
/* Dump selected forcerec fields to fp for debugging. */
2475 void pr_forcerec(FILE *fp,t_forcerec *fr,t_commrec *cr)
2479 pr_real(fp,fr->rlist);
2480 pr_real(fp,fr->rcoulomb);
2481 pr_real(fp,fr->fudgeQQ);
2482 pr_bool(fp,fr->bGrid);
2483 pr_bool(fp,fr->bTwinRange);
2484 /*pr_int(fp,fr->cg0);
2485 pr_int(fp,fr->hcg);*/
2486 for(i=0; i<fr->nnblists; i++)
2487 pr_int(fp,fr->nblists[i].tab.n);
2488 pr_real(fp,fr->rcoulomb_switch);
/* NOTE(review): fr->rcoulomb was already printed above (line 2480);
 * this second print may be a copy-paste slip for fr->rvdw — verify. */
2489 pr_real(fp,fr->rcoulomb);
2494 void forcerec_set_excl_load(t_forcerec *fr,
2495 const gmx_localtop_t *top,const t_commrec *cr)
2498 int t,i,j,ntot,n,ntarget;
2500 if (cr != NULL && PARTDECOMP(cr))
2502 /* No OpenMP with particle decomposition */
2510 ind = top->excls.index;
2514 for(i=0; i<top->excls.nr; i++)
2516 for(j=ind[i]; j<ind[i+1]; j++)
2525 fr->excl_load[0] = 0;
2528 for(t=1; t<=fr->nthreads; t++)
2530 ntarget = (ntot*t)/fr->nthreads;
2531 while(i < top->excls.nr && n < ntarget)
2533 for(j=ind[i]; j<ind[i+1]; j++)
2542 fr->excl_load[t] = i;