2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
51 #include "gromacs/domdec/domdec.h"
52 #include "gromacs/domdec/domdec_struct.h"
53 #include "gromacs/ewald/ewald.h"
54 #include "gromacs/fileio/filetypes.h"
55 #include "gromacs/gmxlib/md_logging.h"
56 #include "gromacs/gmxlib/network.h"
57 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
58 #include "gromacs/gpu_utils/gpu_utils.h"
59 #include "gromacs/hardware/detecthardware.h"
60 #include "gromacs/listed-forces/manage-threading.h"
61 #include "gromacs/listed-forces/pairs.h"
62 #include "gromacs/math/calculate-ewald-splitting-coefficient.h"
63 #include "gromacs/math/functions.h"
64 #include "gromacs/math/units.h"
65 #include "gromacs/math/utilities.h"
66 #include "gromacs/math/vec.h"
67 #include "gromacs/mdlib/force.h"
68 #include "gromacs/mdlib/forcerec-threading.h"
69 #include "gromacs/mdlib/gmx_omp_nthreads.h"
70 #include "gromacs/mdlib/md_support.h"
71 #include "gromacs/mdlib/nb_verlet.h"
72 #include "gromacs/mdlib/nbnxn_atomdata.h"
73 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
74 #include "gromacs/mdlib/nbnxn_search.h"
75 #include "gromacs/mdlib/nbnxn_simd.h"
76 #include "gromacs/mdlib/nbnxn_util.h"
77 #include "gromacs/mdlib/ns.h"
78 #include "gromacs/mdlib/qmmm.h"
79 #include "gromacs/mdlib/sim_util.h"
80 #include "gromacs/mdtypes/commrec.h"
81 #include "gromacs/mdtypes/fcdata.h"
82 #include "gromacs/mdtypes/group.h"
83 #include "gromacs/mdtypes/inputrec.h"
84 #include "gromacs/mdtypes/md_enums.h"
85 #include "gromacs/pbcutil/ishift.h"
86 #include "gromacs/pbcutil/pbc.h"
87 #include "gromacs/simd/simd.h"
88 #include "gromacs/tables/forcetable.h"
89 #include "gromacs/topology/mtop_util.h"
90 #include "gromacs/trajectory/trajectoryframe.h"
91 #include "gromacs/utility/cstringutil.h"
92 #include "gromacs/utility/fatalerror.h"
93 #include "gromacs/utility/gmxassert.h"
94 #include "gromacs/utility/pleasecite.h"
95 #include "gromacs/utility/smalloc.h"
96 #include "gromacs/utility/stringutil.h"
98 #include "nbnxn_gpu_jit_support.h"
/* Printable names for the per-energy-group non-bonded terms (short-range
 * Coulomb/LJ/Buckingham and 1-4 pair terms); NULL-terminated, egNR+1 entries.
 */
100 const char *egrp_nm[egNR+1] = {
101 "Coul-SR", "LJ-SR", "Buck-SR",
102 "Coul-14", "LJ-14", NULL
/* Allocate and return a fresh t_forcerec.
 * NOTE(review): body elided in this view; presumably a zero-initializing
 * snew() allocation - confirm against the full source.
 */
105 t_forcerec *mk_forcerec(void)
/* Debug helper: print the non-bonded force-field parameter matrix nbfp
 * (atnr x atnr atom-type pairs) to fp. For Buckingham (bBHAM) prints a/b/c,
 * otherwise LJ c6/c12. The stored values carry the 6.0/12.0 derivative
 * prefactors, hence the divisions when printing the raw parameters.
 */
115 static void pr_nbfp(FILE *fp, real *nbfp, gmx_bool bBHAM, int atnr)
119 for (i = 0; (i < atnr); i++)
121 for (j = 0; (j < atnr); j++)
123 fprintf(fp, "%2d - %2d", i, j);
126 fprintf(fp, "  a=%10g, b=%10g, c=%10g\n", BHAMA(nbfp, atnr, i, j),
127 BHAMB(nbfp, atnr, i, j), BHAMC(nbfp, atnr, i, j)/6.0);
131 fprintf(fp, "  c6=%10g, c12=%10g\n", C6(nbfp, atnr, i, j)/6.0,
132 C12(nbfp, atnr, i, j)/12.0);
/* Build the flat non-bonded parameter matrix from the force-field parameters.
 * Buckingham uses 3 reals per type pair (a, b, c), LJ uses 2 (c6, c12).
 * The c/c6/c12 entries are pre-multiplied by the 6.0/12.0 derivative
 * prefactors so the kernels avoid doing that per interaction.
 * Returns a newly snew()ed array owned by the caller.
 */
139 static real *mk_nbfp(const gmx_ffparams_t *idef, gmx_bool bBHAM)
147 snew(nbfp, 3*atnr*atnr);
148 for (i = k = 0; (i < atnr); i++)
150 for (j = 0; (j < atnr); j++, k++)
152 BHAMA(nbfp, atnr, i, j) = idef->iparams[k].bham.a;
153 BHAMB(nbfp, atnr, i, j) = idef->iparams[k].bham.b;
154 /* nbfp now includes the 6.0 derivative prefactor */
155 BHAMC(nbfp, atnr, i, j) = idef->iparams[k].bham.c*6.0;
161 snew(nbfp, 2*atnr*atnr);
162 for (i = k = 0; (i < atnr); i++)
164 for (j = 0; (j < atnr); j++, k++)
166 /* nbfp now includes the 6.0/12.0 derivative prefactors */
167 C6(nbfp, atnr, i, j)  = idef->iparams[k].lj.c6*6.0;
168 C12(nbfp, atnr, i, j) = idef->iparams[k].lj.c12*12.0;
/* Build the grid of C6 coefficients that LJ-PME uses on the reciprocal grid.
 * Geometric combination (sqrt(c6i*c6j)) by default; for Lorentz-Berthelot
 * (eljpmeLB) the pairwise C6 is recomputed from per-type sigma/epsilon,
 * guarded against zero parameters to avoid division by zero.
 * Entries carry the 6.0 derivative prefactor and are laid out at the same
 * relative positions as C6 in nbfp. Returns a newly snew()ed array.
 */
176 static real *make_ljpme_c6grid(const gmx_ffparams_t *idef, t_forcerec *fr)
179 real c6, c6i, c6j, c12i, c12j, epsi, epsj, sigmai, sigmaj;
182 /* For LJ-PME simulations, we correct the energies with the reciprocal space
183 * inside of the cut-off. To do this the non-bonded kernels need to have
184 * access to the C6-values used on the reciprocal grid in pme.c
188 snew(grid, 2*atnr*atnr);
189 for (i = k = 0; (i < atnr); i++)
191 for (j = 0; (j < atnr); j++, k++)
193 c6i  = idef->iparams[i*(atnr+1)].lj.c6;
194 c12i = idef->iparams[i*(atnr+1)].lj.c12;
195 c6j  = idef->iparams[j*(atnr+1)].lj.c6;
196 c12j = idef->iparams[j*(atnr+1)].lj.c12;
197 c6   = std::sqrt(c6i * c6j);
198 if (fr->ljpme_combination_rule == eljpmeLB
199 && !gmx_numzero(c6) && !gmx_numzero(c12i) && !gmx_numzero(c12j))
201 sigmai = gmx::sixthroot(c12i / c6i);
202 sigmaj = gmx::sixthroot(c12j / c6j);
203 epsi   = c6i * c6i / c12i;
204 epsj   = c6j * c6j / c12j;
205 c6     = std::sqrt(epsi * epsj) * gmx::power6(0.5*(sigmai+sigmaj));
207 /* Store the elements at the same relative positions as C6 in nbfp in order
208 * to simplify access in the kernels
210 grid[2*(atnr*i+j)] = c6*6.0;
/* Build an nbfp-style matrix of pairwise LJ c6/c12 generated purely from the
 * requested combination rule (geometric by default; arithmetic i.e.
 * Lorentz-Berthelot recomputes from per-type sigma/epsilon when the
 * parameters are non-zero). Used to compare against the actual topology
 * parameters, e.g. for the LJ-PME dispersion correction.
 * Returns a newly snew()ed array with the 6.0/12.0 derivative prefactors.
 */
216 static real *mk_nbfp_combination_rule(const gmx_ffparams_t *idef, int comb_rule)
220 real c6i, c6j, c12i, c12j, epsi, epsj, sigmai, sigmaj;
224 snew(nbfp, 2*atnr*atnr);
225 for (i = 0; i < atnr; ++i)
227 for (j = 0; j < atnr; ++j)
229 c6i  = idef->iparams[i*(atnr+1)].lj.c6;
230 c12i = idef->iparams[i*(atnr+1)].lj.c12;
231 c6j  = idef->iparams[j*(atnr+1)].lj.c6;
232 c12j = idef->iparams[j*(atnr+1)].lj.c12;
233 c6   = std::sqrt(c6i  * c6j);
234 c12  = std::sqrt(c12i * c12j);
235 if (comb_rule == eCOMB_ARITHMETIC
236 && !gmx_numzero(c6) && !gmx_numzero(c12))
238 sigmai = gmx::sixthroot(c12i / c6i);
239 sigmaj = gmx::sixthroot(c12j / c6j);
240 epsi   = c6i * c6i / c12i;
241 epsj   = c6j * c6j / c12j;
242 c6     = std::sqrt(epsi * epsj) * gmx::power6(0.5*(sigmai+sigmaj));
243 c12    = std::sqrt(epsi * epsj) * gmx::power12(0.5*(sigmai+sigmaj));
245 C6(nbfp, atnr, i, j)   = c6*6.0;
246 C12(nbfp, atnr, i, j)  = c12*12.0;
252 /* This routine sets fr->solvent_opt to the most common solvent in the
253 * system, e.g. esolSPC or esolTIP4P. It will also mark each charge group in
254 * the fr->solvent_type array with the correct type (or esolNO).
256 * Charge groups that fulfill the conditions but are not identical to the
257 * most common one will be marked as esolNO in the solvent_type array.
259 * TIP3p is identical to SPC for these purposes, so we call it
260 * SPC in the arrays (Apologies to Bill Jorgensen ;-)
262 * NOTE: QM particle should not
263 * become an optimized solvent. Not even if there is only one charge
273 } solvent_parameters_t;
/* Classify one charge group as a candidate optimized solvent (SPC-like with
 * 3 atoms, or TIP4P-like with 4). Appends a new entry to the growable
 * *solvent_parameters_p list when the cg matches the solvent-loop
 * requirements but no existing entry, increments the count of a matching
 * entry otherwise, and records the entry index for this cg in *cg_sp.
 * Charge groups that are perturbed, part of a QM region, or of the wrong
 * size/parameter pattern are left unclassified.
 */
276 check_solvent_cg(const gmx_moltype_t *molt,
279 const unsigned char *qm_grpnr,
280 const t_grps *qm_grps,
282 int  *n_solvent_parameters,
283 solvent_parameters_t **solvent_parameters_p,
293 real tmp_charge[4]  = { 0.0 }; /* init to zero to make gcc4.8 happy */
294 int  tmp_vdwtype[4] = { 0 };   /* init to zero to make gcc4.8 happy */
297 solvent_parameters_t *solvent_parameters;
299 /* We use a list with parameters for each solvent type.
300 * Every time we discover a new molecule that fulfills the basic
301 * conditions for a solvent we compare with the previous entries
302 * in these lists. If the parameters are the same we just increment
303 * the counter for that type, and otherwise we create a new type
304 * based on the current molecule.
306 * Once we've finished going through all molecules we check which
307 * solvent is most common, and mark all those molecules while we
308 * clear the flag on all others.
311 solvent_parameters = *solvent_parameters_p;
313 /* Mark the cg first as non optimized */
316 /* Check if this cg has no exclusions with atoms in other charge groups
317 * and all atoms inside the charge group excluded.
318 * We only have 3 or 4 atom solvent loops.
320 if (GET_CGINFO_EXCL_INTER(cginfo) ||
321 !GET_CGINFO_EXCL_INTRA(cginfo))
326 /* Get the indices of the first atom in this charge group */
327 j0     = molt->cgs.index[cg0];
328 j1     = molt->cgs.index[cg0+1];
330 /* Number of atoms in our molecule */
336 "Moltype '%s': there are %d atoms in this charge group\n",
340 /* Check if it could be an SPC (3 atoms) or TIP4p (4) water,
343 if (nj < 3 || nj > 4)
348 /* Check if we are doing QM on this group */
350 if (qm_grpnr != NULL)
352 for (j = j0; j < j1 && !qm; j++)
354 qm = (qm_grpnr[j] < qm_grps->nr - 1);
357 /* Cannot use solvent optimization with QM */
363 atom = molt->atoms.atom;
365 /* Still looks like a solvent, time to check parameters */
367 /* If it is perturbed (free energy) we can't use the solvent loops,
368 * so then we just skip to the next molecule.
372 for (j = j0; j < j1 && !perturbed; j++)
374 perturbed = PERTURBED(atom[j]);
382 /* Now it's only a question if the VdW and charge parameters
383 * are OK. Before doing the check we compare and see if they are
384 * identical to a possible previous solvent type.
385 * First we assign the current types and charges.
387 for (j = 0; j < nj; j++)
389 tmp_vdwtype[j] = atom[j0+j].type;
390 tmp_charge[j]  = atom[j0+j].q;
393 /* Does it match any previous solvent type? */
394 for (k = 0; k < *n_solvent_parameters; k++)
399 /* We can only match SPC with 3 atoms and TIP4p with 4 atoms */
400 if ( (solvent_parameters[k].model == esolSPC   && nj != 3)  ||
401 (solvent_parameters[k].model == esolTIP4P && nj != 4) )
406 /* Check that types & charges match for all atoms in molecule */
407 for (j = 0; j < nj && match == TRUE; j++)
409 if (tmp_vdwtype[j] != solvent_parameters[k].vdwtype[j])
413 if (tmp_charge[j] != solvent_parameters[k].charge[j])
420 /* Congratulations! We have a matched solvent.
421 * Flag it with this type for later processing.
424 solvent_parameters[k].count += nmol;
426 /* We are done with this charge group */
431 /* If we get here, we have a tentative new solvent type.
432 * Before we add it we must check that it fulfills the requirements
433 * of the solvent optimized loops. First determine which atoms have
436 for (j = 0; j < nj; j++)
439 tjA = tmp_vdwtype[j];
441 /* Go through all other types and see if any have non-zero
442 * VdW parameters when combined with this one.
444 for (k = 0; k < fr->ntype && (has_vdw[j] == FALSE); k++)
446 /* We already checked that the atoms weren't perturbed,
447 * so we only need to check state A now.
451 has_vdw[j] = (has_vdw[j] ||
452 (BHAMA(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
453 (BHAMB(fr->nbfp, fr->ntype, tjA, k) != 0.0) ||
454 (BHAMC(fr->nbfp, fr->ntype, tjA, k) != 0.0));
459 has_vdw[j] = (has_vdw[j] ||
460 (C6(fr->nbfp, fr->ntype, tjA, k)  != 0.0) ||
461 (C12(fr->nbfp, fr->ntype, tjA, k) != 0.0));
466 /* Now we know all we need to make the final check and assignment. */
470 * For this we require that all atoms have charge,
471 * the charges on atom 2 & 3 should be the same, and only
472 * atom 1 might have VdW.
474 if (has_vdw[1] == FALSE &&
475 has_vdw[2] == FALSE &&
476 tmp_charge[0]  != 0 &&
477 tmp_charge[1]  != 0 &&
478 tmp_charge[2]  == tmp_charge[1])
480 srenew(solvent_parameters, *n_solvent_parameters+1);
481 solvent_parameters[*n_solvent_parameters].model = esolSPC;
482 solvent_parameters[*n_solvent_parameters].count = nmol;
483 for (k = 0; k < 3; k++)
485 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
486 solvent_parameters[*n_solvent_parameters].charge[k]  = tmp_charge[k];
489 *cg_sp = *n_solvent_parameters;
490 (*n_solvent_parameters)++;
495 /* Or could it be a TIP4P?
496 * For this we require that atoms 2,3,4 have charge, but not atom 1.
497 * Only atom 1 might have VdW.
499 if (has_vdw[1] == FALSE &&
500 has_vdw[2] == FALSE &&
501 has_vdw[3] == FALSE &&
502 tmp_charge[0]  == 0 &&
503 tmp_charge[1]  != 0 &&
504 tmp_charge[2]  == tmp_charge[1] &&
507 srenew(solvent_parameters, *n_solvent_parameters+1);
508 solvent_parameters[*n_solvent_parameters].model = esolTIP4P;
509 solvent_parameters[*n_solvent_parameters].count = nmol;
510 for (k = 0; k < 4; k++)
512 solvent_parameters[*n_solvent_parameters].vdwtype[k] = tmp_vdwtype[k];
513 solvent_parameters[*n_solvent_parameters].charge[k]  = tmp_charge[k];
516 *cg_sp = *n_solvent_parameters;
517 (*n_solvent_parameters)++;
521 *solvent_parameters_p = solvent_parameters;
/* Scan all charge groups in the topology (via check_solvent_cg), find the
 * most common candidate solvent model, mark matching charge groups in
 * cginfo_mb with that model via SET_CGINFO_SOLOPT and all others with
 * esolNO, and record the winner in fr->solvent_opt.
 */
525 check_solvent(FILE  * fp,
526 const gmx_mtop_t  * mtop,
528 cginfo_mb_t *cginfo_mb)
531 const gmx_moltype_t *molt;
532 int   mb, mol, cg_mol, at_offset, am, cgm, i, nmol_ch, nmol;
533 int   n_solvent_parameters;
534 solvent_parameters_t *solvent_parameters;
540 fprintf(debug, "Going to determine what solvent types we have.\n");
543 n_solvent_parameters = 0;
544 solvent_parameters   = NULL;
545 /* Allocate temporary array for solvent type */
546 snew(cg_sp, mtop->nmolblock);
549 for (mb = 0; mb < mtop->nmolblock; mb++)
551 molt = &mtop->moltype[mtop->molblock[mb].type];
553 /* Here we have to loop over all individual molecules
554 * because we need to check for QMMM particles.
556 snew(cg_sp[mb], cginfo_mb[mb].cg_mod)
557 nmol_ch = cginfo_mb[mb].cg_mod/cgs->nr;
558 nmol    = mtop->molblock[mb].nmol/nmol_ch;
559 for (mol = 0; mol < nmol_ch; mol++)
562 am = mol*cgs->index[cgs->nr];
563 for (cg_mol = 0; cg_mol < cgs->nr; cg_mol++)
565 check_solvent_cg(molt, cg_mol, nmol,
566 mtop->groups.grpnr[egcQMMM] ?
567 mtop->groups.grpnr[egcQMMM]+at_offset+am : 0,
568 &mtop->groups.grps[egcQMMM],
570 &n_solvent_parameters, &solvent_parameters,
571 cginfo_mb[mb].cginfo[cgm+cg_mol],
572 &cg_sp[mb][cgm+cg_mol]);
575 at_offset += cgs->index[cgs->nr];
578 /* Puh! We finished going through all charge groups.
579 * Now find the most common solvent model.
582 /* Most common solvent this far */
584 for (i = 0; i < n_solvent_parameters; i++)
587 solvent_parameters[i].count > solvent_parameters[bestsp].count)
595 bestsol = solvent_parameters[bestsp].model;
603 for (mb = 0; mb < mtop->nmolblock; mb++)
605 cgs  = &mtop->moltype[mtop->molblock[mb].type].cgs;
606 nmol = (mtop->molblock[mb].nmol*cgs->nr)/cginfo_mb[mb].cg_mod;
607 for (i = 0; i < cginfo_mb[mb].cg_mod; i++)
609 if (cg_sp[mb][i] == bestsp)
611 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i], bestsol);
616 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[i], esolNO);
623 if (bestsol != esolNO && fp != NULL)
625 fprintf(fp, "\nEnabling %s-like water optimization for %d molecules.\n\n",
627 solvent_parameters[bestsp].count);
630 sfree(solvent_parameters);
631 fr->solvent_opt = bestsol;
/* Per-atom constraint classification used when filling cginfo:
 * unconstrained, regular constraint, or SETTLE.
 */
635 acNONE = 0, acCONSTRAINT, acSETTLE
/* Build the per-molblock charge-group info (cginfo_mb): for every charge
 * group store its energy-group id, constraint/SETTLE flags, whether it has
 * VdW and/or charge, FEP perturbation, size, and intra/inter-cg exclusion
 * pattern. Also runs the solvent optimization detection (check_solvent)
 * unless disabled by bNoSolvOpt or the GMX_NO_SOLV_OPT environment variable.
 * Outputs: *bFEP_NonBonded set when any cg has perturbed non-bonded atoms;
 * *bExcl_IntraCGAll_InterCGNone cleared when any cg violates the
 * all-intra/no-inter exclusion pattern.
 */
638 static cginfo_mb_t *init_cginfo_mb(FILE *fplog, const gmx_mtop_t *mtop,
639 t_forcerec *fr, gmx_bool bNoSolvOpt,
640 gmx_bool *bFEP_NonBonded,
641 gmx_bool *bExcl_IntraCGAll_InterCGNone)
644 const t_blocka        *excl;
645 const gmx_moltype_t   *molt;
646 const gmx_molblock_t  *molb;
647 cginfo_mb_t           *cginfo_mb;
650 int                    cg_offset, a_offset;
651 int                    mb, m, cg, a0, a1, gid, ai, j, aj, excl_nalloc;
655 gmx_bool               bId, *bExcl, bExclIntraAll, bExclInter, bHaveVDW, bHaveQ, bHavePerturbedAtoms;
657 snew(cginfo_mb, mtop->nmolblock);
659 snew(type_VDW, fr->ntype);
660 for (ai = 0; ai < fr->ntype; ai++)
662 type_VDW[ai] = FALSE;
663 for (j = 0; j < fr->ntype; j++)
665 type_VDW[ai] = type_VDW[ai] ||
667 C6(fr->nbfp, fr->ntype, ai, j) != 0 ||
668 C12(fr->nbfp, fr->ntype, ai, j) != 0;
672 *bFEP_NonBonded               = FALSE;
673 *bExcl_IntraCGAll_InterCGNone = TRUE;
676 snew(bExcl, excl_nalloc);
679 for (mb = 0; mb < mtop->nmolblock; mb++)
681 molb = &mtop->molblock[mb];
682 molt = &mtop->moltype[molb->type];
686 /* Check if the cginfo is identical for all molecules in this block.
687 * If so, we only need an array of the size of one molecule.
688 * Otherwise we make an array of #mol times #cgs per molecule.
691 for (m = 0; m < molb->nmol; m++)
693 int am = m*cgs->index[cgs->nr];
694 for (cg = 0; cg < cgs->nr; cg++)
697 a1 = cgs->index[cg+1];
698 if (ggrpnr(&mtop->groups, egcENER, a_offset+am+a0) !=
699 ggrpnr(&mtop->groups, egcENER, a_offset   +a0))
703 if (mtop->groups.grpnr[egcQMMM] != NULL)
705 for (ai = a0; ai < a1; ai++)
707 if (mtop->groups.grpnr[egcQMMM][a_offset+am+ai] !=
708 mtop->groups.grpnr[egcQMMM][a_offset   +ai])
717 cginfo_mb[mb].cg_start = cg_offset;
718 cginfo_mb[mb].cg_end   = cg_offset + molb->nmol*cgs->nr;
719 cginfo_mb[mb].cg_mod   = (bId ? 1 : molb->nmol)*cgs->nr;
720 snew(cginfo_mb[mb].cginfo, cginfo_mb[mb].cg_mod);
721 cginfo = cginfo_mb[mb].cginfo;
723 /* Set constraints flags for constrained atoms */
724 snew(a_con, molt->atoms.nr);
725 for (ftype = 0; ftype < F_NRE; ftype++)
727 if (interaction_function[ftype].flags & IF_CONSTRAINT)
732 for (ia = 0; ia < molt->ilist[ftype].nr; ia += 1+nral)
736 for (a = 0; a < nral; a++)
738 a_con[molt->ilist[ftype].iatoms[ia+1+a]] =
739 (ftype == F_SETTLE ? acSETTLE : acCONSTRAINT);
745 for (m = 0; m < (bId ? 1 : molb->nmol); m++)
748 int am = m*cgs->index[cgs->nr];
749 for (cg = 0; cg < cgs->nr; cg++)
752 a1 = cgs->index[cg+1];
754 /* Store the energy group in cginfo */
755 gid = ggrpnr(&mtop->groups, egcENER, a_offset+am+a0);
756 SET_CGINFO_GID(cginfo[cgm+cg], gid);
758 /* Check the intra/inter charge group exclusions */
759 if (a1-a0 > excl_nalloc)
761 excl_nalloc = a1 - a0;
762 srenew(bExcl, excl_nalloc);
764 /* bExclIntraAll: all intra cg interactions excluded
765 * bExclInter:    any inter cg interactions excluded
767 bExclIntraAll       = TRUE;
771 bHavePerturbedAtoms = FALSE;
772 for (ai = a0; ai < a1; ai++)
774 /* Check VDW and electrostatic interactions */
775 bHaveVDW = bHaveVDW || (type_VDW[molt->atoms.atom[ai].type] ||
776 type_VDW[molt->atoms.atom[ai].typeB]);
777 bHaveQ  = bHaveQ    || (molt->atoms.atom[ai].q != 0 ||
778 molt->atoms.atom[ai].qB != 0);
780 bHavePerturbedAtoms = bHavePerturbedAtoms || (PERTURBED(molt->atoms.atom[ai]) != 0);
782 /* Clear the exclusion list for atom ai */
783 for (aj = a0; aj < a1; aj++)
785 bExcl[aj-a0] = FALSE;
787 /* Loop over all the exclusions of atom ai */
788 for (j = excl->index[ai]; j < excl->index[ai+1]; j++)
791 if (aj < a0 || aj >= a1)
800 /* Check if ai excludes a0 to a1 */
801 for (aj = a0; aj < a1; aj++)
805 bExclIntraAll = FALSE;
812 SET_CGINFO_CONSTR(cginfo[cgm+cg]);
815 SET_CGINFO_SETTLE(cginfo[cgm+cg]);
823 SET_CGINFO_EXCL_INTRA(cginfo[cgm+cg]);
827 SET_CGINFO_EXCL_INTER(cginfo[cgm+cg]);
829 if (a1 - a0 > MAX_CHARGEGROUP_SIZE)
831 /* The size in cginfo is currently only read with DD */
832 gmx_fatal(FARGS, "A charge group has size %d which is larger than the limit of %d atoms", a1-a0, MAX_CHARGEGROUP_SIZE);
836 SET_CGINFO_HAS_VDW(cginfo[cgm+cg]);
840 SET_CGINFO_HAS_Q(cginfo[cgm+cg]);
842 if (bHavePerturbedAtoms && fr->efep != efepNO)
844 SET_CGINFO_FEP(cginfo[cgm+cg]);
845 *bFEP_NonBonded = TRUE;
847 /* Store the charge group size */
848 SET_CGINFO_NATOMS(cginfo[cgm+cg], a1-a0);
850 if (!bExclIntraAll || bExclInter)
852 *bExcl_IntraCGAll_InterCGNone = FALSE;
859 cg_offset += molb->nmol*cgs->nr;
860 a_offset  += molb->nmol*cgs->index[cgs->nr];
864 /* the solvent optimizer is called after the QM is initialized,
865 * because we don't want to have the QM subsystem to become an
869 check_solvent(fplog, mtop, fr, cginfo_mb);
871 if (getenv("GMX_NO_SOLV_OPT"))
875 fprintf(fplog, "Found environment variable GMX_NO_SOLV_OPT.\n"
876 "Disabling all solvent optimization\n");
878 fr->solvent_opt = esolNO;
882 fr->solvent_opt = esolNO;
884 if (!fr->solvent_opt)
886 for (mb = 0; mb < mtop->nmolblock; mb++)
888 for (cg = 0; cg < cginfo_mb[mb].cg_mod; cg++)
890 SET_CGINFO_SOLOPT(cginfo_mb[mb].cginfo[cg], esolNO);
/* Expand the compact per-molblock cginfo (which may store only one
 * molecule's worth of entries when all molecules in a block are identical)
 * into one flat array with an entry per global charge group. The modulo
 * by cg_mod replicates the single-molecule pattern across all molecules.
 */
898 static int *cginfo_expand(int nmb, cginfo_mb_t *cgi_mb)
903 ncg = cgi_mb[nmb-1].cg_end;
906 for (cg = 0; cg < ncg; cg++)
908 while (cg >= cgi_mb[mb].cg_end)
913 cgi_mb[mb].cginfo[(cg - cgi_mb[mb].cg_start) % cgi_mb[mb].cg_mod];
/* Accumulate system-wide sums of charge (q, q^2) and dispersion C6 over all
 * atoms, for both topology states A and B (state B only when free energy is
 * enabled; otherwise B copies A). Results go into fr->qsum/q2sum/c6sum and
 * the total charge is reported to the log file.
 */
919 static void set_chargesum(FILE *log, t_forcerec *fr, const gmx_mtop_t *mtop)
921 /* This calculates the sums for q and c6 */
922 double         qsum, q2sum, q, c6sum, c6;
924 const t_atoms *atoms;
929 for (mb = 0; mb < mtop->nmolblock; mb++)
931 nmol  = mtop->molblock[mb].nmol;
932 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
933 for (i = 0; i < atoms->nr; i++)
935 q = atoms->atom[i].q;
938 c6 = mtop->ffparams.iparams[atoms->atom[i].type*(mtop->ffparams.atnr+1)].lj.c6;
943 fr->q2sum[0] = q2sum;
944 fr->c6sum[0] = c6sum;
946 if (fr->efep != efepNO)
951 for (mb = 0; mb < mtop->nmolblock; mb++)
953 nmol  = mtop->molblock[mb].nmol;
954 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
955 for (i = 0; i < atoms->nr; i++)
957 q = atoms->atom[i].qB;
960 c6 = mtop->ffparams.iparams[atoms->atom[i].typeB*(mtop->ffparams.atnr+1)].lj.c6;
964 fr->q2sum[1] = q2sum;
965 fr->c6sum[1] = c6sum;
970 fr->qsum[1]  = fr->qsum[0];
971 fr->q2sum[1] = fr->q2sum[0];
972 fr->c6sum[1] = fr->c6sum[0];
976 if (fr->efep == efepNO)
978 fprintf(log, "System total charge: %.3f\n", fr->qsum[0]);
982 fprintf(log, "System total charge, top. A: %.3f top. B: %.3f\n",
983 fr->qsum[0], fr->qsum[1]);
/* Update box-dependent force-record parameters. For generalized
 * reaction-field electrostatics the RF constants (kappa, k_rf, c_rf)
 * depend on the box volume and must be recomputed when the box changes.
 */
988 void update_forcerec(t_forcerec *fr, matrix box)
990 if (fr->eeltype == eelGRF)
992 calc_rffac(NULL, fr->eeltype, fr->epsilon_r, fr->epsilon_rf,
993 fr->rcoulomb, fr->temp, fr->zsquare, box,
994 &fr->kappa, &fr->k_rf, &fr->c_rf);
/* Compute the system-average C6 and C12 parameters (fr->avcsix/avctwelve)
 * used for the long-range dispersion correction, for topology state A and,
 * with free energy, state B. Pair counts use per-type tallies to avoid
 * O(natoms^2) work; excluded pairs are subtracted, and for test-particle
 * insertion (TPI) only test-particle/system interactions are corrected.
 * For LJ-PME the correction uses the difference between topology C6 values
 * and the combination-rule C6 values used on the reciprocal grid.
 */
998 void set_avcsixtwelve(FILE *fplog, t_forcerec *fr, const gmx_mtop_t *mtop)
1000 const t_atoms  *atoms, *atoms_tpi;
1001 const t_blocka *excl;
1002 int             mb, nmol, nmolc, i, j, tpi, tpj, j1, j2, k, nexcl, q;
1003 gmx_int64_t     npair, npair_ij, tmpi, tmpj;
1004 double          csix, ctwelve;
1005 int             ntp, *typecount;
1008 real           *nbfp_comb = NULL;
1014 /* For LJ-PME, we want to correct for the difference between the
1015 * actual C6 values and the C6 values used by the LJ-PME based on
1016 * combination rules. */
1018 if (EVDW_PME(fr->vdwtype))
1020 nbfp_comb = mk_nbfp_combination_rule(&mtop->ffparams,
1021 (fr->ljpme_combination_rule == eljpmeLB) ? eCOMB_ARITHMETIC : eCOMB_GEOMETRIC);
1022 for (tpi = 0; tpi < ntp; ++tpi)
1024 for (tpj = 0; tpj < ntp; ++tpj)
1026 C6(nbfp_comb, ntp, tpi, tpj) =
1027 C6(nbfp, ntp, tpi, tpj) - C6(nbfp_comb, ntp, tpi, tpj);
1028 C12(nbfp_comb, ntp, tpi, tpj) = C12(nbfp, ntp, tpi, tpj);
1033 for (q = 0; q < (fr->efep == efepNO ? 1 : 2); q++)
1041 /* Count the types so we avoid natoms^2 operations */
1042 snew(typecount, ntp);
1043 gmx_mtop_count_atomtypes(mtop, q, typecount);
1045 for (tpi = 0; tpi < ntp; tpi++)
1047 for (tpj = tpi; tpj < ntp; tpj++)
1049 tmpi = typecount[tpi];
1050 tmpj = typecount[tpj];
1053 npair_ij = tmpi*tmpj;
1057 npair_ij = tmpi*(tmpi - 1)/2;
1061 /* nbfp now includes the 6.0 derivative prefactor */
1062 csix    += npair_ij*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1066 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1067 csix    += npair_ij*   C6(nbfp, ntp, tpi, tpj)/6.0;
1068 ctwelve += npair_ij*  C12(nbfp, ntp, tpi, tpj)/12.0;
1074 /* Subtract the excluded pairs.
1075 * The main reason for subtracting exclusions is that in some cases
1076 * some combinations might never occur and the parameters could have
1077 * any value. These unused values should not influence the dispersion
1080 for (mb = 0; mb < mtop->nmolblock; mb++)
1082 nmol  = mtop->molblock[mb].nmol;
1083 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
1084 excl  = &mtop->moltype[mtop->molblock[mb].type].excls;
1085 for (i = 0; (i < atoms->nr); i++)
1089 tpi = atoms->atom[i].type;
1093 tpi = atoms->atom[i].typeB;
1095 j1  = excl->index[i];
1096 j2  = excl->index[i+1];
1097 for (j = j1; j < j2; j++)
1104 tpj = atoms->atom[k].type;
1108 tpj = atoms->atom[k].typeB;
1112 /* nbfp now includes the 6.0 derivative prefactor */
1113 csix -= nmol*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1117 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1118 csix    -= nmol*C6 (nbfp, ntp, tpi, tpj)/6.0;
1119 ctwelve -= nmol*C12(nbfp, ntp, tpi, tpj)/12.0;
1129 /* Only correct for the interaction of the test particle
1130 * with the rest of the system.
1133 &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].atoms;
1136 for (mb = 0; mb < mtop->nmolblock; mb++)
1138 nmol  = mtop->molblock[mb].nmol;
1139 atoms = &mtop->moltype[mtop->molblock[mb].type].atoms;
1140 for (j = 0; j < atoms->nr; j++)
1143 /* Remove the interaction of the test charge group
1146 if (mb == mtop->nmolblock-1)
1150 if (mb == 0 && nmol == 1)
1152 gmx_fatal(FARGS, "Old format tpr with TPI, please generate a new tpr file");
1157 tpj = atoms->atom[j].type;
1161 tpj = atoms->atom[j].typeB;
1163 for (i = 0; i < fr->n_tpi; i++)
1167 tpi = atoms_tpi->atom[i].type;
1171 tpi = atoms_tpi->atom[i].typeB;
1175 /* nbfp now includes the 6.0 derivative prefactor */
1176 csix += nmolc*BHAMC(nbfp, ntp, tpi, tpj)/6.0;
1180 /* nbfp now includes the 6.0/12.0 derivative prefactors */
1181 csix    += nmolc*C6 (nbfp, ntp, tpi, tpj)/6.0;
1182 ctwelve += nmolc*C12(nbfp, ntp, tpi, tpj)/12.0;
1189 if (npair - nexcl <= 0 && fplog)
1191 fprintf(fplog, "\nWARNING: There are no atom pairs for dispersion correction\n\n");
1197 csix    /= npair - nexcl;
1198 ctwelve /= npair - nexcl;
1202 fprintf(debug, "Counted %d exclusions\n", nexcl);
1203 fprintf(debug, "Average C6 parameter is: %10g\n", (double)csix);
1204 fprintf(debug, "Average C12 parameter is: %10g\n", (double)ctwelve);
1206 fr->avcsix[q]    = csix;
1207 fr->avctwelve[q] = ctwelve;
1210 if (EVDW_PME(fr->vdwtype))
1217 if (fr->eDispCorr == edispcAllEner ||
1218 fr->eDispCorr == edispcAllEnerPres)
1220 fprintf(fplog, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
1221 fr->avcsix[0], fr->avctwelve[0]);
1225 fprintf(fplog, "Long Range LJ corr.: <C6> %10.4e\n", fr->avcsix[0]);
/* Scan all atom-type pairs present in the topology and record the largest
 * Buckingham b parameter in fr->bham_b_max (used to size the repulsion
 * table); also tracks and reports the minimum. Fatal error on an atom type
 * index outside the force-field type range.
 */
1231 static void set_bham_b_max(FILE *fplog, t_forcerec *fr,
1232 const gmx_mtop_t *mtop)
1234 const t_atoms *at1, *at2;
1235 int            mt1, mt2, i, j, tpi, tpj, ntypes;
1241 fprintf(fplog, "Determining largest Buckingham b parameter for table\n");
1248 for (mt1 = 0; mt1 < mtop->nmoltype; mt1++)
1250 at1 = &mtop->moltype[mt1].atoms;
1251 for (i = 0; (i < at1->nr); i++)
1253 tpi = at1->atom[i].type;
1256 gmx_fatal(FARGS, "Atomtype[%d] = %d, maximum = %d", i, tpi, ntypes);
1259 for (mt2 = mt1; mt2 < mtop->nmoltype; mt2++)
1261 at2 = &mtop->moltype[mt2].atoms;
1262 for (j = 0; (j < at2->nr); j++)
1264 tpj = at2->atom[j].type;
1267 gmx_fatal(FARGS, "Atomtype[%d] = %d, maximum = %d", j, tpj, ntypes);
1269 b = BHAMB(nbfp, ntypes, tpi, tpj);
1270 if (b > fr->bham_b_max)
1274 if ((b < bmin) || (bmin == -1))
1284 fprintf(fplog, "Buckingham b parameters, min: %g, max: %g\n",
1285 bmin, fr->bham_b_max);
/* Read/construct the non-bonded interaction tables for one energy-group
 * pair. The table file name is formed by inserting "_eg1_eg2" before the
 * .xvg extension of tabfn. The combined elec+vdw table is then split into
 * a separate 4-stride Coulomb table and an 8-stride VdW table (16-byte
 * aligned allocations) to improve cache behavior in the kernels.
 */
1289 static void make_nbf_tables(FILE *fp,
1290 t_forcerec *fr, real rtab,
1291 const char *tabfn, char *eg1, char *eg2,
1301 fprintf(debug, "No table file name passed, can not read table, can not do non-bonded interactions\n");
1306 sprintf(buf, "%s", tabfn);
1309 /* Append the two energy group names */
1310 sprintf(buf + strlen(tabfn) - strlen(ftp2ext(efXVG)) - 1, "_%s_%s.%s",
1311 eg1, eg2, ftp2ext(efXVG));
1313 nbl->table_elec_vdw = make_tables(fp, fr, buf, rtab, 0);
1314 /* Copy the contents of the table to separate coulomb and LJ tables too,
1315 * to improve cache performance.
1317 /* For performance reasons we want
1318 * the table data to be aligned to 16-byte. The pointers could be freed
1319 * but currently aren't.
1321 snew(nbl->table_elec, 1);
1322 nbl->table_elec->interaction   = GMX_TABLE_INTERACTION_ELEC;
1323 nbl->table_elec->format        = nbl->table_elec_vdw->format;
1324 nbl->table_elec->r             = nbl->table_elec_vdw->r;
1325 nbl->table_elec->n             = nbl->table_elec_vdw->n;
1326 nbl->table_elec->scale         = nbl->table_elec_vdw->scale;
1327 nbl->table_elec->formatsize    = nbl->table_elec_vdw->formatsize;
1328 nbl->table_elec->ninteractions = 1;
1329 nbl->table_elec->stride        = nbl->table_elec->formatsize * nbl->table_elec->ninteractions;
1330 snew_aligned(nbl->table_elec->data, nbl->table_elec->stride*(nbl->table_elec->n+1), 32);
1332 snew(nbl->table_vdw, 1);
1333 nbl->table_vdw->interaction   = GMX_TABLE_INTERACTION_VDWREP_VDWDISP;
1334 nbl->table_vdw->format        = nbl->table_elec_vdw->format;
1335 nbl->table_vdw->r             = nbl->table_elec_vdw->r;
1336 nbl->table_vdw->n             = nbl->table_elec_vdw->n;
1337 nbl->table_vdw->scale         = nbl->table_elec_vdw->scale;
1338 nbl->table_vdw->formatsize    = nbl->table_elec_vdw->formatsize;
1339 nbl->table_vdw->ninteractions = 2;
1340 nbl->table_vdw->stride        = nbl->table_vdw->formatsize * nbl->table_vdw->ninteractions;
1341 snew_aligned(nbl->table_vdw->data, nbl->table_vdw->stride*(nbl->table_vdw->n+1), 32);
1343 for (i = 0; i <= nbl->table_elec_vdw->n; i++)
1345 for (j = 0; j < 4; j++)
1347 nbl->table_elec->data[4*i+j] = nbl->table_elec_vdw->data[12*i+j];
1349 for (j = 0; j < 8; j++)
1351 nbl->table_vdw->data[8*i+j] = nbl->table_elec_vdw->data[12*i+4+j];
/* Count how many molecules use each bonded-table number for the given
 * interaction types (ftype1/ftype2). Grows *count (indexed by table number)
 * as needed and updates *ncount to the highest table number seen + 1.
 * Fatal error on a negative table number.
 */
1356 static void count_tables(int ftype1, int ftype2, const gmx_mtop_t *mtop,
1357 int *ncount, int **count)
1359 const gmx_moltype_t *molt;
1361 int                  mt, ftype, stride, i, j, tabnr;
1363 for (mt = 0; mt < mtop->nmoltype; mt++)
1365 molt = &mtop->moltype[mt];
1366 for (ftype = 0; ftype < F_NRE; ftype++)
1368 if (ftype == ftype1 || ftype == ftype2)
1370 il     = &molt->ilist[ftype];
1371 stride = 1 + NRAL(ftype);
1372 for (i = 0; i < il->nr; i += stride)
1374 tabnr = mtop->ffparams.iparams[il->iatoms[i]].tab.table;
1377 gmx_fatal(FARGS, "A bonded table number is smaller than 0: %d\n", tabnr);
1379 if (tabnr >= *ncount)
1381 srenew(*count, tabnr+1);
1382 for (j = *ncount; j < tabnr+1; j++)
/* Read all bonded interaction tables that the topology references for the
 * given interaction types. Table i is read from a file named by inserting
 * "_<tabext><i>" before basefn's .xvg extension. Returns the newly
 * allocated table array.
 */
1395 static bondedtable_t *make_bonded_tables(FILE *fplog,
1396 int ftype1, int ftype2,
1397 const gmx_mtop_t *mtop,
1398 const char *basefn, const char *tabext)
1400 int            i, ncount, *count;
1408 count_tables(ftype1, ftype2, mtop, &ncount, &count);
1413 for (i = 0; i < ncount; i++)
1417 sprintf(tabfn, "%s", basefn);
1418 sprintf(tabfn + strlen(basefn) - strlen(ftp2ext(efXVG)) - 1, "_%s%d.%s",
1419 tabext, i, ftp2ext(efXVG));
1420 tab[i] = make_bonded_table(fplog, tabfn, NRAL(ftype1)-2);
/* Record the atom/charge-group ranges this rank computes forces for, and
 * (re)grow the force buffers accordingly, including the separate
 * no-virial-summation force buffer when fr->bF_NoVirSum is set.
 */
1429 void forcerec_set_ranges(t_forcerec *fr,
1430 int ncg_home, int ncg_force,
1432 int natoms_force_constr, int natoms_f_novirsum)
1437 /* fr->ncg_force is unused in the standard code,
1438 * but it can be useful for modified code dealing with charge groups.
1440 fr->ncg_force           = ncg_force;
1441 fr->natoms_force        = natoms_force;
1442 fr->natoms_force_constr = natoms_force_constr;
1444 if (fr->natoms_force_constr > fr->nalloc_force)
1446 fr->nalloc_force = over_alloc_dd(fr->natoms_force_constr);
1449 if (fr->bF_NoVirSum)
1451 fr->f_novirsum_n = natoms_f_novirsum;
1452 if (fr->f_novirsum_n > fr->f_novirsum_nalloc)
1454 fr->f_novirsum_nalloc = over_alloc_dd(fr->f_novirsum_n);
1455 srenew(fr->f_novirsum_alloc, fr->f_novirsum_nalloc);
1460 fr->f_novirsum_n = 0;
/* Map a cut-off of 0 (meaning "no cut-off") to the large sentinel value
 * GMX_CUTOFF_INF so downstream comparisons work uniformly.
 */
1464 static real cutoff_inf(real cutoff)
1468 cutoff = GMX_CUTOFF_INF;
/* Decide whether the accelerated all-vs-all kernels can be used: requires
 * no cut-offs, no PBC, plain cut-off Coulomb/VdW, no free energy, a
 * supported (or no) implicit solvent model, and the GMX_NO_ALLVSALL
 * environment variable unset. With multiple energy-monitor groups the
 * kernels are refused and, when bPrintNote, a performance note is printed.
 */
1474 gmx_bool can_use_allvsall(const t_inputrec *ir, gmx_bool bPrintNote, t_commrec *cr, FILE *fp)
1481 ir->rcoulomb == 0 &&
1483 ir->ePBC == epbcNONE &&
1484 ir->vdwtype == evdwCUT &&
1485 ir->coulombtype == eelCUT &&
1486 ir->efep == efepNO &&
1487 (ir->implicit_solvent == eisNO ||
1488 (ir->implicit_solvent == eisGBSA && (ir->gb_algorithm == egbSTILL ||
1489 ir->gb_algorithm == egbHCT ||
1490 ir->gb_algorithm == egbOBC))) &&
1491 getenv("GMX_NO_ALLVSALL") == NULL
1494 if (bAllvsAll && ir->opts.ngener > 1)
1496 const char *note = "NOTE: Can not use all-vs-all force loops, because there are multiple energy monitor groups; you might get significantly higher performance when using only a single energy monitor group.\n";
1502 fprintf(stderr, "\n%s\n", note);
1506 fprintf(fp, "\n%s\n", note);
1512 if (bAllvsAll && fp && MASTER(cr))
1514 fprintf(fp, "\nUsing SIMD all-vs-all kernels.\n\n");
/* Return whether GPU acceleration of the nbnxn kernels is supported
 * for this run; currently only rules out reruns with multiple energy
 * groups, issuing a warning and falling back to the CPU in that case.
 */
1521 gmx_bool nbnxn_gpu_acceleration_supported(FILE *fplog,
1522 const t_commrec *cr,
1523 const t_inputrec *ir,
1526 if (bRerunMD && ir->opts.ngener > 1)
1528 /* Rerun execution time is dominated by I/O and pair search,
1529 * so GPUs are not very useful, plus they do not support more
1530 * than one energy group. If the user requested GPUs
1531 * explicitly, a fatal error is given later. With non-reruns,
1532 * we fall back to a single whole-of system energy group
1533 * (which runs much faster than a multiple-energy-groups
1534 * implementation would), and issue a note in the .log
1535 * file. Users can re-run if they want the information. */
1536 md_print_warn(cr, fplog, "Rerun with energy groups is not implemented for GPUs, falling back to the CPU\n");
/* Return whether SIMD nbnxn kernels can be used with these settings;
 * LJ-PME with the Lorentz-Berthelot combination rule is the one case
 * that forces a fallback to the plain C kernels (with a warning).
 */
1543 gmx_bool nbnxn_simd_supported(FILE *fplog,
1544 const t_commrec *cr,
1545 const t_inputrec *ir)
1547 if (ir->vdwtype == evdwPME && ir->ljpme_combination_rule == eljpmeLB)
1549 /* LJ PME with LB combination rule does 7 mesh operations.
1550 * This so slow that we don't compile SIMD non-bonded kernels
1552 md_print_warn(cr, fplog, "LJ-PME with Lorentz-Berthelot is not supported with SIMD kernels, falling back to plain C kernels\n");
/* Choose the CPU nbnxn kernel flavor (*kernel_type) and the Ewald
 * exclusion-correction method (*ewald_excl). Defaults to plain C /
 * tabulated; upgraded to SIMD 4xN or 2x(N+N) when compiled in, with
 * environment-variable overrides GMX_NBNXN_SIMD_4XN / _2XNN and
 * GMX_NBNXN_EWALD_TABLE / _ANALYTICAL.
 */
1560 static void pick_nbnxn_kernel_cpu(const t_inputrec gmx_unused *ir,
/* Safe defaults: plain C kernel with tabulated Ewald exclusions */
1564 *kernel_type = nbnxnk4x4_PlainC;
1565 *ewald_excl = ewaldexclTable;
1569 #ifdef GMX_NBNXN_SIMD_4XN
1570 *kernel_type = nbnxnk4xN_SIMD_4xN;
1572 #ifdef GMX_NBNXN_SIMD_2XNN
1573 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1576 #if defined GMX_NBNXN_SIMD_2XNN && defined GMX_NBNXN_SIMD_4XN
1577 /* We need to choose if we want 2x(N+N) or 4xN kernels.
1578 * Currently this is based on the SIMD acceleration choice,
1579 * but it might be better to decide this at runtime based on CPU.
1581 * 4xN calculates more (zero) interactions, but has less pair-search
1582 * work and much better kernel instruction scheduling.
1584 * Up till now we have only seen that on Intel Sandy/Ivy Bridge,
1585 * which doesn't have FMA, both the analytical and tabulated Ewald
1586 * kernels have similar pair rates for 4x8 and 2x(4+4), so we choose
1587 * 2x(4+4) because it results in significantly fewer pairs.
1588 * For RF, the raw pair rate of the 4x8 kernel is higher than 2x(4+4),
1589 * 10% with HT, 50% without HT. As we currently don't detect the actual
1590 * use of HT, use 4x8 to avoid a potential performance hit.
1591 * On Intel Haswell 4x8 is always faster.
1593 *kernel_type = nbnxnk4xN_SIMD_4xN;
1595 #if !GMX_SIMD_HAVE_FMA
1596 if (EEL_PME_EWALD(ir->coulombtype) ||
1597 EVDW_PME(ir->vdwtype))
1599 /* We have Ewald kernels without FMA (Intel Sandy/Ivy Bridge).
1600 * There are enough instructions to make 2x(4+4) efficient.
1602 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1605 #endif /* GMX_NBNXN_SIMD_2XNN && GMX_NBNXN_SIMD_4XN */
/* Explicit user overrides via environment variables; fatal if the
 * requested kernel flavor was not compiled in. */
1608 if (getenv("GMX_NBNXN_SIMD_4XN") != NULL)
1610 #ifdef GMX_NBNXN_SIMD_4XN
1611 *kernel_type = nbnxnk4xN_SIMD_4xN;
1613 gmx_fatal(FARGS, "SIMD 4xN kernels requested, but GROMACS has been compiled without support for these kernels");
1616 if (getenv("GMX_NBNXN_SIMD_2XNN") != NULL)
1618 #ifdef GMX_NBNXN_SIMD_2XNN
1619 *kernel_type = nbnxnk4xN_SIMD_2xNN;
1621 gmx_fatal(FARGS, "SIMD 2x(N+N) kernels requested, but GROMACS has been compiled without support for these kernels");
1625 /* Analytical Ewald exclusion correction is only an option in
1627 * Since table lookup's don't parallelize with SIMD, analytical
1628 * will probably always be faster for a SIMD width of 8 or more.
1629 * With FMA analytical is sometimes faster for a width of 4 as well.
1630 * On BlueGene/Q, this is faster regardless of precision.
1631 * In single precision, this is faster on Bulldozer.
1633 #if GMX_SIMD_REAL_WIDTH >= 8 || \
1634 (GMX_SIMD_REAL_WIDTH >= 4 && GMX_SIMD_HAVE_FMA && !GMX_DOUBLE) || GMX_SIMD_IBM_QPX
1635 *ewald_excl = ewaldexclAnalytical;
/* Environment-variable overrides for the Ewald exclusion method */
1637 if (getenv("GMX_NBNXN_EWALD_TABLE") != NULL)
1639 *ewald_excl = ewaldexclTable;
1641 if (getenv("GMX_NBNXN_EWALD_ANALYTICAL") != NULL)
1643 *ewald_excl = ewaldexclAnalytical;
/* Return a short human-readable name for an nbnxn kernel type, used in
 * log output; fatal error on an unknown kernel type. */
1651 const char *lookup_nbnxn_kernel_name(int kernel_type)
1653 const char *returnvalue = NULL;
1654 switch (kernel_type)
1657 returnvalue = "not set";
1659 case nbnxnk4x4_PlainC:
1660 returnvalue = "plain C";
1662 case nbnxnk4xN_SIMD_4xN:
1663 case nbnxnk4xN_SIMD_2xNN:
/* Both SIMD flavors are reported simply as "SIMD" (when compiled in) */
1665 returnvalue = "SIMD";
1667 returnvalue = "not available";
1670 case nbnxnk8x8x8_GPU: returnvalue = "GPU"; break;
1671 case nbnxnk8x8x8_PlainC: returnvalue = "plain C"; break;
1675 gmx_fatal(FARGS, "Illegal kernel type selected");
/* Select the nbnxn kernel type (and Ewald exclusion method) for one
 * interaction group: GPU-emulation plain C, real GPU, SIMD CPU, or
 * plain C CPU, in that order of precedence. Logs the choice and warns
 * when a slow plain C kernel ends up being used.
 */
1682 static void pick_nbnxn_kernel(FILE *fp,
1683 const t_commrec *cr,
1684 gmx_bool use_simd_kernels,
1686 gmx_bool bEmulateGPU,
1687 const t_inputrec *ir,
1690 gmx_bool bDoNonbonded)
1692 assert(kernel_type);
1694 *kernel_type = nbnxnkNotSet;
1695 *ewald_excl = ewaldexclTable;
/* GPU emulation runs the GPU-layout kernel in plain C on the CPU */
1699 *kernel_type = nbnxnk8x8x8_PlainC;
1703 md_print_warn(cr, fp, "Emulating a GPU run on the CPU (slow)");
1708 *kernel_type = nbnxnk8x8x8_GPU;
/* No GPU (or emulation) selected: fall through to a CPU kernel */
1711 if (*kernel_type == nbnxnkNotSet)
1713 if (use_simd_kernels &&
1714 nbnxn_simd_supported(fp, cr, ir))
1716 pick_nbnxn_kernel_cpu(ir, kernel_type, ewald_excl);
1720 *kernel_type = nbnxnk4x4_PlainC;
1724 if (bDoNonbonded && fp != NULL)
1726 fprintf(fp, "\nUsing %s %dx%d non-bonded kernels\n\n",
1727 lookup_nbnxn_kernel_name(*kernel_type),
1728 nbnxn_kernel_to_cluster_i_size(*kernel_type),
1729 nbnxn_kernel_to_cluster_j_size(*kernel_type));
1731 if (nbnxnk4x4_PlainC == *kernel_type ||
1732 nbnxnk8x8x8_PlainC == *kernel_type)
1734 md_print_warn(cr, fp,
1735 "WARNING: Using the slow %s kernels. This should\n"
1736 "not happen during routine usage on supported platforms.\n\n",
1737 lookup_nbnxn_kernel_name(*kernel_type));
/* Decide whether this rank runs the non-bonded kernels on a real GPU
 * or in GPU-emulation mode, and initialize the assigned GPU device
 * when hardware acceleration is selected. Fatal error if the GPU
 * initialization fails at this point.
 */
1742 static void pick_nbnxn_resources(FILE *fp,
1743 const t_commrec *cr,
1744 const gmx_hw_info_t *hwinfo,
1745 gmx_bool bDoNonbonded,
1747 gmx_bool *bEmulateGPU,
1748 const gmx_gpu_opt_t *gpu_opt)
1750 gmx_bool bEmulateGPUEnvVarSet;
1751 char gpu_err_str[STRLEN];
1755 bEmulateGPUEnvVarSet = (getenv("GMX_EMULATE_GPU") != NULL);
1757 /* Run GPU emulation mode if GMX_EMULATE_GPU is defined. Because
1758 * GPUs (currently) only handle non-bonded calculations, we will
1759 * automatically switch to emulation if non-bonded calculations are
1760 * turned off via GMX_NO_NONBONDED - this is the simple and elegant
1761 * way to turn off GPU initialization, data movement, and cleanup.
1763 * GPU emulation can be useful to assess the performance one can expect by
1764 * adding GPU(s) to the machine. The conditional below allows this even
1765 * if mdrun is compiled without GPU acceleration support.
1766 * Note that you should freeze the system as otherwise it will explode.
1768 *bEmulateGPU = (bEmulateGPUEnvVarSet ||
1769 (!bDoNonbonded && gpu_opt->n_dev_use > 0));
1771 /* Enable GPU mode when GPUs are available or no GPU emulation is requested.
1773 if (gpu_opt->n_dev_use > 0 && !(*bEmulateGPU))
1775 /* Each PP node will use the intra-node id-th device from the
1776 * list of detected/selected GPUs. */
1777 if (!init_gpu(fp, cr->rank_pp_intranode, gpu_err_str,
1778 &hwinfo->gpu_info, gpu_opt))
1780 /* At this point the init should never fail as we made sure that
1781 * we have all the GPUs we need. If it still does, we'll bail. */
1782 /* TODO the decorating of gpu_err_str is nicer if it
1783 happens inside init_gpu. Out here, the decorating with
1784 the MPI rank makes sense. */
1785 gmx_fatal(FARGS, "On rank %d failed to initialize GPU #%d: %s",
1787 get_gpu_device_id(&hwinfo->gpu_info, gpu_opt,
1788 cr->rank_pp_intranode),
1792 /* Here we actually turn on hardware GPU acceleration */
/* Return whether simple (as opposed to GPU-layout) tables are used for
 * the given cut-off scheme; with the Verlet scheme this depends on the
 * pair-list layout of the selected nbnxn kernel. group < 0 selects the
 * local group, otherwise the last (non-local) group is queried.
 */
1797 gmx_bool uses_simple_tables(int cutoff_scheme,
1798 nonbonded_verlet_t *nbv,
1801 gmx_bool bUsesSimpleTables = TRUE;
1804 switch (cutoff_scheme)
1807 bUsesSimpleTables = TRUE;
1810 assert(NULL != nbv && NULL != nbv->grp);
1811 grp_index = (group < 0) ? 0 : (nbv->ngrp - 1);
1812 bUsesSimpleTables = nbnxn_kernel_pairlist_simple(nbv->grp[grp_index].kernel_type);
1815 gmx_incons("unimplemented");
1817 return bUsesSimpleTables;
/* (Re)build the tabulated Ewald correction force/potential tables for
 * Coulomb and/or LJ PME. Frees any existing tables, sizes the new ones
 * from the cut-off (and, for the group scheme, the table range rtab)
 * times the spline table scale, and fills them with
 * table_spline3_fill_ewald_lr().
 */
1820 static void init_ewald_f_table(interaction_const_t *ic,
1825 /* Get the Ewald table spacing based on Coulomb and/or LJ
1826 * Ewald coefficients and rtol.
1828 ic->tabq_scale = ewald_spline3_table_scale(ic);
/* With Verlet the table only needs to cover the cut-off; with the
 * group scheme it has to cover the full table range rtab as well. */
1830 if (ic->cutoff_scheme == ecutsVERLET)
1832 maxr = ic->rcoulomb;
1836 maxr = std::max(ic->rcoulomb, rtab);
1838 ic->tabq_size = static_cast<int>(maxr*ic->tabq_scale) + 2;
/* Release previous tables before reallocating */
1840 sfree_aligned(ic->tabq_coul_FDV0);
1841 sfree_aligned(ic->tabq_coul_F);
1842 sfree_aligned(ic->tabq_coul_V);
1844 sfree_aligned(ic->tabq_vdw_FDV0);
1845 sfree_aligned(ic->tabq_vdw_F);
1846 sfree_aligned(ic->tabq_vdw_V);
1848 if (EEL_PME_EWALD(ic->eeltype))
1850 /* Create the original table data in FDV0 */
1851 snew_aligned(ic->tabq_coul_FDV0, ic->tabq_size*4, 32);
1852 snew_aligned(ic->tabq_coul_F, ic->tabq_size, 32);
1853 snew_aligned(ic->tabq_coul_V, ic->tabq_size, 32);
1854 table_spline3_fill_ewald_lr(ic->tabq_coul_F, ic->tabq_coul_V, ic->tabq_coul_FDV0,
1855 ic->tabq_size, 1/ic->tabq_scale, ic->ewaldcoeff_q, v_q_ewald_lr);
/* Separate tables for the LJ-PME (dispersion) correction */
1858 if (EVDW_PME(ic->vdwtype))
1860 snew_aligned(ic->tabq_vdw_FDV0, ic->tabq_size*4, 32);
1861 snew_aligned(ic->tabq_vdw_F, ic->tabq_size, 32);
1862 snew_aligned(ic->tabq_vdw_V, ic->tabq_size, 32);
1863 table_spline3_fill_ewald_lr(ic->tabq_vdw_F, ic->tabq_vdw_V, ic->tabq_vdw_FDV0,
1864 ic->tabq_size, 1/ic->tabq_scale, ic->ewaldcoeff_lj, v_lj_ewald_lr);
/* Build the Ewald correction tables in ic when Coulomb and/or LJ PME
 * is in use, and report the table spacing and size to the log file. */
1868 void init_interaction_const_tables(FILE *fp,
1869 interaction_const_t *ic,
1872 if (EEL_PME_EWALD(ic->eeltype) || EVDW_PME(ic->vdwtype))
1874 init_ewald_f_table(ic, rtab);
1878 fprintf(fp, "Initialized non-bonded Ewald correction tables, spacing: %.2e size: %d\n\n",
1879 1/ic->tabq_scale, ic->tabq_size);
/* Reset the force-switch shift constants in *sc -- presumably zeroing
 * c2/c3/cpot so no shift is applied; confirm against the body. */
1884 static void clear_force_switch_constants(shift_consts_t *sc)
/* Compute the force-switch polynomial coefficients c2, c3 and the
 * potential shift cpot for an r^-p interaction switched between rsw
 * and the cut-off rc (see the derivation in the comment below). */
1891 static void force_switch_constants(real p,
1895 /* Here we determine the coefficient for shifting the force to zero
1896 * between distance rsw and the cut-off rc.
1897 * For a potential of r^-p, we have force p*r^-(p+1).
1898 * But to save flops we absorb p in the coefficient.
1900 * force/p = r^-(p+1) + c2*r^2 + c3*r^3
1901 * potential = r^-p + c2/3*r^3 + c3/4*r^4 + cpot
1903 sc->c2 = ((p + 1)*rsw - (p + 4)*rc)/(pow(rc, p + 2)*gmx::square(rc - rsw));
1904 sc->c3 = -((p + 1)*rsw - (p + 3)*rc)/(pow(rc, p + 2)*gmx::power3(rc - rsw));
1905 sc->cpot = -pow(rc, -p) + p*sc->c2/3*gmx::power3(rc - rsw) + p*sc->c3/4*gmx::power4(rc - rsw);
/* Compute the 5th-order potential-switch polynomial coefficients
 * c3, c4, c5 for switching the potential smoothly from 1 at rsw to
 * 0 at the cut-off rc. */
1908 static void potential_switch_constants(real rsw, real rc,
1909 switch_consts_t *sc)
1911 /* The switch function is 1 at rsw and 0 at rc.
1912 * The derivative and second derivative are zero at both ends.
1913 * rsw = max(r - r_switch, 0)
1914 * sw = 1 + c3*rsw^3 + c4*rsw^4 + c5*rsw^5
1915 * dsw = 3*c3*rsw^2 + 4*c4*rsw^3 + 5*c5*rsw^4
1916 * force = force*dsw - potential*sw
1919 sc->c3 = -10/gmx::power3(rc - rsw);
1920 sc->c4 = 15/gmx::power4(rc - rsw);
1921 sc->c5 = -6/gmx::power5(rc - rsw);
1924 /*! \brief Construct interaction constants
1926 * This data is used (particularly) by search and force code for
1927 * short-range interactions. Many of these are constant for the whole
1928 * simulation; some are constant only after PME tuning completes.
1931 init_interaction_const(FILE *fp,
1932 interaction_const_t **interaction_const,
1933 const t_forcerec *fr)
1935 interaction_const_t *ic;
1939 ic->cutoff_scheme = fr->cutoff_scheme;
1941 /* Just allocate something so we can free it */
1942 snew_aligned(ic->tabq_coul_FDV0, 16, 32);
1943 snew_aligned(ic->tabq_coul_F, 16, 32);
1944 snew_aligned(ic->tabq_coul_V, 16, 32);
1946 ic->rlist = fr->rlist;
/* Van der Waals parameters, copied from the force record */
1949 ic->vdwtype = fr->vdwtype;
1950 ic->vdw_modifier = fr->vdw_modifier;
1951 ic->rvdw = fr->rvdw;
1952 ic->rvdw_switch = fr->rvdw_switch;
1953 ic->ewaldcoeff_lj = fr->ewaldcoeff_lj;
1954 ic->ljpme_comb_rule = fr->ljpme_combination_rule;
1955 ic->sh_lj_ewald = 0;
1956 clear_force_switch_constants(&ic->dispersion_shift);
1957 clear_force_switch_constants(&ic->repulsion_shift);
/* Set up the VdW modifier: shift/switch constants per modifier type */
1959 switch (ic->vdw_modifier)
1961 case eintmodPOTSHIFT:
1962 /* Only shift the potential, don't touch the force */
1963 ic->dispersion_shift.cpot = -1.0/gmx::power6(ic->rvdw);
1964 ic->repulsion_shift.cpot = -1.0/gmx::power12(ic->rvdw);
1965 if (EVDW_PME(ic->vdwtype))
/* LJ-PME needs an extra shift from the Ewald real-space tail */
1969 crc2 = gmx::square(ic->ewaldcoeff_lj*ic->rvdw);
1970 ic->sh_lj_ewald = (std::exp(-crc2)*(1 + crc2 + 0.5*crc2*crc2) - 1)/gmx::power6(ic->rvdw);
1973 case eintmodFORCESWITCH:
1974 /* Switch the force, switch and shift the potential */
1975 force_switch_constants(6.0, ic->rvdw_switch, ic->rvdw,
1976 &ic->dispersion_shift);
1977 force_switch_constants(12.0, ic->rvdw_switch, ic->rvdw,
1978 &ic->repulsion_shift);
1980 case eintmodPOTSWITCH:
1981 /* Switch the potential and force */
1982 potential_switch_constants(ic->rvdw_switch, ic->rvdw,
1986 case eintmodEXACTCUTOFF:
1987 /* Nothing to do here */
1990 gmx_incons("unimplemented potential modifier");
1993 ic->sh_invrc6 = -ic->dispersion_shift.cpot;
1995 /* Electrostatics */
1996 ic->eeltype = fr->eeltype;
1997 ic->coulomb_modifier = fr->coulomb_modifier;
1998 ic->rcoulomb = fr->rcoulomb;
1999 ic->epsilon_r = fr->epsilon_r;
2000 ic->epsfac = fr->epsfac;
2001 ic->ewaldcoeff_q = fr->ewaldcoeff_q;
/* Ewald potential shift at the cut-off, for potential-shift modifier */
2003 if (fr->coulomb_modifier == eintmodPOTSHIFT)
2005 ic->sh_ewald = std::erfc(ic->ewaldcoeff_q*ic->rcoulomb);
2012 /* Reaction-field */
2013 if (EEL_RF(ic->eeltype))
2015 ic->epsilon_rf = fr->epsilon_rf;
2016 ic->k_rf = fr->k_rf;
2017 ic->c_rf = fr->c_rf;
2021 /* For plain cut-off we might use the reaction-field kernels */
2022 ic->epsilon_rf = ic->epsilon_r;
2024 if (fr->coulomb_modifier == eintmodPOTSHIFT)
2026 ic->c_rf = 1/ic->rcoulomb;
/* Log the potential shifts that will be applied */
2036 real dispersion_shift;
2038 dispersion_shift = ic->dispersion_shift.cpot;
2039 if (EVDW_PME(ic->vdwtype))
2041 dispersion_shift -= ic->sh_lj_ewald;
2043 fprintf(fp, "Potential shift: LJ r^-12: %.3e r^-6: %.3e",
2044 ic->repulsion_shift.cpot, dispersion_shift);
2046 if (ic->eeltype == eelCUT)
2048 fprintf(fp, ", Coulomb %.e", -ic->c_rf);
2050 else if (EEL_PME(ic->eeltype))
2052 fprintf(fp, ", Ewald %.3e", -ic->sh_ewald);
2057 *interaction_const = ic;
/* Allocate and initialize the nonbonded Verlet-scheme data structure:
 * picks GPU/CPU resources and kernel types per interaction group
 * (local and, with domain decomposition, non-local), sets up the pair
 * search, pair lists and atom data, initializes the GPU module when
 * used, and determines the pair-list balancing parameter.
 */
2060 static void init_nb_verlet(FILE *fp,
2061 nonbonded_verlet_t **nb_verlet,
2062 gmx_bool bFEP_NonBonded,
2063 const t_inputrec *ir,
2064 const t_forcerec *fr,
2065 const t_commrec *cr,
2066 const char *nbpu_opt)
2068 nonbonded_verlet_t *nbv;
2071 gmx_bool bEmulateGPU, bHybridGPURun = FALSE;
2073 nbnxn_alloc_t *nb_alloc;
2074 nbnxn_free_t *nb_free;
/* Decide on GPU use/emulation and initialize the device if selected */
2078 pick_nbnxn_resources(fp, cr, fr->hwinfo,
2085 nbv->min_ci_balanced = 0;
/* Two groups (local + non-local) with domain decomposition, else one */
2087 nbv->ngrp = (DOMAINDECOMP(cr) ? 2 : 1);
2088 for (i = 0; i < nbv->ngrp; i++)
2090 nbv->grp[i].nbl_lists.nnbl = 0;
2091 nbv->grp[i].nbat = NULL;
2092 nbv->grp[i].kernel_type = nbnxnkNotSet;
2094 if (i == 0) /* local */
2096 pick_nbnxn_kernel(fp, cr, fr->use_simd_kernels,
2097 nbv->bUseGPU, bEmulateGPU, ir,
2098 &nbv->grp[i].kernel_type,
2099 &nbv->grp[i].ewald_excl,
2102 else /* non-local */
2104 if (nbpu_opt != NULL && strcmp(nbpu_opt, "gpu_cpu") == 0)
2106 /* Use GPU for local, select a CPU kernel for non-local */
2107 pick_nbnxn_kernel(fp, cr, fr->use_simd_kernels,
2109 &nbv->grp[i].kernel_type,
2110 &nbv->grp[i].ewald_excl,
2113 bHybridGPURun = TRUE;
2117 /* Use the same kernel for local and non-local interactions */
2118 nbv->grp[i].kernel_type = nbv->grp[0].kernel_type;
2119 nbv->grp[i].ewald_excl = nbv->grp[0].ewald_excl;
2124 nbnxn_init_search(&nbv->nbs,
2125 DOMAINDECOMP(cr) ? &cr->dd->nc : NULL,
2126 DOMAINDECOMP(cr) ? domdec_zones(cr->dd) : NULL,
2128 gmx_omp_nthreads_get(emntPairsearch));
2130 for (i = 0; i < nbv->ngrp; i++)
/* GPU kernels need pinned host memory for the pair lists */
2132 gpu_set_host_malloc_and_free(nbv->grp[0].kernel_type == nbnxnk8x8x8_GPU,
2133 &nb_alloc, &nb_free);
2135 nbnxn_init_pairlist_set(&nbv->grp[i].nbl_lists,
2136 nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
2137 /* 8x8x8 "non-simple" lists are ATM always combined */
2138 !nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type),
/* Only allocate separate atom data when the kernel type differs
 * from the local group's; otherwise the local data is shared. */
2142 nbv->grp[0].kernel_type != nbv->grp[i].kernel_type)
2144 gmx_bool bSimpleList;
2145 int enbnxninitcombrule;
2147 bSimpleList = nbnxn_kernel_pairlist_simple(nbv->grp[i].kernel_type);
2149 if (fr->vdwtype == evdwCUT &&
2150 (fr->vdw_modifier == eintmodNONE ||
2151 fr->vdw_modifier == eintmodPOTSHIFT) &&
2152 getenv("GMX_NO_LJ_COMB_RULE") == NULL)
2154 /* Plain LJ cut-off: we can optimize with combination rules */
2155 enbnxninitcombrule = enbnxninitcombruleDETECT;
2157 else if (fr->vdwtype == evdwPME)
2159 /* LJ-PME: we need to use a combination rule for the grid */
2160 if (fr->ljpme_combination_rule == eljpmeGEOM)
2162 enbnxninitcombrule = enbnxninitcombruleGEOM;
2166 enbnxninitcombrule = enbnxninitcombruleLB;
2171 /* We use a full combination matrix: no rule required */
2172 enbnxninitcombrule = enbnxninitcombruleNONE;
2176 snew(nbv->grp[i].nbat, 1);
2177 nbnxn_atomdata_init(fp,
2179 nbv->grp[i].kernel_type,
2181 fr->ntype, fr->nbfp,
2183 bSimpleList ? gmx_omp_nthreads_get(emntNonbonded) : 1,
/* Same kernel type as local: reuse the local atom data */
2188 nbv->grp[i].nbat = nbv->grp[0].nbat;
2194 /* init the NxN GPU data; the last argument tells whether we'll have
2195 * both local and non-local NB calculation on GPU */
2196 nbnxn_gpu_init(&nbv->gpu_nbv,
2197 &fr->hwinfo->gpu_info,
2201 cr->rank_pp_intranode,
2203 (nbv->ngrp > 1) && !bHybridGPURun);
2205 /* With tMPI + GPUs some ranks may be sharing GPU(s) and therefore
2206 * also sharing texture references. To keep the code simple, we don't
2207 * treat texture references as shared resources, but this means that
2208 * the coulomb_tab and nbfp texture refs will get updated by multiple threads.
2209 * Hence, to ensure that the non-bonded kernels don't start before all
2210 * texture binding operations are finished, we need to wait for all ranks
2211 * to arrive here before continuing.
2213 * Note that we could omit this barrier if GPUs are not shared (or
2214 * texture objects are used), but as this is initialization code, there
2215 * is no point in complicating things.
2222 #endif /* GMX_THREAD_MPI */
/* Pair-list balancing parameter: user override via GMX_NB_MIN_CI,
 * otherwise auto-set from the GPU's multiprocessor count. */
2224 if ((env = getenv("GMX_NB_MIN_CI")) != NULL)
2228 nbv->min_ci_balanced = strtol(env, &end, 10);
2229 if (!end || (*end != 0) || nbv->min_ci_balanced < 0)
2231 gmx_fatal(FARGS, "Invalid value passed in GMX_NB_MIN_CI=%s, non-negative integer required", env);
2236 fprintf(debug, "Neighbor-list balancing parameter: %d (passed as env. var.)\n",
2237 nbv->min_ci_balanced);
2242 nbv->min_ci_balanced = nbnxn_gpu_min_ci_balanced(nbv->gpu_nbv);
2245 fprintf(debug, "Neighbor-list balancing parameter: %d (auto-adjusted to the number of GPU multi-processors)\n",
2246 nbv->min_ci_balanced);
/* Return TRUE when a nonbonded Verlet structure exists and is set up
 * to run on a GPU; safe to call with nbv == NULL. */
2255 gmx_bool usingGpu(nonbonded_verlet_t *nbv)
2257 return nbv != NULL && nbv->bUseGPU;
2260 void init_forcerec(FILE *fp,
2263 const t_inputrec *ir,
2264 const gmx_mtop_t *mtop,
2265 const t_commrec *cr,
2270 const char *nbpu_opt,
2271 gmx_bool bNoSolvOpt,
2274 int i, m, negp_pp, negptable, egi, egj;
2279 gmx_bool bGenericKernelOnly;
2280 gmx_bool needGroupSchemeTables, bSomeNormalNbListsAreInUse;
2281 gmx_bool bFEP_NonBonded;
2282 int *nm_ind, egp_flags;
2284 if (fr->hwinfo == NULL)
2286 /* Detect hardware, gather information.
2287 * In mdrun, hwinfo has already been set before calling init_forcerec.
2288 * Here we ignore GPUs, as tools will not use them anyhow.
2290 fr->hwinfo = gmx_detect_hardware(fp, cr, FALSE);
2293 /* By default we turn SIMD kernels on, but it might be turned off further down... */
2294 fr->use_simd_kernels = TRUE;
2296 fr->bDomDec = DOMAINDECOMP(cr);
2298 if (check_box(ir->ePBC, box))
2300 gmx_fatal(FARGS, check_box(ir->ePBC, box));
2303 /* Test particle insertion ? */
2306 /* Set to the size of the molecule to be inserted (the last one) */
2307 /* Because of old style topologies, we have to use the last cg
2308 * instead of the last molecule type.
2310 cgs = &mtop->moltype[mtop->molblock[mtop->nmolblock-1].type].cgs;
2311 fr->n_tpi = cgs->index[cgs->nr] - cgs->index[cgs->nr-1];
2312 if (fr->n_tpi != mtop->mols.index[mtop->mols.nr] - mtop->mols.index[mtop->mols.nr-1])
2314 gmx_fatal(FARGS, "The molecule to insert can not consist of multiple charge groups.\nMake it a single charge group.");
2322 if (ir->coulombtype == eelRF_NEC_UNSUPPORTED)
2324 gmx_fatal(FARGS, "%s electrostatics is no longer supported",
2325 eel_names[ir->coulombtype]);
2330 gmx_fatal(FARGS, "AdResS simulations are no longer supported");
2332 if (ir->useTwinRange)
2334 gmx_fatal(FARGS, "Twin-range simulations are no longer supported");
2336 /* Copy the user determined parameters */
2337 fr->userint1 = ir->userint1;
2338 fr->userint2 = ir->userint2;
2339 fr->userint3 = ir->userint3;
2340 fr->userint4 = ir->userint4;
2341 fr->userreal1 = ir->userreal1;
2342 fr->userreal2 = ir->userreal2;
2343 fr->userreal3 = ir->userreal3;
2344 fr->userreal4 = ir->userreal4;
2347 fr->fc_stepsize = ir->fc_stepsize;
2350 fr->efep = ir->efep;
2351 fr->sc_alphavdw = ir->fepvals->sc_alpha;
2352 if (ir->fepvals->bScCoul)
2354 fr->sc_alphacoul = ir->fepvals->sc_alpha;
2355 fr->sc_sigma6_min = gmx::power6(ir->fepvals->sc_sigma_min);
2359 fr->sc_alphacoul = 0;
2360 fr->sc_sigma6_min = 0; /* only needed when bScCoul is on */
2362 fr->sc_power = ir->fepvals->sc_power;
2363 fr->sc_r_power = ir->fepvals->sc_r_power;
2364 fr->sc_sigma6_def = gmx::power6(ir->fepvals->sc_sigma);
2366 env = getenv("GMX_SCSIGMA_MIN");
2370 sscanf(env, "%20lf", &dbl);
2371 fr->sc_sigma6_min = gmx::power6(dbl);
2374 fprintf(fp, "Setting the minimum soft core sigma to %g nm\n", dbl);
2378 fr->bNonbonded = TRUE;
2379 if (getenv("GMX_NO_NONBONDED") != NULL)
2381 /* turn off non-bonded calculations */
2382 fr->bNonbonded = FALSE;
2383 md_print_warn(cr, fp,
2384 "Found environment variable GMX_NO_NONBONDED.\n"
2385 "Disabling nonbonded calculations.\n");
2388 bGenericKernelOnly = FALSE;
2390 /* We now check in the NS code whether a particular combination of interactions
2391 * can be used with water optimization, and disable it if that is not the case.
2394 if (getenv("GMX_NB_GENERIC") != NULL)
2399 "Found environment variable GMX_NB_GENERIC.\n"
2400 "Disabling all interaction-specific nonbonded kernels, will only\n"
2401 "use the slow generic ones in src/gmxlib/nonbonded/nb_generic.c\n\n");
2403 bGenericKernelOnly = TRUE;
2406 if (bGenericKernelOnly == TRUE)
2411 if ( (getenv("GMX_DISABLE_SIMD_KERNELS") != NULL) || (getenv("GMX_NOOPTIMIZEDKERNELS") != NULL) )
2413 fr->use_simd_kernels = FALSE;
2417 "\nFound environment variable GMX_DISABLE_SIMD_KERNELS.\n"
2418 "Disabling the usage of any SIMD-specific non-bonded & bonded kernel routines\n"
2419 "(e.g. SSE2/SSE4.1/AVX).\n\n");
2423 fr->bBHAM = (mtop->ffparams.functype[0] == F_BHAM);
2425 /* Check if we can/should do all-vs-all kernels */
2426 fr->bAllvsAll = can_use_allvsall(ir, FALSE, NULL, NULL);
2427 fr->AllvsAll_work = NULL;
2428 fr->AllvsAll_workgb = NULL;
2430 /* All-vs-all kernels have not been implemented in 4.6 and later.
2431 * See Redmine #1249. */
2434 fr->bAllvsAll = FALSE;
2438 "\nYour simulation settings would have triggered the efficient all-vs-all\n"
2439 "kernels in GROMACS 4.5, but these have not been implemented in GROMACS\n"
2440 "4.6 and 5.x. If performance is important, please use GROMACS 4.5.7\n"
2441 "or try cutoff-scheme = Verlet.\n\n");
2445 /* Neighbour searching stuff */
2446 fr->cutoff_scheme = ir->cutoff_scheme;
2447 fr->bGrid = (ir->ns_type == ensGRID);
2448 fr->ePBC = ir->ePBC;
2450 if (fr->cutoff_scheme == ecutsGROUP)
2452 const char *note = "NOTE: This file uses the deprecated 'group' cutoff_scheme. This will be\n"
2453 "removed in a future release when 'verlet' supports all interaction forms.\n";
2457 fprintf(stderr, "\n%s\n", note);
2461 fprintf(fp, "\n%s\n", note);
2465 /* Determine if we will do PBC for distances in bonded interactions */
2466 if (fr->ePBC == epbcNONE)
2468 fr->bMolPBC = FALSE;
2472 if (!DOMAINDECOMP(cr))
2476 bSHAKE = (ir->eConstrAlg == econtSHAKE &&
2477 (gmx_mtop_ftype_count(mtop, F_CONSTR) > 0 ||
2478 gmx_mtop_ftype_count(mtop, F_CONSTRNC) > 0));
2480 /* The group cut-off scheme and SHAKE assume charge groups
2481 * are whole, but not using molpbc is faster in most cases.
2482 * With intermolecular interactions we need PBC for calculating
2483 * distances between atoms in different molecules.
2485 if ((fr->cutoff_scheme == ecutsGROUP || bSHAKE) &&
2486 !mtop->bIntermolecularInteractions)
2488 fr->bMolPBC = ir->bPeriodicMols;
2490 if (bSHAKE && fr->bMolPBC)
2492 gmx_fatal(FARGS, "SHAKE is not supported with periodic molecules");
2499 if (getenv("GMX_USE_GRAPH") != NULL)
2501 fr->bMolPBC = FALSE;
2504 md_print_warn(cr, fp, "GMX_USE_GRAPH is set, using the graph for bonded interactions\n");
2507 if (mtop->bIntermolecularInteractions)
2509 md_print_warn(cr, fp, "WARNING: Molecules linked by intermolecular interactions have to reside in the same periodic image, otherwise artifacts will occur!\n");
2513 if (bSHAKE && fr->bMolPBC)
2515 gmx_fatal(FARGS, "SHAKE is not properly supported with intermolecular interactions. For short simulations where linked molecules remain in the same periodic image, the environment variable GMX_USE_GRAPH can be used to override this check.\n");
2521 fr->bMolPBC = dd_bonded_molpbc(cr->dd, fr->ePBC);
2524 fr->bGB = (ir->implicit_solvent == eisGBSA);
2526 fr->rc_scaling = ir->refcoord_scaling;
2527 copy_rvec(ir->posres_com, fr->posres_com);
2528 copy_rvec(ir->posres_comB, fr->posres_comB);
2529 fr->rlist = cutoff_inf(ir->rlist);
2530 fr->eeltype = ir->coulombtype;
2531 fr->vdwtype = ir->vdwtype;
2532 fr->ljpme_combination_rule = ir->ljpme_combination_rule;
2534 fr->coulomb_modifier = ir->coulomb_modifier;
2535 fr->vdw_modifier = ir->vdw_modifier;
2537 /* Electrostatics: Translate from interaction-setting-in-mdp-file to kernel interaction format */
2538 switch (fr->eeltype)
2541 fr->nbkernel_elec_interaction = (fr->bGB) ? GMX_NBKERNEL_ELEC_GENERALIZEDBORN : GMX_NBKERNEL_ELEC_COULOMB;
2546 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
2550 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_REACTIONFIELD;
2551 fr->coulomb_modifier = eintmodEXACTCUTOFF;
2560 case eelPMEUSERSWITCH:
2561 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
2567 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_EWALD;
2571 gmx_fatal(FARGS, "Unsupported electrostatic interaction: %s", eel_names[fr->eeltype]);
2575 /* Vdw: Translate from mdp settings to kernel format */
2576 switch (fr->vdwtype)
2581 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_BUCKINGHAM;
2585 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LENNARDJONES;
2589 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_LJEWALD;
2595 case evdwENCADSHIFT:
2596 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
2600 gmx_fatal(FARGS, "Unsupported vdw interaction: %s", evdw_names[fr->vdwtype]);
2604 /* These start out identical to ir, but might be altered if we e.g. tabulate the interaction in the kernel */
2605 fr->nbkernel_elec_modifier = fr->coulomb_modifier;
2606 fr->nbkernel_vdw_modifier = fr->vdw_modifier;
2608 fr->rvdw = cutoff_inf(ir->rvdw);
2609 fr->rvdw_switch = ir->rvdw_switch;
2610 fr->rcoulomb = cutoff_inf(ir->rcoulomb);
2611 fr->rcoulomb_switch = ir->rcoulomb_switch;
2613 fr->bEwald = EEL_PME_EWALD(fr->eeltype);
2615 fr->reppow = mtop->ffparams.reppow;
2617 if (ir->cutoff_scheme == ecutsGROUP)
2619 fr->bvdwtab = ((fr->vdwtype != evdwCUT || !gmx_within_tol(fr->reppow, 12.0, 10*GMX_DOUBLE_EPS))
2620 && !EVDW_PME(fr->vdwtype));
2621 /* We have special kernels for standard Ewald and PME, but the pme-switch ones are tabulated above */
2622 fr->bcoultab = !(fr->eeltype == eelCUT ||
2623 fr->eeltype == eelEWALD ||
2624 fr->eeltype == eelPME ||
2625 fr->eeltype == eelRF ||
2626 fr->eeltype == eelRF_ZERO);
2628 /* If the user absolutely wants different switch/shift settings for coul/vdw, it is likely
2629 * going to be faster to tabulate the interaction than calling the generic kernel.
2630 * However, if generic kernels have been requested we keep things analytically.
2632 if (fr->nbkernel_elec_modifier == eintmodPOTSWITCH &&
2633 fr->nbkernel_vdw_modifier == eintmodPOTSWITCH &&
2634 bGenericKernelOnly == FALSE)
2636 if ((fr->rcoulomb_switch != fr->rvdw_switch) || (fr->rcoulomb != fr->rvdw))
2638 fr->bcoultab = TRUE;
2639 /* Once we tabulate electrostatics, we can use the switch function for LJ,
2640 * which would otherwise need two tables.
2644 else if ((fr->nbkernel_elec_modifier == eintmodPOTSHIFT && fr->nbkernel_vdw_modifier == eintmodPOTSHIFT) ||
2645 ((fr->nbkernel_elec_interaction == GMX_NBKERNEL_ELEC_REACTIONFIELD &&
2646 fr->nbkernel_elec_modifier == eintmodEXACTCUTOFF &&
2647 (fr->nbkernel_vdw_modifier == eintmodPOTSWITCH || fr->nbkernel_vdw_modifier == eintmodPOTSHIFT))))
2649 if ((fr->rcoulomb != fr->rvdw) && (bGenericKernelOnly == FALSE))
2651 fr->bcoultab = TRUE;
2655 if (fr->nbkernel_elec_modifier == eintmodFORCESWITCH)
2657 fr->bcoultab = TRUE;
2659 if (fr->nbkernel_vdw_modifier == eintmodFORCESWITCH)
2664 if (getenv("GMX_REQUIRE_TABLES"))
2667 fr->bcoultab = TRUE;
2672 fprintf(fp, "Table routines are used for coulomb: %s\n",
2673 gmx::boolToString(fr->bcoultab));
2674 fprintf(fp, "Table routines are used for vdw: %s\n",
2675 gmx::boolToString(fr->bvdwtab));
2678 if (fr->bvdwtab == TRUE)
2680 fr->nbkernel_vdw_interaction = GMX_NBKERNEL_VDW_CUBICSPLINETABLE;
2681 fr->nbkernel_vdw_modifier = eintmodNONE;
2683 if (fr->bcoultab == TRUE)
2685 fr->nbkernel_elec_interaction = GMX_NBKERNEL_ELEC_CUBICSPLINETABLE;
2686 fr->nbkernel_elec_modifier = eintmodNONE;
2690 if (ir->cutoff_scheme == ecutsVERLET)
2692 if (!gmx_within_tol(fr->reppow, 12.0, 10*GMX_DOUBLE_EPS))
2694 gmx_fatal(FARGS, "Cut-off scheme %S only supports LJ repulsion power 12", ecutscheme_names[ir->cutoff_scheme]);
2696 fr->bvdwtab = FALSE;
2697 fr->bcoultab = FALSE;
2700 /* Tables are used for direct ewald sum */
2703 if (EEL_PME(ir->coulombtype))
2707 fprintf(fp, "Will do PME sum in reciprocal space for electrostatic interactions.\n");
2709 if (ir->coulombtype == eelP3M_AD)
2711 please_cite(fp, "Hockney1988");
2712 please_cite(fp, "Ballenegger2012");
2716 please_cite(fp, "Essmann95a");
2719 if (ir->ewald_geometry == eewg3DC)
2723 fprintf(fp, "Using the Ewald3DC correction for systems with a slab geometry.\n");
2725 please_cite(fp, "In-Chul99a");
2728 fr->ewaldcoeff_q = calc_ewaldcoeff_q(ir->rcoulomb, ir->ewald_rtol);
2729 init_ewald_tab(&(fr->ewald_table), ir, fp);
2732 fprintf(fp, "Using a Gaussian width (1/beta) of %g nm for Ewald\n",
2733 1/fr->ewaldcoeff_q);
2737 if (EVDW_PME(ir->vdwtype))
2741 fprintf(fp, "Will do PME sum in reciprocal space for LJ dispersion interactions.\n");
2743 please_cite(fp, "Essmann95a");
2744 fr->ewaldcoeff_lj = calc_ewaldcoeff_lj(ir->rvdw, ir->ewald_rtol_lj);
2747 fprintf(fp, "Using a Gaussian width (1/beta) of %g nm for LJ Ewald\n",
2748 1/fr->ewaldcoeff_lj);
2752 /* Electrostatics */
2753 fr->epsilon_r = ir->epsilon_r;
2754 fr->epsilon_rf = ir->epsilon_rf;
2755 fr->fudgeQQ = mtop->ffparams.fudgeQQ;
2757 /* Parameters for generalized RF */
2761 if (fr->eeltype == eelGRF)
2763 init_generalized_rf(fp, mtop, ir, fr);
2766 fr->bF_NoVirSum = (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype) ||
2767 gmx_mtop_ftype_count(mtop, F_POSRES) > 0 ||
2768 gmx_mtop_ftype_count(mtop, F_FBPOSRES) > 0 ||
2769 inputrecElecField(ir)
2772 if (fr->cutoff_scheme == ecutsGROUP &&
2773 ncg_mtop(mtop) > fr->cg_nalloc && !DOMAINDECOMP(cr))
2775 /* Count the total number of charge groups */
2776 fr->cg_nalloc = ncg_mtop(mtop);
2777 srenew(fr->cg_cm, fr->cg_nalloc);
2779 if (fr->shift_vec == NULL)
2781 snew(fr->shift_vec, SHIFTS);
2784 if (fr->fshift == NULL)
2786 snew(fr->fshift, SHIFTS);
2789 if (fr->nbfp == NULL)
2791 fr->ntype = mtop->ffparams.atnr;
2792 fr->nbfp = mk_nbfp(&mtop->ffparams, fr->bBHAM);
2793 if (EVDW_PME(fr->vdwtype))
2795 fr->ljpme_c6grid = make_ljpme_c6grid(&mtop->ffparams, fr);
2799 /* Copy the energy group exclusions */
2800 fr->egp_flags = ir->opts.egp_flags;
2802 /* Van der Waals stuff */
2803 if ((fr->vdwtype != evdwCUT) && (fr->vdwtype != evdwUSER) && !fr->bBHAM)
2805 if (fr->rvdw_switch >= fr->rvdw)
2807 gmx_fatal(FARGS, "rvdw_switch (%f) must be < rvdw (%f)",
2808 fr->rvdw_switch, fr->rvdw);
2812 fprintf(fp, "Using %s Lennard-Jones, switch between %g and %g nm\n",
2813 (fr->eeltype == eelSWITCH) ? "switched" : "shifted",
2814 fr->rvdw_switch, fr->rvdw);
2818 if (fr->bBHAM && EVDW_PME(fr->vdwtype))
2820 gmx_fatal(FARGS, "LJ PME not supported with Buckingham");
2823 if (fr->bBHAM && (fr->vdwtype == evdwSHIFT || fr->vdwtype == evdwSWITCH))
2825 gmx_fatal(FARGS, "Switch/shift interaction not supported with Buckingham");
2828 if (fr->bBHAM && fr->cutoff_scheme == ecutsVERLET)
2830 gmx_fatal(FARGS, "Verlet cutoff-scheme is not supported with Buckingham");
2835 fprintf(fp, "Cut-off's: NS: %g Coulomb: %g %s: %g\n",
2836 fr->rlist, fr->rcoulomb, fr->bBHAM ? "BHAM" : "LJ", fr->rvdw);
2839 fr->eDispCorr = ir->eDispCorr;
2840 fr->numAtomsForDispersionCorrection = mtop->natoms;
2841 if (ir->eDispCorr != edispcNO)
2843 set_avcsixtwelve(fp, fr, mtop);
2848 set_bham_b_max(fp, fr, mtop);
2851 fr->gb_epsilon_solvent = ir->gb_epsilon_solvent;
2853 /* Copy the GBSA data (radius, volume and surftens for each
2854 * atomtype) from the topology atomtype section to forcerec.
2856 snew(fr->atype_radius, fr->ntype);
2857 snew(fr->atype_vol, fr->ntype);
2858 snew(fr->atype_surftens, fr->ntype);
2859 snew(fr->atype_gb_radius, fr->ntype);
2860 snew(fr->atype_S_hct, fr->ntype);
2862 if (mtop->atomtypes.nr > 0)
2864 for (i = 0; i < fr->ntype; i++)
2866 fr->atype_radius[i] = mtop->atomtypes.radius[i];
2868 for (i = 0; i < fr->ntype; i++)
2870 fr->atype_vol[i] = mtop->atomtypes.vol[i];
2872 for (i = 0; i < fr->ntype; i++)
2874 fr->atype_surftens[i] = mtop->atomtypes.surftens[i];
2876 for (i = 0; i < fr->ntype; i++)
2878 fr->atype_gb_radius[i] = mtop->atomtypes.gb_radius[i];
2880 for (i = 0; i < fr->ntype; i++)
2882 fr->atype_S_hct[i] = mtop->atomtypes.S_hct[i];
2886 /* Generate the GB table if needed */
2890 fr->gbtabscale = 2000;
2892 fr->gbtabscale = 500;
2896 fr->gbtab = make_gb_table(fr);
2898 init_gb(&fr->born, fr, ir, mtop, ir->gb_algorithm);
2900 /* Copy local gb data (for dd, this is done in dd_partition_system) */
2901 if (!DOMAINDECOMP(cr))
2903 make_local_gb(cr, fr->born, ir->gb_algorithm);
2907 /* Set the charge scaling */
2908 if (fr->epsilon_r != 0)
2910 fr->epsfac = ONE_4PI_EPS0/fr->epsilon_r;
2914 /* eps = 0 is infinite dieletric: no coulomb interactions */
2918 /* Reaction field constants */
2919 if (EEL_RF(fr->eeltype))
2921 calc_rffac(fp, fr->eeltype, fr->epsilon_r, fr->epsilon_rf,
2922 fr->rcoulomb, fr->temp, fr->zsquare, box,
2923 &fr->kappa, &fr->k_rf, &fr->c_rf);
2926 /*This now calculates sum for q and c6*/
2927 set_chargesum(fp, fr, mtop);
2929 /* Construct tables for the group scheme. A little unnecessary to
2930 * make both vdw and coul tables sometimes, but what the
2931 * heck. Note that both cutoff schemes construct Ewald tables in
2932 * init_interaction_const_tables. */
2933 needGroupSchemeTables = (ir->cutoff_scheme == ecutsGROUP &&
2934 (fr->bcoultab || fr->bvdwtab));
2936 negp_pp = ir->opts.ngener - ir->nwall;
2938 if (!needGroupSchemeTables)
2940 bSomeNormalNbListsAreInUse = TRUE;
2945 bSomeNormalNbListsAreInUse = FALSE;
2946 for (egi = 0; egi < negp_pp; egi++)
2948 for (egj = egi; egj < negp_pp; egj++)
2950 egp_flags = ir->opts.egp_flags[GID(egi, egj, ir->opts.ngener)];
2951 if (!(egp_flags & EGP_EXCL))
2953 if (egp_flags & EGP_TABLE)
2959 bSomeNormalNbListsAreInUse = TRUE;
2964 if (bSomeNormalNbListsAreInUse)
2966 fr->nnblists = negptable + 1;
2970 fr->nnblists = negptable;
2972 if (fr->nnblists > 1)
2974 snew(fr->gid2nblists, ir->opts.ngener*ir->opts.ngener);
2978 snew(fr->nblists, fr->nnblists);
2980 /* This code automatically gives table length tabext without cut-off's,
2981 * in that case grompp should already have checked that we do not need
2982 * normal tables and we only generate tables for 1-4 interactions.
2984 rtab = ir->rlist + ir->tabext;
2986 if (needGroupSchemeTables)
2988 /* make tables for ordinary interactions */
2989 if (bSomeNormalNbListsAreInUse)
2991 make_nbf_tables(fp, fr, rtab, tabfn, NULL, NULL, &fr->nblists[0]);
3000 /* Read the special tables for certain energy group pairs */
3001 nm_ind = mtop->groups.grps[egcENER].nm_ind;
3002 for (egi = 0; egi < negp_pp; egi++)
3004 for (egj = egi; egj < negp_pp; egj++)
3006 egp_flags = ir->opts.egp_flags[GID(egi, egj, ir->opts.ngener)];
3007 if ((egp_flags & EGP_TABLE) && !(egp_flags & EGP_EXCL))
3009 if (fr->nnblists > 1)
3011 fr->gid2nblists[GID(egi, egj, ir->opts.ngener)] = m;
3013 /* Read the table file with the two energy groups names appended */
3014 make_nbf_tables(fp, fr, rtab, tabfn,
3015 *mtop->groups.grpname[nm_ind[egi]],
3016 *mtop->groups.grpname[nm_ind[egj]],
3020 else if (fr->nnblists > 1)
3022 fr->gid2nblists[GID(egi, egj, ir->opts.ngener)] = 0;
3029 /* Tables might not be used for the potential modifier
3030 * interactions per se, but we still need them to evaluate
3031 * switch/shift dispersion corrections in this case. */
3032 if (fr->eDispCorr != edispcNO)
3034 fr->dispersionCorrectionTable = makeDispersionCorrectionTable(fp, fr, rtab, tabfn);
3037 /* We want to use unmodified tables for 1-4 coulombic
3038 * interactions, so we must in general have an extra set of
3040 if (gmx_mtop_ftype_count(mtop, F_LJ14) > 0 ||
3041 gmx_mtop_ftype_count(mtop, F_LJC14_Q) > 0 ||
3042 gmx_mtop_ftype_count(mtop, F_LJC_PAIRS_NB) > 0)
3044 fr->pairsTable = make_tables(fp, fr, tabpfn, rtab,
3045 GMX_MAKETABLES_14ONLY);
3049 fr->nwall = ir->nwall;
3050 if (ir->nwall && ir->wall_type == ewtTABLE)
3052 make_wall_tables(fp, ir, tabfn, &mtop->groups, fr);
3057 fcd->bondtab = make_bonded_tables(fp,
3058 F_TABBONDS, F_TABBONDSNC,
3060 fcd->angletab = make_bonded_tables(fp,
3063 fcd->dihtab = make_bonded_tables(fp,
3071 fprintf(debug, "No fcdata or table file name passed, can not read table, can not do bonded interactions\n");
3075 /* QM/MM initialization if requested
3079 fprintf(stderr, "QM/MM calculation requested.\n");
3082 fr->bQMMM = ir->bQMMM;
3083 fr->qr = mk_QMMMrec();
3085 /* Set all the static charge group info */
3086 fr->cginfo_mb = init_cginfo_mb(fp, mtop, fr, bNoSolvOpt,
3088 &fr->bExcl_IntraCGAll_InterCGNone);
3089 if (DOMAINDECOMP(cr))
3095 fr->cginfo = cginfo_expand(mtop->nmolblock, fr->cginfo_mb);
3098 if (!DOMAINDECOMP(cr))
3100 forcerec_set_ranges(fr, ncg_mtop(mtop), ncg_mtop(mtop),
3101 mtop->natoms, mtop->natoms, mtop->natoms);
3104 fr->print_force = print_force;
3107 /* coarse load balancing vars */
3112 /* Initialize neighbor search */
3114 init_ns(fp, cr, fr->ns, fr, mtop);
3116 if (cr->duty & DUTY_PP)
3118 gmx_nonbonded_setup(fr, bGenericKernelOnly);
3121 /* Initialize the thread working data for bonded interactions */
3122 init_bonded_threading(fp, mtop->groups.grps[egcENER].nr,
3123 &fr->bonded_threading);
3125 fr->nthread_ewc = gmx_omp_nthreads_get(emntBonded);
3126 snew(fr->ewc_t, fr->nthread_ewc);
3127 snew(fr->excl_load, fr->nthread_ewc + 1);
3129 /* fr->ic is used both by verlet and group kernels (to some extent) now */
3130 init_interaction_const(fp, &fr->ic, fr);
3131 init_interaction_const_tables(fp, fr->ic, rtab);
3133 if (fr->cutoff_scheme == ecutsVERLET)
3135 // We checked the cut-offs in grompp, but double-check here.
3136 // We have PME+LJcutoff kernels for rcoulomb>rvdw.
3137 if (EEL_PME_EWALD(ir->coulombtype) && ir->vdwtype == eelCUT)
3139 GMX_RELEASE_ASSERT(ir->rcoulomb >= ir->rvdw, "With Verlet lists and PME we should have rcoulomb>=rvdw");
3143 GMX_RELEASE_ASSERT(ir->rcoulomb == ir->rvdw, "With Verlet lists and no PME rcoulomb and rvdw should be identical");
3146 init_nb_verlet(fp, &fr->nbv, bFEP_NonBonded, ir, fr, cr, nbpu_opt);
3149 if (ir->eDispCorr != edispcNO)
3151 calc_enervirdiff(fp, ir->eDispCorr, fr);
/* Debug-printing convenience macros used by pr_forcerec() below.
 * Each prints "name: value" to fp, using the stringified argument
 * expression (#r, #i, #b) as the name label. pr_bool relies on
 * gmx::boolToString() to render TRUE/FALSE as text. */
3155 #define pr_real(fp, r) fprintf(fp, "%s: %e\n",#r, r)
3156 #define pr_int(fp, i) fprintf((fp), "%s: %d\n",#i, i)
3157 #define pr_bool(fp, b) fprintf((fp), "%s: %s\n",#b, gmx::boolToString(b))
/* Dumps a selection of t_forcerec fields to fp for debugging:
 * the neighbour-list and Coulomb cut-offs, the 1-4 charge fudge
 * factor, the grid-search flag, and the size of each non-bonded
 * interaction table.
 * NOTE(review): several lines of this function (the declaration of
 * the loop variable i, braces, and possibly further fields) are not
 * visible in this view — confirm against the full source. */
3159 void pr_forcerec(FILE *fp, t_forcerec *fr)
/* Cut-off radii and electrostatic 1-4 scaling factor */
3163 pr_real(fp, fr->rlist);
3164 pr_real(fp, fr->rcoulomb);
3165 pr_real(fp, fr->fudgeQQ);
/* Whether grid-based neighbour searching is in use */
3166 pr_bool(fp, fr->bGrid);
/* cg0/hcg printing was disabled by the original authors */
3167 /*pr_int(fp,fr->cg0);
3168 pr_int(fp,fr->hcg);*/
/* One combined elec+vdw table per non-bonded list; print its size */
3169 for (i = 0; i < fr->nnblists; i++)
3171 pr_int(fp, fr->nblists[i].table_elec_vdw->n);
3173 pr_real(fp, fr->rcoulomb_switch);
/* NOTE(review): rcoulomb is printed a second time here — presumably
 * intentional in the original, but verify against the full file. */
3174 pr_real(fp, fr->rcoulomb);
/* Divides the exclusion-list work of the local topology evenly over
 * the fr->nthread_ewc bonded/Ewald-correction threads.
 *
 * First the total number of exclusion entries (ntot) is accumulated
 * over all exclusion-list rows, then for each thread t the boundary
 * row index i at which the running count reaches the proportional
 * target (ntot*t)/nthread_ewc is stored in fr->excl_load[t].
 * fr->excl_load thus holds nthread_ewc+1 monotone row boundaries,
 * with fr->excl_load[0] == 0 (the array was allocated with
 * nthread_ewc+1 entries at init time).
 *
 * NOTE(review): the statements that actually accumulate into ntot/n
 * inside the inner j-loops are not visible in this view — presumably
 * they weight entries (e.g. self vs. pair exclusions); confirm
 * against the full source. */
3179 void forcerec_set_excl_load(t_forcerec *fr,
3180 const gmx_localtop_t *top)
3183 int t, i, j, ntot, n, ntarget;
/* excls.index[i]..excls.index[i+1] delimits the exclusions of row i */
3185 ind = top->excls.index;
/* First pass: count the total amount of exclusion work (ntot) */
3189 for (i = 0; i < top->excls.nr; i++)
3191 for (j = ind[i]; j < ind[i+1]; j++)
/* Thread 0 always starts at row 0 */
3200 fr->excl_load[0] = 0;
/* Second pass: advance i until each thread's share of the total
 * work is reached, recording the row boundary per thread */
3203 for (t = 1; t <= fr->nthread_ewc; t++)
3205 ntarget = (ntot*t)/fr->nthread_ewc;
3206 while (i < top->excls.nr && n < ntarget)
3208 for (j = ind[i]; j < ind[i+1]; j++)
3217 fr->excl_load[t] = i;
3221 /* Frees GPU memory and destroys the GPU context.
3223 * Note that this function needs to be called even if GPUs are not used
3224 * in this run because the PME ranks have no knowledge of whether GPUs
3225 * are used or not, but all ranks need to enter the barrier below.
3227 void free_gpu_resources(const t_forcerec *fr,
3228 const t_commrec *cr,
3229 const gmx_gpu_info_t *gpu_info,
3230 const gmx_gpu_opt_t *gpu_opt)
3232 gmx_bool bIsPPrankUsingGPU;
3233 char gpu_err_str[STRLEN];
3235 bIsPPrankUsingGPU = (cr->duty & DUTY_PP) && fr && fr->nbv && fr->nbv->bUseGPU;
3237 if (bIsPPrankUsingGPU)
3239 /* free nbnxn data in GPU memory */
3240 nbnxn_gpu_free(fr->nbv->gpu_nbv);
3241 /* stop the GPU profiler (only CUDA) */
3244 /* With tMPI we need to wait for all ranks to finish deallocation before
3245 * destroying the CUDA context in free_gpu() as some tMPI ranks may be sharing
3248 * This is not a concern in OpenCL where we use one context per rank which
3249 * is freed in nbnxn_gpu_free().
3251 * Note: as only PP ranks need to free GPU resources, so it is safe to
3252 * not call the barrier on PME ranks.
3259 #endif /* GMX_THREAD_MPI */
3261 /* uninitialize GPU (by destroying the context) */
3262 if (!free_cuda_gpu(cr->rank_pp_intranode, gpu_err_str, gpu_info, gpu_opt))
3264 gmx_warning("On rank %d failed to free GPU #%d: %s",
3265 cr->nodeid, get_current_cuda_gpu_device_id(), gpu_err_str);