1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015,2016,2017, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 #include "gmxpre.h"
38
39 #include "sim_util.h"
40
41 #include "config.h"
42
43 #include <assert.h>
44 #include <math.h>
45 #include <stdio.h>
46 #include <string.h>
47
48 #include <cstdint>
49
50 #include <array>
51
52 #include "gromacs/domdec/domdec.h"
53 #include "gromacs/domdec/domdec_struct.h"
54 #include "gromacs/essentialdynamics/edsam.h"
55 #include "gromacs/ewald/pme.h"
56 #include "gromacs/gmxlib/chargegroup.h"
57 #include "gromacs/gmxlib/network.h"
58 #include "gromacs/gmxlib/nrnb.h"
59 #include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
60 #include "gromacs/gmxlib/nonbonded/nb_kernel.h"
61 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
62 #include "gromacs/imd/imd.h"
63 #include "gromacs/listed-forces/bonded.h"
64 #include "gromacs/listed-forces/disre.h"
65 #include "gromacs/listed-forces/orires.h"
66 #include "gromacs/math/functions.h"
67 #include "gromacs/math/units.h"
68 #include "gromacs/math/vec.h"
69 #include "gromacs/math/vecdump.h"
70 #include "gromacs/mdlib/calcmu.h"
71 #include "gromacs/mdlib/constr.h"
72 #include "gromacs/mdlib/force.h"
73 #include "gromacs/mdlib/forcerec.h"
74 #include "gromacs/mdlib/genborn.h"
75 #include "gromacs/mdlib/gmx_omp_nthreads.h"
76 #include "gromacs/mdlib/mdrun.h"
77 #include "gromacs/mdlib/nb_verlet.h"
78 #include "gromacs/mdlib/nbnxn_atomdata.h"
79 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
80 #include "gromacs/mdlib/nbnxn_grid.h"
81 #include "gromacs/mdlib/nbnxn_search.h"
82 #include "gromacs/mdlib/qmmm.h"
83 #include "gromacs/mdlib/update.h"
84 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
85 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
86 #include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
87 #include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
88 #include "gromacs/mdtypes/commrec.h"
89 #include "gromacs/mdtypes/inputrec.h"
90 #include "gromacs/mdtypes/md_enums.h"
91 #include "gromacs/pbcutil/ishift.h"
92 #include "gromacs/pbcutil/mshift.h"
93 #include "gromacs/pbcutil/pbc.h"
94 #include "gromacs/pulling/pull.h"
95 #include "gromacs/pulling/pull_rotation.h"
96 #include "gromacs/timing/cyclecounter.h"
97 #include "gromacs/timing/gpu_timing.h"
98 #include "gromacs/timing/wallcycle.h"
99 #include "gromacs/timing/wallcyclereporting.h"
100 #include "gromacs/timing/walltime_accounting.h"
101 #include "gromacs/utility/basedefinitions.h"
102 #include "gromacs/utility/cstringutil.h"
103 #include "gromacs/utility/exceptions.h"
104 #include "gromacs/utility/fatalerror.h"
105 #include "gromacs/utility/gmxassert.h"
106 #include "gromacs/utility/gmxmpi.h"
107 #include "gromacs/utility/pleasecite.h"
108 #include "gromacs/utility/smalloc.h"
109 #include "gromacs/utility/sysinfo.h"
110
111 #include "nbnxn_gpu.h"
112
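/* Print the current step and an estimate of the remaining wall-clock time,
 * or the performance in ns/day for runs without a fixed step count, to out.
 */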
113 void print_time(FILE                     *out,
114                 gmx_walltime_accounting_t walltime_accounting,
115                 gmx_int64_t               step,
116                 t_inputrec               *ir,
117                 t_commrec gmx_unused     *cr)
118 {
119     time_t finish;
120     char   timebuf[STRLEN];
121     double dt, elapsed_seconds, time_per_step;
122     char   buf[48];
123
124 #if !GMX_THREAD_MPI
125     if (!PAR(cr))
126 #endif
127     {
128         fprintf(out, "\r");
129     }
130     fprintf(out, "step %s", gmx_step_str(step, buf));
131     fflush(out);
132
133     if ((step >= ir->nstlist))
134     {
135         double seconds_since_epoch = gmx_gettime();
136         elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
137         time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
138         dt              = (ir->nsteps + ir->init_step - step) * time_per_step;
139
140         if (ir->nsteps >= 0)
141         {
142             if (dt >= 300)
143             {
144                 finish = (time_t) (seconds_since_epoch + dt);
145                 gmx_ctime_r(&finish, timebuf, STRLEN);
146                 sprintf(buf, "%s", timebuf);
147                 buf[strlen(buf)-1] = '\0';
148                 fprintf(out, ", will finish %s", buf);
149             }
150             else
151             {
152                 fprintf(out, ", remaining wall clock time: %5d s          ", (int)dt);
153             }
154         }
155         else
156         {
157             fprintf(out, " performance: %.1f ns/day    ",
158                     ir->delta_t/1000*24*60*60/time_per_step);
159         }
160     }
161 #if !GMX_THREAD_MPI
162     if (PAR(cr))
163     {
164         fprintf(out, "\n");
165     }
166 #endif
167
168     fflush(out);
169 }
170
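/* Print a message with the given title, rank and time stamp to the log file. */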
171 void print_date_and_time(FILE *fplog, int nodeid, const char *title,
172                          double the_time)
173 {
174     char   time_string[STRLEN];
175
176     if (!fplog)
177     {
178         return;
179     }
180
181     {
182         int    i;
183         char   timebuf[STRLEN];
184         time_t temp_time = (time_t) the_time;
185
186         gmx_ctime_r(&temp_time, timebuf, STRLEN);
187         for (i = 0; timebuf[i] >= ' '; i++)
188         {
189             time_string[i] = timebuf[i];
190         }
191         time_string[i] = '\0';
192     }
193
194     fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
195 }
196
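/* Note in the log file when the task given by name was started. */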
197 void print_start(FILE *fplog, t_commrec *cr,
198                  gmx_walltime_accounting_t walltime_accounting,
199                  const char *name)
200 {
201     char buf[STRLEN];
202
203     sprintf(buf, "Started %s", name);
204     print_date_and_time(fplog, cr->nodeid, buf,
205                         walltime_accounting_get_start_time_stamp(walltime_accounting));
206 }
207
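/*! \brief Add the forces in \p forceToAdd to \p f, using OpenMP parallelization over the atoms. */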
208 static void sum_forces(rvec f[], const PaddedRVecVector *forceToAdd)
209 {
210     /* TODO: remove this - 1 when padding is properly implemented */
211     int         end  = forceToAdd->size() - 1;
212     const rvec *fAdd = as_rvec_array(forceToAdd->data());
213
214     // cppcheck-suppress unreadVariable
215     int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
216 #pragma omp parallel for num_threads(nt) schedule(static)
217     for (int i = 0; i < end; i++)
218     {
219         rvec_inc(f[i], fAdd[i]);
220     }
221 }
222
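/*! \brief Calculate the short-range virial contribution of the local atoms and store it in \p vir_part. */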
223 static void calc_virial(int start, int homenr, rvec x[], rvec f[],
224                         tensor vir_part, t_graph *graph, matrix box,
225                         t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
226 {
227     int    i;
228
229     /* The short-range virial from surrounding boxes */
230     clear_mat(vir_part);
231     calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
232     inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
233
234     /* Calculate partial virial, for local atoms only, based on short range.
235      * Total virial is computed in global_stat, called from do_md
236      */
237     f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
238     inc_nrnb(nrnb, eNR_VIRIAL, homenr);
239
240     /* Add position restraint contribution */
241     for (i = 0; i < DIM; i++)
242     {
243         vir_part[i][i] += fr->vir_diag_posres[i];
244     }
245
246     /* Add wall contribution */
247     for (i = 0; i < DIM; i++)
248     {
249         vir_part[i][ZZ] += fr->vir_wall_z[i];
250     }
251
252     if (debug)
253     {
254         pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
255     }
256 }
257
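/*! \brief Compute the COM pull potential and forces, accumulating the energy and dV/dlambda into \p enerd. */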
258 static void pull_potential_wrapper(t_commrec *cr,
259                                    t_inputrec *ir,
260                                    matrix box, rvec x[],
261                                    rvec f[],
262                                    tensor vir_force,
263                                    t_mdatoms *mdatoms,
264                                    gmx_enerdata_t *enerd,
265                                    real *lambda,
266                                    double t,
267                                    gmx_wallcycle_t wcycle)
268 {
269     t_pbc  pbc;
270     real   dvdl;
271
272     /* Calculate the center of mass forces; this requires communication,
273      * which is why pull_potential is called close to other communication.
274      * The virial contribution is calculated directly,
275      * which is why we call pull_potential after calc_virial.
276      */
277     wallcycle_start(wcycle, ewcPULLPOT);
278     set_pbc(&pbc, ir->ePBC, box);
279     dvdl                     = 0;
280     enerd->term[F_COM_PULL] +=
281         pull_potential(ir->pull_work, mdatoms, &pbc,
282                        cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
283     enerd->dvdl_lin[efptRESTRAINT] += dvdl;
284     wallcycle_stop(wcycle, ewcPULLPOT);
285 }
286
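/*! \brief With separate PME ranks, receive the long-range forces, virial and energies from the PME ranks and accumulate them. */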
287 static void pme_receive_force_ener(t_commrec      *cr,
288                                    gmx_wallcycle_t wcycle,
289                                    gmx_enerdata_t *enerd,
290                                    t_forcerec     *fr)
291 {
292     real   e_q, e_lj, dvdl_q, dvdl_lj;
293     float  cycles_ppdpme, cycles_seppme;
294
295     cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
296     dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
297
298     /* In case of node-splitting, the PP nodes receive the long-range
299      * forces, virial and energy from the PME nodes here.
300      */
301     wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
302     dvdl_q  = 0;
303     dvdl_lj = 0;
304     gmx_pme_receive_f(cr, as_rvec_array(fr->f_novirsum->data()), fr->vir_el_recip, &e_q,
305                       fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
306                       &cycles_seppme);
307     enerd->term[F_COUL_RECIP] += e_q;
308     enerd->term[F_LJ_RECIP]   += e_lj;
309     enerd->dvdl_lin[efptCOUL] += dvdl_q;
310     enerd->dvdl_lin[efptVDW]  += dvdl_lj;
311
312     if (wcycle)
313     {
314         dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
315     }
316     wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
317 }
318
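/*! \brief Print coordinates and force of atoms with a force larger than \p forceTolerance, and issue a fatal error when non-finite forces are found. */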
319 static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
320                                gmx_int64_t step, real forceTolerance,
321                                const rvec *x, const rvec *f)
322 {
323     real           force2Tolerance = gmx::square(forceTolerance);
324     std::uintmax_t numNonFinite    = 0;
325     for (int i = 0; i < md->homenr; i++)
326     {
327         real force2    = norm2(f[i]);
328         bool nonFinite = !std::isfinite(force2);
329         if (force2 >= force2Tolerance || nonFinite)
330         {
331             fprintf(fp, "step %" GMX_PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
332                     step,
333                     ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
334         }
335         if (nonFinite)
336         {
337             numNonFinite++;
338         }
339     }
340     if (numNonFinite > 0)
341     {
342         /* Note that with MPI this fatal call on one rank might interrupt
343          * the printing on other ranks. But we can only avoid that with
344          * an expensive MPI barrier that we would need at each step.
345          */
346         gmx_fatal(FARGS, "At step %" GMX_PRId64 " detected non-finite forces on %ju atoms", step, numNonFinite);
347     }
348 }
349
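/*! \brief Post-process the forces: spread mesh forces over virtual sites, add the no-virial-sum forces and their virial contributions when requested, and optionally report large forces. */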
350 static void post_process_forces(t_commrec *cr,
351                                 gmx_int64_t step,
352                                 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
353                                 gmx_localtop_t *top,
354                                 matrix box, rvec x[],
355                                 rvec f[],
356                                 tensor vir_force,
357                                 t_mdatoms *mdatoms,
358                                 t_graph *graph,
359                                 t_forcerec *fr, gmx_vsite_t *vsite,
360                                 int flags)
361 {
362     if (fr->bF_NoVirSum)
363     {
364         if (vsite)
365         {
366             /* Spread the mesh force on virtual sites to the other particles...
367              * This is parallelized. MPI communication is performed
368              * if the constructing atoms aren't local.
369              */
370             wallcycle_start(wcycle, ewcVSITESPREAD);
371             spread_vsite_f(vsite, x, as_rvec_array(fr->f_novirsum->data()), nullptr,
372                            (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
373                            nrnb,
374                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
375             wallcycle_stop(wcycle, ewcVSITESPREAD);
376         }
377         if (flags & GMX_FORCE_VIRIAL)
378         {
379             /* Now add the forces, this is local */
380             sum_forces(f, fr->f_novirsum);
381
382             if (EEL_FULL(fr->eeltype))
383             {
384                 /* Add the mesh contribution to the virial */
385                 m_add(vir_force, fr->vir_el_recip, vir_force);
386             }
387             if (EVDW_PME(fr->vdwtype))
388             {
389                 /* Add the mesh contribution to the virial */
390                 m_add(vir_force, fr->vir_lj_recip, vir_force);
391             }
392             if (debug)
393             {
394                 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
395             }
396         }
397     }
398
399     if (fr->print_force >= 0)
400     {
401         print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
402     }
403 }
404
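/*! \brief Launch or run the Verlet-scheme non-bonded kernel for interaction locality \p ilocality and update the flop counters. */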
405 static void do_nb_verlet(t_forcerec *fr,
406                          interaction_const_t *ic,
407                          gmx_enerdata_t *enerd,
408                          int flags, int ilocality,
409                          int clearF,
410                          t_nrnb *nrnb,
411                          gmx_wallcycle_t wcycle)
412 {
413     int                        enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
414     nonbonded_verlet_group_t  *nbvg;
415     gmx_bool                   bUsingGpuKernels;
416
417     if (!(flags & GMX_FORCE_NONBONDED))
418     {
419         /* skip non-bonded calculation */
420         return;
421     }
422
423     nbvg = &fr->nbv->grp[ilocality];
424
425     /* GPU kernel launch overhead is already timed separately */
426     if (fr->cutoff_scheme != ecutsVERLET)
427     {
428         gmx_incons("Invalid cut-off scheme passed!");
429     }
430
431     bUsingGpuKernels = (nbvg->kernel_type == nbnxnk8x8x8_GPU);
432
433     if (!bUsingGpuKernels)
434     {
435         wallcycle_sub_start(wcycle, ewcsNONBONDED);
436     }
437     switch (nbvg->kernel_type)
438     {
439         case nbnxnk4x4_PlainC:
440             nbnxn_kernel_ref(&nbvg->nbl_lists,
441                              nbvg->nbat, ic,
442                              fr->shift_vec,
443                              flags,
444                              clearF,
445                              fr->fshift[0],
446                              enerd->grpp.ener[egCOULSR],
447                              fr->bBHAM ?
448                              enerd->grpp.ener[egBHAMSR] :
449                              enerd->grpp.ener[egLJSR]);
450             break;
451
452         case nbnxnk4xN_SIMD_4xN:
453             nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
454                                   nbvg->nbat, ic,
455                                   nbvg->ewald_excl,
456                                   fr->shift_vec,
457                                   flags,
458                                   clearF,
459                                   fr->fshift[0],
460                                   enerd->grpp.ener[egCOULSR],
461                                   fr->bBHAM ?
462                                   enerd->grpp.ener[egBHAMSR] :
463                                   enerd->grpp.ener[egLJSR]);
464             break;
465         case nbnxnk4xN_SIMD_2xNN:
466             nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
467                                    nbvg->nbat, ic,
468                                    nbvg->ewald_excl,
469                                    fr->shift_vec,
470                                    flags,
471                                    clearF,
472                                    fr->fshift[0],
473                                    enerd->grpp.ener[egCOULSR],
474                                    fr->bBHAM ?
475                                    enerd->grpp.ener[egBHAMSR] :
476                                    enerd->grpp.ener[egLJSR]);
477             break;
478
479         case nbnxnk8x8x8_GPU:
480             nbnxn_gpu_launch_kernel(fr->nbv->gpu_nbv, nbvg->nbat, flags, ilocality);
481             break;
482
483         case nbnxnk8x8x8_PlainC:
484             nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
485                                  nbvg->nbat, ic,
486                                  fr->shift_vec,
487                                  flags,
488                                  clearF,
489                                  nbvg->nbat->out[0].f,
490                                  fr->fshift[0],
491                                  enerd->grpp.ener[egCOULSR],
492                                  fr->bBHAM ?
493                                  enerd->grpp.ener[egBHAMSR] :
494                                  enerd->grpp.ener[egLJSR]);
495             break;
496
497         default:
498             gmx_incons("Invalid nonbonded kernel type passed!");
499
500     }
501     if (!bUsingGpuKernels)
502     {
503         wallcycle_sub_stop(wcycle, ewcsNONBONDED);
504     }
505
506     if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
507     {
508         enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
509     }
510     else if ((!bUsingGpuKernels && nbvg->ewald_excl == ewaldexclAnalytical) ||
511              (bUsingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(fr->nbv->gpu_nbv)))
512     {
513         enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
514     }
515     else
516     {
517         enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
518     }
519     enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
520     if (flags & GMX_FORCE_ENERGY)
521     {
522         /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
523         enr_nbnxn_kernel_ljc += 1;
524         enr_nbnxn_kernel_lj  += 1;
525     }
526
527     inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
528              nbvg->nbl_lists.natpair_ljq);
529     inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
530              nbvg->nbl_lists.natpair_lj);
531     /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
532     inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
533              nbvg->nbl_lists.natpair_q);
534
535     if (ic->vdw_modifier == eintmodFORCESWITCH)
536     {
537         /* We add up the switch cost separately */
538         inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
539                  nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
540     }
541     if (ic->vdw_modifier == eintmodPOTSWITCH)
542     {
543         /* We add up the switch cost separately */
544         inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
545                  nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
546     }
547     if (ic->vdwtype == evdwPME)
548     {
549         /* We add up the LJ Ewald cost separately */
550         inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
551                  nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
552     }
553 }
554
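/*! \brief Compute the perturbed non-bonded interactions with the free-energy kernel, including foreign-lambda energies when requested. */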
555 static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
556                              t_forcerec           *fr,
557                              rvec                  x[],
558                              rvec                  f[],
559                              t_mdatoms            *mdatoms,
560                              t_lambda             *fepvals,
561                              real                 *lambda,
562                              gmx_enerdata_t       *enerd,
563                              int                   flags,
564                              t_nrnb               *nrnb,
565                              gmx_wallcycle_t       wcycle)
566 {
567     int              donb_flags;
568     nb_kernel_data_t kernel_data;
569     real             lam_i[efptNR];
570     real             dvdl_nb[efptNR];
571     int              th;
572     int              i, j;
573
574     donb_flags = 0;
575     /* Add short-range interactions */
576     donb_flags |= GMX_NONBONDED_DO_SR;
577
578     /* Currently all group scheme kernels always calculate (shift-)forces */
579     if (flags & GMX_FORCE_FORCES)
580     {
581         donb_flags |= GMX_NONBONDED_DO_FORCE;
582     }
583     if (flags & GMX_FORCE_VIRIAL)
584     {
585         donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
586     }
587     if (flags & GMX_FORCE_ENERGY)
588     {
589         donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
590     }
591
592     kernel_data.flags  = donb_flags;
593     kernel_data.lambda = lambda;
594     kernel_data.dvdl   = dvdl_nb;
595
596     kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
597     kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];
598
599     /* reset free energy components */
600     for (i = 0; i < efptNR; i++)
601     {
602         dvdl_nb[i]  = 0;
603     }
604
605     assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);
606
607     wallcycle_sub_start(wcycle, ewcsNONBONDED);
608 #pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
609     for (th = 0; th < nbl_lists->nnbl; th++)
610     {
611         try
612         {
613             gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
614                                       x, f, fr, mdatoms, &kernel_data, nrnb);
615         }
616         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
617     }
618
619     if (fepvals->sc_alpha != 0)
620     {
621         enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
622         enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
623     }
624     else
625     {
626         enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
627         enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
628     }
629
630     /* If we do foreign lambda and we have soft-core interactions
631      * we have to recalculate the (non-linear) energy contributions.
632      */
633     if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
634     {
635         kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
636         kernel_data.lambda         = lam_i;
637         kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
638         kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
639         /* Note that we add to kernel_data.dvdl, but ignore the result */
640
641         for (i = 0; i < enerd->n_lambda; i++)
642         {
643             for (j = 0; j < efptNR; j++)
644             {
645                 lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
646             }
647             reset_foreign_enerdata(enerd);
648 #pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
649             for (th = 0; th < nbl_lists->nnbl; th++)
650             {
651                 try
652                 {
653                     gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
654                                               x, f, fr, mdatoms, &kernel_data, nrnb);
655                 }
656                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
657             }
658
659             sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
660             enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
661         }
662     }
663
664     wallcycle_sub_stop(wcycle, ewcsNONBONDED);
665 }
666
667 gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
668 {
669     return nbv != nullptr && nbv->bUseGPU;
670 }
671
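/*! \brief Zero \p n rvec elements of \p v, using OpenMP threads only when that is beneficial. */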
672 static gmx_inline void clear_rvecs_omp(int n, rvec v[])
673 {
674     int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);
675
676     /* Note that we would like to avoid this conditional by putting it
677      * into the omp pragma instead, but then we still take the full
678      * omp parallel for overhead (at least with gcc5).
679      */
680     if (nth == 1)
681     {
682         for (int i = 0; i < n; i++)
683         {
684             clear_rvec(v[i]);
685         }
686     }
687     else
688     {
689 #pragma omp parallel for num_threads(nth) schedule(static)
690         for (int i = 0; i < n; i++)
691         {
692             clear_rvec(v[i]);
693         }
694     }
695 }
696
697 /*! \brief  This routine checks if the potential energy is finite.
698  *
699  * Note that passing this check does not guarantee finite forces,
700  * since those use slightly different arithmetic. But in most cases
701  * there is just a narrow coordinate range where forces are not finite
702  * and energies are finite.
703  *
704  * \param[in] enerd  The energy data; the non-bonded group energies need to be added in here before calling this routine
705  */
706 static void checkPotentialEnergyValidity(const gmx_enerdata_t *enerd)
707 {
708     if (!std::isfinite(enerd->term[F_EPOT]))
709     {
710         gmx_fatal(FARGS, "The total potential energy is %g, which is not finite. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A non-finite potential energy can be caused by overlapping bonded interactions or by very large or NaN coordinate values. Usually this is caused by a poorly equilibrated or unequilibrated initial configuration, or by incorrect interactions or parameters in the topology.",
711                   enerd->term[F_EPOT],
712                   enerd->term[F_LJ],
713                   enerd->term[F_COUL_SR]);
714     }
715 }
716
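/* Compute the forces and energies for one step with the Verlet cut-off scheme:
 * pair search, (GPU) non-bonded kernels, bonded interactions, PME and
 * the force post-processing steps.
 */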
717 void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
718                          t_inputrec *inputrec,
719                          gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
720                          gmx_localtop_t *top,
721                          gmx_groups_t gmx_unused *groups,
722                          matrix box, rvec x[], history_t *hist,
723                          PaddedRVecVector *force,
724                          tensor vir_force,
725                          t_mdatoms *mdatoms,
726                          gmx_enerdata_t *enerd, t_fcdata *fcd,
727                          real *lambda, t_graph *graph,
728                          t_forcerec *fr, interaction_const_t *ic,
729                          gmx_vsite_t *vsite, rvec mu_tot,
730                          double t, gmx_edsam_t ed,
731                          gmx_bool bBornRadii,
732                          int flags)
733 {
734     int                 cg1, i, j;
735     double              mu[2*DIM];
736     gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM;
737     gmx_bool            bDoForces, bUseGPU, bUseOrEmulGPU;
738     gmx_bool            bDiffKernels = FALSE;
739     rvec                vzero, box_diag;
740     float               cycles_pme, cycles_force, cycles_wait_gpu;
741     /* TODO: a float cannot hold a cycle count without loss of precision.
742      * Build an object that can do this right and perhaps
743      * also be used by gmx_wallcycle_t */
744     gmx_cycles_t        cycleCountBeforeLocalWorkCompletes = 0;
745     nonbonded_verlet_t *nbv;
746
747     cycles_force    = 0;
748     cycles_wait_gpu = 0;
749     nbv             = fr->nbv;
750
751     const int start  = 0;
752     const int homenr = mdatoms->homenr;
753
754     clear_mat(vir_force);
755
756     if (DOMAINDECOMP(cr))
757     {
758         cg1 = cr->dd->ncg_tot;
759     }
760     else
761     {
762         cg1 = top->cgs.nr;
763     }
764     if (fr->n_tpi > 0)
765     {
766         cg1--;
767     }
768
769     bStateChanged = (flags & GMX_FORCE_STATECHANGED);
770     bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
771     bFillGrid     = (bNS && bStateChanged);
772     bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
773     bDoForces     = (flags & GMX_FORCE_FORCES);
774     bUseGPU       = fr->nbv->bUseGPU;
775     bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
776
777     if (bStateChanged)
778     {
779         update_forcerec(fr, box);
780
781         if (inputrecNeedMutot(inputrec))
782         {
783             /* Calculate total (local) dipole moment in a temporary common array.
784              * This makes it possible to sum them over nodes faster.
785              */
786             calc_mu(start, homenr,
787                     x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
788                     mu, mu+DIM);
789         }
790     }
791
792     if (fr->ePBC != epbcNONE)
793     {
794         /* Compute shift vectors every step,
795          * because of pressure coupling or box deformation!
796          */
797         if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
798         {
799             calc_shifts(box, fr->shift_vec);
800         }
801
802         if (bCalcCGCM)
803         {
804             put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
805             inc_nrnb(nrnb, eNR_SHIFTX, homenr);
806         }
807         else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
808         {
809             unshift_self(graph, box, x);
810         }
811     }
812
813     nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
814                                  fr->shift_vec, nbv->grp[0].nbat);
815
816 #if GMX_MPI
817     if (!(cr->duty & DUTY_PME))
818     {
819         gmx_bool bBS;
820         matrix   boxs;
821
822         /* Send particle coordinates to the pme nodes.
823          * Since this is only implemented for domain decomposition
824          * and domain decomposition does not use the graph,
825          * we do not need to worry about shifting.
826          */
827
828         wallcycle_start(wcycle, ewcPP_PMESENDX);
829
830         bBS = (inputrec->nwall == 2);
831         if (bBS)
832         {
833             copy_mat(box, boxs);
834             svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
835         }
836
837         gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
838                                  lambda[efptCOUL], lambda[efptVDW],
839                                  (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
840                                  step);
841
842         wallcycle_stop(wcycle, ewcPP_PMESENDX);
843     }
844 #endif /* GMX_MPI */
845
846     /* do gridding for pair search */
847     if (bNS)
848     {
849         if (graph && bStateChanged)
850         {
851             /* Calculate intramolecular shift vectors to make molecules whole */
852             mk_mshift(fplog, graph, fr->ePBC, box, x);
853         }
854
855         clear_rvec(vzero);
856         box_diag[XX] = box[XX][XX];
857         box_diag[YY] = box[YY][YY];
858         box_diag[ZZ] = box[ZZ][ZZ];
859
860         wallcycle_start(wcycle, ewcNS);
861         if (!fr->bDomDec)
862         {
863             wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
864             nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
865                               0, vzero, box_diag,
866                               0, mdatoms->homenr, -1, fr->cginfo, x,
867                               0, nullptr,
868                               nbv->grp[eintLocal].kernel_type,
869                               nbv->grp[eintLocal].nbat);
870             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
871         }
872         else
873         {
874             wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
875             nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
876                                        fr->cginfo, x,
877                                        nbv->grp[eintNonlocal].kernel_type,
878                                        nbv->grp[eintNonlocal].nbat);
879             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
880         }
881
882         if (nbv->ngrp == 1 ||
883             nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
884         {
885             nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
886                                nbv->nbs, mdatoms, fr->cginfo);
887         }
888         else
889         {
890             nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
891                                nbv->nbs, mdatoms, fr->cginfo);
892             nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
893                                nbv->nbs, mdatoms, fr->cginfo);
894         }
895         wallcycle_stop(wcycle, ewcNS);
896     }
897
898     /* initialize the GPU atom data and copy shift vector */
899     if (bUseGPU)
900     {
901         if (bNS)
902         {
903             wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
904             nbnxn_gpu_init_atomdata(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
905             wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
906         }
907
908         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
909         nbnxn_gpu_upload_shiftvec(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
910         wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
911     }
912
913     /* do local pair search */
914     if (bNS)
915     {
916         wallcycle_start_nocount(wcycle, ewcNS);
917         wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
918         nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
919                             &top->excls,
920                             ic->rlist,
921                             nbv->min_ci_balanced,
922                             &nbv->grp[eintLocal].nbl_lists,
923                             eintLocal,
924                             nbv->grp[eintLocal].kernel_type,
925                             nrnb);
926         wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
927
928         if (bUseGPU)
929         {
930             /* initialize local pair-list on the GPU */
931             nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
932                                     nbv->grp[eintLocal].nbl_lists.nbl[0],
933                                     eintLocal);
934         }
935         wallcycle_stop(wcycle, ewcNS);
936     }
937     else
938     {
939         wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
940         wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
941         nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
942                                         nbv->grp[eintLocal].nbat);
943         wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
944         wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
945     }
946
947     if (bUseGPU)
948     {
949         wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
950         /* launch local nonbonded F on GPU */
951         do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
952                      nrnb, wcycle);
953         wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
954     }
955
956     /* Communicate coordinates and sum dipole if necessary +
957        do non-local pair search */
958     if (DOMAINDECOMP(cr))
959     {
960         bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
961                         nbv->grp[eintLocal].kernel_type);
962
963         if (bDiffKernels)
964         {
965             /* With GPU+CPU non-bonded calculations we need to copy
966              * the local coordinates to the non-local nbat struct
967              * (in CPU format) as the non-local kernel call also
968              * calculates the local - non-local interactions.
969              */
970             wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
971             wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
972             nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
973                                             nbv->grp[eintNonlocal].nbat);
974             wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
975             wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
976         }
977
978         if (bNS)
979         {
980             wallcycle_start_nocount(wcycle, ewcNS);
981             wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
982
983             if (bDiffKernels)
984             {
985                 nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
986             }
987
988             nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
989                                 &top->excls,
990                                 ic->rlist,
991                                 nbv->min_ci_balanced,
992                                 &nbv->grp[eintNonlocal].nbl_lists,
993                                 eintNonlocal,
994                                 nbv->grp[eintNonlocal].kernel_type,
995                                 nrnb);
996
997             wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
998
999             if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_GPU)
1000             {
1001                 /* initialize non-local pair-list on the GPU */
1002                 nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
1003                                         nbv->grp[eintNonlocal].nbl_lists.nbl[0],
1004                                         eintNonlocal);
1005             }
1006             wallcycle_stop(wcycle, ewcNS);
1007         }
1008         else
1009         {
1010             wallcycle_start(wcycle, ewcMOVEX);
1011             dd_move_x(cr->dd, box, x);
1012             wallcycle_stop(wcycle, ewcMOVEX);
1013
1014             wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1015             wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1016             nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
1017                                             nbv->grp[eintNonlocal].nbat);
1018             wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1019             cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1020         }
1021
1022         if (bUseGPU && !bDiffKernels)
1023         {
1024             wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
1025             /* launch non-local nonbonded F on GPU */
1026             do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
1027                          nrnb, wcycle);
1028             cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1029         }
1030     }
1031
1032     if (bUseGPU)
1033     {
1034         /* launch D2H copy-back F */
1035         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1036         if (DOMAINDECOMP(cr) && !bDiffKernels)
1037         {
1038             nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintNonlocal].nbat,
1039                                      flags, eatNonlocal);
1040         }
1041         nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintLocal].nbat,
1042                                  flags, eatLocal);
1043         cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1044     }
1045
1046     if (bStateChanged && inputrecNeedMutot(inputrec))
1047     {
1048         if (PAR(cr))
1049         {
1050             gmx_sumd(2*DIM, mu, cr);
1051         }
1052
1053         for (i = 0; i < 2; i++)
1054         {
1055             for (j = 0; j < DIM; j++)
1056             {
1057                 fr->mu_tot[i][j] = mu[i*DIM + j];
1058             }
1059         }
1060     }
1061     if (fr->efep == efepNO)
1062     {
1063         copy_rvec(fr->mu_tot[0], mu_tot);
1064     }
1065     else
1066     {
1067         for (j = 0; j < DIM; j++)
1068         {
1069             mu_tot[j] =
1070                 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1071                 lambda[efptCOUL]*fr->mu_tot[1][j];
1072         }
1073     }
1074
1075     /* Reset energies */
1076     reset_enerdata(enerd);
1077     clear_rvecs(SHIFTS, fr->fshift);
1078
1079     if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
1080     {
1081         wallcycle_start(wcycle, ewcPPDURINGPME);
1082         dd_force_flop_start(cr->dd, nrnb);
1083     }
1084
1085     if (inputrec->bRot)
1086     {
1087         /* Enforced rotation has its own cycle counter that starts after the collective
1088          * coordinates have been communicated. It is added to ddCyclF to allow
1089          * for proper load-balancing */
1090         wallcycle_start(wcycle, ewcROT);
1091         do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1092         wallcycle_stop(wcycle, ewcROT);
1093     }
1094
1095     /* Temporary solution until all routines take PaddedRVecVector */
1096     rvec *f = as_rvec_array(force->data());
1097
1098     /* Start the force cycle counter.
1099      * This counter is stopped after do_force_lowlevel.
1100      * No parallel communication should occur while this counter is running,
1101      * since that will interfere with the dynamic load balancing.
1102      */
1103     wallcycle_start(wcycle, ewcFORCE);
1104     if (bDoForces)
1105     {
1106         /* Reset forces for which the virial is calculated separately:
1107          * PME/Ewald forces if necessary */
1108         if (fr->bF_NoVirSum)
1109         {
1110             if (flags & GMX_FORCE_VIRIAL)
1111             {
1112                 fr->f_novirsum = fr->forceBufferNoVirialSummation;
1113             }
1114             else
1115             {
1116                 /* We are not calculating the pressure so we do not need
1117                  * a separate array for forces that do not contribute
1118                  * to the pressure.
1119                  */
1120                 fr->f_novirsum = force;
1121             }
1122         }
1123
1124         if (fr->bF_NoVirSum)
1125         {
1126             if (flags & GMX_FORCE_VIRIAL)
1127             {
1128                 /* TODO: remove this - 1 when padding is properly implemented */
1129                 clear_rvecs_omp(fr->f_novirsum->size() - 1,
1130                                 as_rvec_array(fr->f_novirsum->data()));
1131             }
1132         }
1133         /* Clear the short- and long-range forces */
1134         clear_rvecs_omp(fr->natoms_force_constr, f);
1135
1136         clear_rvec(fr->vir_diag_posres);
1137     }
1138
1139     if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
1140     {
1141         clear_pull_forces(inputrec->pull_work);
1142     }
1143
1144     /* When the non-bonded forces are computed on the CPU, we calculate them here.
1145      * We do this before calling do_force_lowlevel, because in that
1146      * function, the listed forces are calculated before PME, which
1147      * does communication.  With this order, non-bonded and listed
1148      * force calculation imbalance can be balanced out by the domain
1149      * decomposition load balancing.
1150      */
1151
1152     if (!bUseOrEmulGPU)
1153     {
1154         /* Maybe we should move this into do_force_lowlevel */
1155         do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
1156                      nrnb, wcycle);
1157     }
1158
1159     if (fr->efep != efepNO)
1160     {
1161         /* Calculate the local and non-local free energy interactions here.
1162          * Happens here on the CPU both with and without GPU.
1163          */
1164         if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
1165         {
1166             do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
1167                              fr, x, f, mdatoms,
1168                              inputrec->fepvals, lambda,
1169                              enerd, flags, nrnb, wcycle);
1170         }
1171
1172         if (DOMAINDECOMP(cr) &&
1173             fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
1174         {
1175             do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
1176                              fr, x, f, mdatoms,
1177                              inputrec->fepvals, lambda,
1178                              enerd, flags, nrnb, wcycle);
1179         }
1180     }
1181
1182     if (!bUseOrEmulGPU || bDiffKernels)
1183     {
1184         int aloc;
1185
1186         if (DOMAINDECOMP(cr))
1187         {
1188             do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
1189                          bDiffKernels ? enbvClearFYes : enbvClearFNo,
1190                          nrnb, wcycle);
1191         }
1192
1193         if (!bUseOrEmulGPU)
1194         {
1195             aloc = eintLocal;
1196         }
1197         else
1198         {
1199             aloc = eintNonlocal;
1200         }
1201
1202         /* Add all the non-bonded forces to the normal force array.
1203          * This can be split into a local and a non-local part when overlapping
1204          * communication with computation under domain decomposition.
1205          */
1206         cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1207         wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1208         wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1209         nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
1210         wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1211         cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1212         wallcycle_start_nocount(wcycle, ewcFORCE);
1213
1214         /* if there are multiple fshift output buffers reduce them */
1215         if ((flags & GMX_FORCE_VIRIAL) &&
1216             nbv->grp[aloc].nbl_lists.nnbl > 1)
1217         {
1218             /* This is not in a subcounter because it takes a
1219                negligible and constant-sized amount of time */
1220             nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
1221                                                      fr->fshift);
1222         }
1223     }
1224
1225     /* update QMMMrec, if necessary */
1226     if (fr->bQMMM)
1227     {
1228         update_QMMMrec(cr, fr, x, mdatoms, box, top);
1229     }
1230
1231     /* Compute the bonded and non-bonded energies and optionally forces */
1232     do_force_lowlevel(fr, inputrec, &(top->idef),
1233                       cr, nrnb, wcycle, mdatoms,
1234                       x, hist, f, enerd, fcd, top, fr->born,
1235                       bBornRadii, box,
1236                       inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
1237                       flags, &cycles_pme);
1238
1239     cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1240
1241     if (ed)
1242     {
1243         do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1244     }
1245
1246     if (bUseOrEmulGPU && !bDiffKernels)
1247     {
1248         /* wait for non-local forces (or calculate in emulation mode) */
1249         if (DOMAINDECOMP(cr))
1250         {
1251             if (bUseGPU)
1252             {
1253                 float cycles_tmp;
1254
1255                 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1256                 nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
1257                                        flags, eatNonlocal,
1258                                        enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1259                                        fr->fshift);
1260                 cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1261                 cycles_wait_gpu += cycles_tmp;
1262                 cycles_force    += cycles_tmp;
1263             }
1264             else
1265             {
1266                 wallcycle_start_nocount(wcycle, ewcFORCE);
1267                 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
1268                              nrnb, wcycle);
1269                 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1270             }
1271             wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1272             wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1273             /* skip the reduction if there was no non-local work to do */
1274             if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
1275             {
1276                 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
1277                                                nbv->grp[eintNonlocal].nbat, f);
1278             }
1279             wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1280             cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1281         }
1282     }
1283
1284     if (bDoForces && DOMAINDECOMP(cr))
1285     {
1286         if (bUseGPU)
1287         {
1288             /* We are done with the CPU compute, but the GPU local non-bonded
1289              * kernel can still be running while we communicate the forces.
1290              * We start a counter here, so we can, hopefully, time the rest
1291              * of the GPU kernel execution and data transfer.
1292              */
1293             cycleCountBeforeLocalWorkCompletes = gmx_cycles_read();
1294         }
1295
1296         /* Communicate the forces */
1297         wallcycle_start(wcycle, ewcMOVEF);
1298         dd_move_f(cr->dd, f, fr->fshift);
1299         wallcycle_stop(wcycle, ewcMOVEF);
1300     }
1301
1302     if (bUseOrEmulGPU)
1303     {
1304         /* wait for local forces (or calculate in emulation mode) */
1305         if (bUseGPU)
1306         {
1307             float       cycles_tmp, cycles_wait_est;
1308             /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
1309              * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
1310              * but even with a step of 0.1 ms the difference is less than 1%
1311              * of the step time.
1312              */
1313             const float gpuWaitApiOverheadMargin = 2e6f; /* cycles */
1314
1315             wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1316             nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
1317                                    flags, eatLocal,
1318                                    enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1319                                    fr->fshift);
1320             cycles_tmp      = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1321
1322             if (bDoForces && DOMAINDECOMP(cr))
1323             {
1324                 cycles_wait_est = gmx_cycles_read() - cycleCountBeforeLocalWorkCompletes;
1325
1326                 if (cycles_tmp < gpuWaitApiOverheadMargin)
1327                 {
1328                     /* We measured few cycles, so it could be that the kernel
1329                      * and transfer finished earlier and there was no actual
1330                      * wait time, only API call overhead.
1331                      * Then the actual time could be anywhere between 0 and
1332                      * cycles_wait_est. As a compromise, we use half the time.
1333                      */
1334                     cycles_wait_est *= 0.5f;
1335                 }
1336             }
1337             else
1338             {
1339                 /* No force communication so we actually timed the wait */
1340                 cycles_wait_est = cycles_tmp;
1341             }
1342             /* Even though this is after dd_move_f, the actual task we are
1343              * waiting for runs asynchronously with dd_move_f and we usually
1344              * have nothing to balance it with, so we can and should add
1345              * the time to the force time for load balancing.
1346              */
1347             cycles_force    += cycles_wait_est;
1348             cycles_wait_gpu += cycles_wait_est;
1349
1350             /* now clear the GPU outputs while we finish the step on the CPU */
1351             wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1352             nbnxn_gpu_clear_outputs(nbv->gpu_nbv, flags);
1353             wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1354         }
1355         else
1356         {
1357             wallcycle_start_nocount(wcycle, ewcFORCE);
1358             do_nb_verlet(fr, ic, enerd, flags, eintLocal,
1359                          DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1360                          nrnb, wcycle);
1361             wallcycle_stop(wcycle, ewcFORCE);
1362         }
1363         wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1364         wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1365         nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
1366                                        nbv->grp[eintLocal].nbat, f);
1367         wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1368         wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1369     }
1370
1371     if (DOMAINDECOMP(cr))
1372     {
1373         dd_force_flop_stop(cr->dd, nrnb);
1374         if (wcycle)
1375         {
1376             dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1377             if (bUseGPU)
1378             {
1379                 dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
1380             }
1381         }
1382     }
1383
1384     if (bDoForces)
1385     {
1386         /* Compute forces due to electric field */
1387         if (fr->efield != nullptr)
1388         {
1389             fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
1390         }
1391
1392         /* If we have NoVirSum forces, but we do not calculate the virial,
1393          * we sum fr->f_novirsum=f later.
1394          */
1395         if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1396         {
1397             wallcycle_start(wcycle, ewcVSITESPREAD);
1398             spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
1399                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1400             wallcycle_stop(wcycle, ewcVSITESPREAD);
1401         }
1402
1403         if (flags & GMX_FORCE_VIRIAL)
1404         {
1405             /* Calculation of the virial must be done after vsites! */
1406             calc_virial(0, mdatoms->homenr, x, f,
1407                         vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1408         }
1409     }
1410
1411     if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
1412     {
1413         /* Since the COM pulling is always done mass-weighted, no forces are
1414          * applied to vsites and this call can be done after vsite spreading.
1415          */
1416         pull_potential_wrapper(cr, inputrec, box, x,
1417                                f, vir_force, mdatoms, enerd, lambda, t,
1418                                wcycle);
1419     }
1420
1421     /* Add the forces from enforced rotation potentials (if any) */
1422     if (inputrec->bRot)
1423     {
1424         wallcycle_start(wcycle, ewcROTadd);
1425         enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1426         wallcycle_stop(wcycle, ewcROTadd);
1427     }
1428
1429     /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1430     IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
1431
1432     if (PAR(cr) && !(cr->duty & DUTY_PME))
1433     {
1434         /* In case of node-splitting, the PP nodes receive the long-range
1435          * forces, virial and energy from the PME nodes here.
1436          */
1437         pme_receive_force_ener(cr, wcycle, enerd, fr);
1438     }
1439
1440     if (bDoForces)
1441     {
1442         post_process_forces(cr, step, nrnb, wcycle,
1443                             top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1444                             flags);
1445     }
1446
1447     if (flags & GMX_FORCE_ENERGY)
1448     {
1449         /* Sum the potential energy terms from group contributions */
1450         sum_epot(&(enerd->grpp), enerd->term);
1451
1452         if (!EI_TPI(inputrec->eI))
1453         {
1454             checkPotentialEnergyValidity(enerd);
1455         }
1456     }
1457 }
1458
1459 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1460                         t_inputrec *inputrec,
1461                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1462                         gmx_localtop_t *top,
1463                         gmx_groups_t *groups,
1464                         matrix box, rvec x[], history_t *hist,
1465                         PaddedRVecVector *force,
1466                         tensor vir_force,
1467                         t_mdatoms *mdatoms,
1468                         gmx_enerdata_t *enerd, t_fcdata *fcd,
1469                         real *lambda, t_graph *graph,
1470                         t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1471                         double t, gmx_edsam_t ed,
1472                         gmx_bool bBornRadii,
1473                         int flags)
1474 {
1475     int        cg0, cg1, i, j;
1476     double     mu[2*DIM];
1477     gmx_bool   bStateChanged, bNS, bFillGrid, bCalcCGCM;
1478     gmx_bool   bDoForces;
1479     float      cycles_pme, cycles_force;
1480
1481     const int  start  = 0;
1482     const int  homenr = mdatoms->homenr;
1483
1484     clear_mat(vir_force);
1485
1486     cg0 = 0;
1487     if (DOMAINDECOMP(cr))
1488     {
1489         cg1 = cr->dd->ncg_tot;
1490     }
1491     else
1492     {
1493         cg1 = top->cgs.nr;
1494     }
1495     if (fr->n_tpi > 0)
1496     {
1497         cg1--;
1498     }
1499
1500     bStateChanged  = (flags & GMX_FORCE_STATECHANGED);
1501     bNS            = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1502     /* Should we put the charge groups on the neighborsearching grid this step? */
1503     bFillGrid      = (bNS && bStateChanged);
1504     bCalcCGCM      = (bFillGrid && !DOMAINDECOMP(cr));
1505     bDoForces      = (flags & GMX_FORCE_FORCES);
1506
1507     if (bStateChanged)
1508     {
1509         update_forcerec(fr, box);
1510
1511         if (inputrecNeedMutot(inputrec))
1512         {
1513             /* Calculate total (local) dipole moment in a temporary common array.
1514              * This makes it possible to sum them over nodes faster.
1515              */
1516             calc_mu(start, homenr,
1517                     x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1518                     mu, mu+DIM);
1519         }
1520     }
1521
1522     if (fr->ePBC != epbcNONE)
1523     {
1524         /* Compute shift vectors every step,
1525          * because of pressure coupling or box deformation!
1526          */
1527         if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1528         {
1529             calc_shifts(box, fr->shift_vec);
1530         }
1531
1532         if (bCalcCGCM)
1533         {
1534             put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1535                                      &(top->cgs), x, fr->cg_cm);
1536             inc_nrnb(nrnb, eNR_CGCM, homenr);
1537             inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1538         }
1539         else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1540         {
1541             unshift_self(graph, box, x);
1542         }
1543     }
1544     else if (bCalcCGCM)
1545     {
1546         calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1547         inc_nrnb(nrnb, eNR_CGCM, homenr);
1548     }
1549
1550     if (bCalcCGCM && gmx_debug_at)
1551     {
1552         pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1553     }
1554
1555 #if GMX_MPI
1556     if (!(cr->duty & DUTY_PME))
1557     {
1558         gmx_bool bBS;
1559         matrix   boxs;
1560
1561         /* Send particle coordinates to the pme nodes.
1562          * Since this is only implemented for domain decomposition
1563          * and domain decomposition does not use the graph,
1564          * we do not need to worry about shifting.
1565          */
1566
1567         wallcycle_start(wcycle, ewcPP_PMESENDX);
1568
1569         bBS = (inputrec->nwall == 2);
1570         if (bBS)
1571         {
1572             copy_mat(box, boxs);
1573             svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1574         }
1575
1576         gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
1577                                  lambda[efptCOUL], lambda[efptVDW],
1578                                  (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
1579                                  step);
1580
1581         wallcycle_stop(wcycle, ewcPP_PMESENDX);
1582     }
1583 #endif /* GMX_MPI */
1584
1585     /* Communicate coordinates and sum dipole if necessary */
1586     if (DOMAINDECOMP(cr))
1587     {
1588         wallcycle_start(wcycle, ewcMOVEX);
1589         dd_move_x(cr->dd, box, x);
1590         wallcycle_stop(wcycle, ewcMOVEX);
1591     }
1592
1593     if (inputrecNeedMutot(inputrec))
1594     {
1595         if (bStateChanged)
1596         {
1597             if (PAR(cr))
1598             {
1599                 gmx_sumd(2*DIM, mu, cr);
1600             }
1601             for (i = 0; i < 2; i++)
1602             {
1603                 for (j = 0; j < DIM; j++)
1604                 {
1605                     fr->mu_tot[i][j] = mu[i*DIM + j];
1606                 }
1607             }
1608         }
1609         if (fr->efep == efepNO)
1610         {
1611             copy_rvec(fr->mu_tot[0], mu_tot);
1612         }
1613         else
1614         {
1615             for (j = 0; j < DIM; j++)
1616             {
1617                 mu_tot[j] =
1618                     (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1619             }
1620         }
1621     }
1622
1623     /* Reset energies */
1624     reset_enerdata(enerd);
1625     clear_rvecs(SHIFTS, fr->fshift);
1626
1627     if (bNS)
1628     {
1629         wallcycle_start(wcycle, ewcNS);
1630
1631         if (graph && bStateChanged)
1632         {
1633             /* Calculate intramolecular shift vectors to make molecules whole */
1634             mk_mshift(fplog, graph, fr->ePBC, box, x);
1635         }
1636
1637         /* Do the actual neighbour searching */
1638         ns(fplog, fr, box,
1639            groups, top, mdatoms,
1640            cr, nrnb, bFillGrid);
1641
1642         wallcycle_stop(wcycle, ewcNS);
1643     }
1644
1645     if (inputrec->implicit_solvent && bNS)
1646     {
1647         make_gb_nblist(cr, inputrec->gb_algorithm,
1648                        x, box, fr, &top->idef, graph, fr->born);
1649     }
1650
1651     if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
1652     {
1653         wallcycle_start(wcycle, ewcPPDURINGPME);
1654         dd_force_flop_start(cr->dd, nrnb);
1655     }
1656
1657     if (inputrec->bRot)
1658     {
1659         /* Enforced rotation has its own cycle counter that starts after the collective
1660          * coordinates have been communicated. It is added to ddCyclF to allow
1661          * for proper load-balancing */
1662         wallcycle_start(wcycle, ewcROT);
1663         do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1664         wallcycle_stop(wcycle, ewcROT);
1665     }
1666
1667     /* Temporary solution until all routines take PaddedRVecVector */
1668     rvec *f = as_rvec_array(force->data());
1669
1670     /* Start the force cycle counter.
1671      * This counter is stopped after do_force_lowlevel.
1672      * No parallel communication should occur while this counter is running,
1673      * since that will interfere with the dynamic load balancing.
1674      */
1675     wallcycle_start(wcycle, ewcFORCE);
1676
1677     if (bDoForces)
1678     {
1679         /* Reset forces for which the virial is calculated separately:
1680          * PME/Ewald forces if necessary */
1681         if (fr->bF_NoVirSum)
1682         {
1683             if (flags & GMX_FORCE_VIRIAL)
1684             {
1685                 fr->f_novirsum = fr->forceBufferNoVirialSummation;
1686                 /* TODO: remove this "- 1" when padding is properly implemented */
1687                 clear_rvecs(fr->f_novirsum->size() - 1,
1688                             as_rvec_array(fr->f_novirsum->data()));
1689             }
1690             else
1691             {
1692                 /* We are not calculating the pressure so we do not need
1693                  * a separate array for forces that do not contribute
1694                  * to the pressure.
1695                  */
1696                 fr->f_novirsum = force;
1697             }
1698         }
1699
1700         /* Clear the short- and long-range forces */
1701         clear_rvecs(fr->natoms_force_constr, f);
1702
1703         clear_rvec(fr->vir_diag_posres);
1704     }
1705     if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
1706     {
1707         clear_pull_forces(inputrec->pull_work);
1708     }
1709
1710     /* update QMMMrec, if necessary */
1711     if (fr->bQMMM)
1712     {
1713         update_QMMMrec(cr, fr, x, mdatoms, box, top);
1714     }
1715
1716     /* Compute the bonded and non-bonded energies and optionally forces */
1717     do_force_lowlevel(fr, inputrec, &(top->idef),
1718                       cr, nrnb, wcycle, mdatoms,
1719                       x, hist, f, enerd, fcd, top, fr->born,
1720                       bBornRadii, box,
1721                       inputrec->fepvals, lambda,
1722                       graph, &(top->excls), fr->mu_tot,
1723                       flags,
1724                       &cycles_pme);
1725
1726     cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1727
1728     if (ed)
1729     {
1730         do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1731     }
1732
1733     if (DOMAINDECOMP(cr))
1734     {
1735         dd_force_flop_stop(cr->dd, nrnb);
1736         if (wcycle)
1737         {
1738             dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1739         }
1740     }
1741
1742     if (bDoForces)
1743     {
1744         /* Compute forces due to electric field */
1745         if (fr->efield != nullptr)
1746         {
1747             fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
1748         }
1749
1750         /* Communicate the forces */
1751         if (DOMAINDECOMP(cr))
1752         {
1753             wallcycle_start(wcycle, ewcMOVEF);
1754             dd_move_f(cr->dd, f, fr->fshift);
1755             /* Do we need to communicate the separate force array
1756              * for terms that do not contribute to the single sum virial?
1757              * Position restraints and electric fields do not introduce
1758              * inter-cg forces, only full electrostatics methods do.
1759              * When we do not calculate the virial, fr->f_novirsum = f,
1760              * so we have already communicated these forces.
1761              */
1762             if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1763                 (flags & GMX_FORCE_VIRIAL))
1764             {
1765                 dd_move_f(cr->dd, as_rvec_array(fr->f_novirsum->data()), nullptr);
1766             }
1767             wallcycle_stop(wcycle, ewcMOVEF);
1768         }
1769
1770         /* If we have NoVirSum forces, but we do not calculate the virial,
1771          * we sum fr->f_novirsum=f later.
1772          */
1773         if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1774         {
1775             wallcycle_start(wcycle, ewcVSITESPREAD);
1776             spread_vsite_f(vsite, x, f, fr->fshift, FALSE, nullptr, nrnb,
1777                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1778             wallcycle_stop(wcycle, ewcVSITESPREAD);
1779         }
1780
1781         if (flags & GMX_FORCE_VIRIAL)
1782         {
1783             /* Calculation of the virial must be done after vsites! */
1784             calc_virial(0, mdatoms->homenr, x, f,
1785                         vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1786         }
1787     }
1788
1789     if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
1790     {
1791         pull_potential_wrapper(cr, inputrec, box, x,
1792                                f, vir_force, mdatoms, enerd, lambda, t,
1793                                wcycle);
1794     }
1795
1796     /* Add the forces from enforced rotation potentials (if any) */
1797     if (inputrec->bRot)
1798     {
1799         wallcycle_start(wcycle, ewcROTadd);
1800         enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1801         wallcycle_stop(wcycle, ewcROTadd);
1802     }
1803
1804     /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1805     IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
1806
1807     if (PAR(cr) && !(cr->duty & DUTY_PME))
1808     {
1809         /* In case of node-splitting, the PP nodes receive the long-range
1810          * forces, virial and energy from the PME nodes here.
1811          */
1812         pme_receive_force_ener(cr, wcycle, enerd, fr);
1813     }
1814
1815     if (bDoForces)
1816     {
1817         post_process_forces(cr, step, nrnb, wcycle,
1818                             top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1819                             flags);
1820     }
1821
1822     if (flags & GMX_FORCE_ENERGY)
1823     {
1824         /* Sum the potential energy terms from group contributions */
1825         sum_epot(&(enerd->grpp), enerd->term);
1826
1827         if (!EI_TPI(inputrec->eI))
1828         {
1829             checkPotentialEnergyValidity(enerd);
1830         }
1831     }
1832
1833 }
1834
1835 void do_force(FILE *fplog, t_commrec *cr,
1836               t_inputrec *inputrec,
1837               gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1838               gmx_localtop_t *top,
1839               gmx_groups_t *groups,
1840               matrix box, PaddedRVecVector *coordinates, history_t *hist,
1841               PaddedRVecVector *force,
1842               tensor vir_force,
1843               t_mdatoms *mdatoms,
1844               gmx_enerdata_t *enerd, t_fcdata *fcd,
1845               std::vector<real> *lambda, t_graph *graph,
1846               t_forcerec *fr,
1847               gmx_vsite_t *vsite, rvec mu_tot,
1848               double t, gmx_edsam_t ed,
1849               gmx_bool bBornRadii,
1850               int flags)
1851 {
1852     /* modify force flag if not doing nonbonded */
1853     if (!fr->bNonbonded)
1854     {
1855         flags &= ~GMX_FORCE_NONBONDED;
1856     }
1857
1858     GMX_ASSERT(coordinates->size() >= static_cast<unsigned int>(fr->natoms_force + 1), "We might need 1 element extra for SIMD");
1859     GMX_ASSERT(force->size() >= static_cast<unsigned int>(fr->natoms_force + 1), "We might need 1 element extra for SIMD");
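    /* The "+ 1" mirrors the extra element these buffers are allocated with:
     * (unaligned) 4-wide SIMD loads of rvec entries may read one element past
     * the last atom (see the matching comment in do_constrain_first). */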
1860
1861     rvec *x = as_rvec_array(coordinates->data());
1862
1863     switch (inputrec->cutoff_scheme)
1864     {
1865         case ecutsVERLET:
1866             do_force_cutsVERLET(fplog, cr, inputrec,
1867                                 step, nrnb, wcycle,
1868                                 top,
1869                                 groups,
1870                                 box, x, hist,
1871                                 force, vir_force,
1872                                 mdatoms,
1873                                 enerd, fcd,
1874                                 lambda->data(), graph,
1875                                 fr, fr->ic,
1876                                 vsite, mu_tot,
1877                                 t, ed,
1878                                 bBornRadii,
1879                                 flags);
1880             break;
1881         case ecutsGROUP:
1882             do_force_cutsGROUP(fplog, cr, inputrec,
1883                                step, nrnb, wcycle,
1884                                top,
1885                                groups,
1886                                box, x, hist,
1887                                force, vir_force,
1888                                mdatoms,
1889                                enerd, fcd,
1890                                lambda->data(), graph,
1891                                fr, vsite, mu_tot,
1892                                t, ed,
1893                                bBornRadii,
1894                                flags);
1895             break;
1896         default:
1897             gmx_incons("Invalid cut-off scheme passed!");
1898     }
1899 }
1900
1901
1902 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
1903                         t_inputrec *ir, t_mdatoms *md,
1904                         t_state *state, t_commrec *cr, t_nrnb *nrnb,
1905                         t_forcerec *fr, gmx_localtop_t *top)
1906 {
1907     int             i, m, start, end;
1908     gmx_int64_t     step;
1909     real            dt = ir->delta_t;
1910     real            dvdl_dum;
1911     rvec           *savex;
1912
1913     /* We need to allocate one element extra, since we might use
1914      * (unaligned) 4-wide SIMD loads to access rvec entries.
1915      */
1916     snew(savex, state->natoms + 1);
1917
1918     start = 0;
1919     end   = md->homenr;
1920
1921     if (debug)
1922     {
1923         fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
1924                 start, md->homenr, end);
1925     }
1926     /* Do a first constraint pass to reset particles... */
1927     step = ir->init_step;
1928     if (fplog)
1929     {
1930         char buf[STEPSTRSIZE];
1931         fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
1932                 gmx_step_str(step, buf));
1933     }
1934     dvdl_dum = 0;
1935
1936     /* constrain the current position */
1937     constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
1938               ir, cr, step, 0, 1.0, md,
1939               as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), nullptr,
1940               fr->bMolPBC, state->box,
1941               state->lambda[efptBONDED], &dvdl_dum,
1942               nullptr, nullptr, nrnb, econqCoord);
1943     if (EI_VV(ir->eI))
1944     {
1945         /* constrain the initial velocity, and save it */
1946         /* also may be useful if we need the ekin from the halfstep for velocity verlet */
1947         constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
1948                   ir, cr, step, 0, 1.0, md,
1949                   as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), as_rvec_array(state->v.data()),
1950                   fr->bMolPBC, state->box,
1951                   state->lambda[efptBONDED], &dvdl_dum,
1952                   nullptr, nullptr, nrnb, econqVeloc);
1953     }
1954     /* constrain the initial velocities at t-dt/2 */
1955     if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
1956     {
1957         for (i = start; (i < end); i++)
1958         {
1959             for (m = 0; (m < DIM); m++)
1960             {
1961                 /* Reverse the velocity */
1962                 state->v[i][m] = -state->v[i][m];
1963                 /* Store the position at t-dt in savex (v was just reversed, so x + dt*v = x - dt*v_original) */
1964                 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
1965             }
1966         }
1967         /* Shake the positions at t=-dt with the positions at t=0
1968          * as reference coordinates.
1969          */
1970         if (fplog)
1971         {
1972             char buf[STEPSTRSIZE];
1973             fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
1974                     gmx_step_str(step, buf));
1975         }
1976         dvdl_dum = 0;
1977         constrain(nullptr, TRUE, FALSE, constr, &(top->idef),
1978                   ir, cr, step, -1, 1.0, md,
1979                   as_rvec_array(state->x.data()), savex, nullptr,
1980                   fr->bMolPBC, state->box,
1981                   state->lambda[efptBONDED], &dvdl_dum,
1982                   as_rvec_array(state->v.data()), nullptr, nrnb, econqCoord);
1983
1984         for (i = start; i < end; i++)
1985         {
1986             for (m = 0; m < DIM; m++)
1987             {
1988                 /* Re-reverse the velocities */
1989                 state->v[i][m] = -state->v[i][m];
1990             }
1991         }
1992     }
1993     sfree(savex);
1994 }
1995
1996
1997 static void
1998 integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
1999                 double *enerout, double *virout)
2000 {
2001     double enersum, virsum;
2002     double invscale, invscale2, invscale3;
2003     double r, ea, eb, ec, pa, pb, pc, pd;
2004     double y0, f, g, h;
2005     int    ri, offset;
2006     double tabfactor;
2007
2008     invscale  = 1.0/scale;
2009     invscale2 = invscale*invscale;
2010     invscale3 = invscale*invscale2;
2011
2012     /* Following summation derived from cubic spline definition,
2013      * Numerical Recipes in C, second edition, p. 113-116.  Exact for
2014      * the cubic spline.  We first calculate the negative of the
2015      * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
2016      * add the more standard, abrupt cutoff correction to that result,
2017      * yielding the long-range correction for a switched function.  We
2018      * perform both the pressure and energy loops at the same time for
2019      * simplicity, as the computational cost is low. */
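    /* Explanatory note (derivation only): within table bin ri the potential is the
     * cubic spline V(eps) = y0 + f*eps + g*eps^2 + h*eps^3 with eps in [0,1] and
     * r = (ri + eps)/scale.  The energy sum needs Int r^2 V(r) dr over the bin;
     * with dr = invscale*deps and
     *     invscale*(r + eps*invscale)^2 = ec + eb*eps + ea*eps^2
     * this gives exactly the y0*(ea/3 + eb/2 + ec) + ... weights in the loop below.
     * The virial sum needs Int r^3 dV/dr dr; since dV/dr * dr = dV/deps * deps and
     *     (r + eps*invscale)^3 = pd + pc*eps + pb*eps^2 + pa*eps^3,
     * this yields the f, 2*g and 3*h terms.  The common factor 4*pi and the table
     * scaling (tabfactor) are applied after the loop. */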
2020
2021     if (offstart == 0)
2022     {
2023         /* Since the dispersion table has been scaled down a factor
2024          * 6.0 and the repulsion a factor 12.0 to compensate for the
2025          * c6/c12 parameters inside nbfp[] being scaled up (to save
2026          * flops in kernels), we need to correct for this.
2027          */
2028         tabfactor = 6.0;
2029     }
2030     else
2031     {
2032         tabfactor = 12.0;
2033     }
2034
2035     enersum = 0.0;
2036     virsum  = 0.0;
2037     for (ri = rstart; ri < rend; ++ri)
2038     {
2039         r  = ri*invscale;
2040         ea = invscale3;
2041         eb = 2.0*invscale2*r;
2042         ec = invscale*r*r;
2043
2044         pa = invscale3;
2045         pb = 3.0*invscale2*r;
2046         pc = 3.0*invscale*r*r;
2047         pd = r*r*r;
2048
2049         /* this "8" is from the packing in the vdwtab array - perhaps
2050            it should be a named constant? */
2051
2052         offset = 8*ri + offstart;
2053         y0     = vdwtab[offset];
2054         f      = vdwtab[offset+1];
2055         g      = vdwtab[offset+2];
2056         h      = vdwtab[offset+3];
2057
2058         enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2059         virsum  +=  f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2060     }
2061     *enerout = 4.0*M_PI*enersum*tabfactor;
2062     *virout  = 4.0*M_PI*virsum*tabfactor;
2063 }
2064
2065 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2066 {
2067     double   eners[2], virs[2], enersum, virsum;
2068     double   r0, rc3, rc9;
2069     int      ri0, ri1, i;
2070     real     scale, *vdwtab;
2071
2072     fr->enershiftsix    = 0;
2073     fr->enershifttwelve = 0;
2074     fr->enerdiffsix     = 0;
2075     fr->enerdifftwelve  = 0;
2076     fr->virdiffsix      = 0;
2077     fr->virdifftwelve   = 0;
2078
2079     if (eDispCorr != edispcNO)
2080     {
2081         for (i = 0; i < 2; i++)
2082         {
2083             eners[i] = 0;
2084             virs[i]  = 0;
2085         }
2086         if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
2087             (fr->vdw_modifier == eintmodPOTSWITCH) ||
2088             (fr->vdw_modifier == eintmodFORCESWITCH) ||
2089             (fr->vdwtype == evdwSHIFT) ||
2090             (fr->vdwtype == evdwSWITCH))
2091         {
2092             if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
2093                  (fr->vdw_modifier == eintmodFORCESWITCH) ||
2094                  (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
2095             {
2096                 gmx_fatal(FARGS,
2097                           "With dispersion correction rvdw-switch can not be zero "
2098                           "for vdw-type = %s", evdw_names[fr->vdwtype]);
2099             }
2100
2101             /* TODO This code depends on the logic in tables.c that
2102                constructs the table layout, which should be made
2103                explicit in future cleanup. */
2104             GMX_ASSERT(fr->dispersionCorrectionTable->interaction == GMX_TABLE_INTERACTION_VDWREP_VDWDISP,
2105                        "Dispersion-correction code needs a table with both repulsion and dispersion terms");
2106             scale  = fr->dispersionCorrectionTable->scale;
2107             vdwtab = fr->dispersionCorrectionTable->data;
2108
2109             /* Round the cut-offs to exact table values for precision */
2110             ri0  = static_cast<int>(floor(fr->rvdw_switch*scale));
2111             ri1  = static_cast<int>(ceil(fr->rvdw*scale));
2112
2113             /* The code below has some support for handling force-switching, i.e.
2114              * when the force (instead of potential) is switched over a limited
2115              * region. This leads to a constant shift in the potential inside the
2116              * switching region, which we can handle by adding a constant energy
2117              * term in the force-switch case just like when we do potential-shift.
2118              *
2119              * For now this is not enabled, but to keep the functionality in the
2120              * code we check separately for switch and shift. When we do force-switch
2121              * the shifting point is rvdw_switch, while it is the cutoff when we
2122              * have a classical potential-shift.
2123              *
2124              * For a pure potential-shift the potential has a constant shift
2125              * all the way out to the cutoff, and that is it. For other forms
2126              * we need to calculate the constant shift up to the point where we
2127              * start modifying the potential.
2128              */
2129             ri0  = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;
2130
2131             r0   = ri0/scale;
2132             rc3  = r0*r0*r0;
2133             rc9  = rc3*rc3*rc3;
2134
2135             if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
2136                 (fr->vdwtype == evdwSHIFT))
2137             {
2138                 /* Determine the constant energy shift below rvdw_switch.
2139                  * Table has a scale factor since we have scaled it down to compensate
2140                  * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
2141                  */
2142                 fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2143                 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2144             }
2145             else if (fr->vdw_modifier == eintmodPOTSHIFT)
2146             {
2147                 fr->enershiftsix    = (real)(-1.0/(rc3*rc3));
2148                 fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
2149             }
2150
2151             /* Add the constant part from 0 to rvdw_switch.
2152              * This integration from 0 to rvdw_switch overcounts the number
2153              * of interactions by 1, as it also counts the self interaction.
2154              * We will correct for this later.
2155              */
2156             eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2157             eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2158
2159             /* Calculate the contribution in the range [r0,r1] where we
2160              * modify the potential. For a pure potential-shift modifier we will
2161              * have ri0==ri1, and there will not be any contribution here.
2162              */
2163             for (i = 0; i < 2; i++)
2164             {
2165                 enersum = 0;
2166                 virsum  = 0;
2167                 integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
2168                 eners[i] -= enersum;
2169                 virs[i]  -= virsum;
2170             }
2171
2172             /* Alright: Above we compensated by REMOVING the parts outside r0
2173              * corresponding to the ideal VdW 1/r6 and 1/r12 potentials.
2174              *
2175              * Regardless of whether r0 is the point where we start switching,
2176              * or the cutoff where we calculated the constant shift, we include
2177              * all the parts we are missing out to infinity from r0 by
2178              * calculating the analytical dispersion correction.
2179              */
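            /* For reference, these constants are the analytic tail integrals per unit
             * c6/c12, taking g(r) = 1 beyond r0:
             *   energy:  Int_{r0..inf} 4*pi*r^2 * (-1/r^6)  dr = -4*pi/(3*r0^3)
             *            Int_{r0..inf} 4*pi*r^2 * ( 1/r^12) dr =  4*pi/(9*r0^9)
             *   virial:  Int_{r0..inf} 4*pi*r^2 * r * d(-1/r^6)/dr  dr =   8*pi/r0^3
             *            Int_{r0..inf} 4*pi*r^2 * r * d( 1/r^12)/dr dr = -16*pi/(3*r0^9) */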
2180             eners[0] += -4.0*M_PI/(3.0*rc3);
2181             eners[1] +=  4.0*M_PI/(9.0*rc9);
2182             virs[0]  +=  8.0*M_PI/rc3;
2183             virs[1]  += -16.0*M_PI/(3.0*rc9);
2184         }
2185         else if (fr->vdwtype == evdwCUT ||
2186                  EVDW_PME(fr->vdwtype) ||
2187                  fr->vdwtype == evdwUSER)
2188         {
2189             if (fr->vdwtype == evdwUSER && fplog)
2190             {
2191                 fprintf(fplog,
2192                         "WARNING: using dispersion correction with user tables\n");
2193             }
2194
2195             /* Note that with LJ-PME, the dispersion correction is multiplied
2196              * by the difference between the actual C6 and the value of C6
2197              * that would produce the combination rule.
2198              * This means the normal energy and virial difference formulas
2199              * can be used here.
2200              */
2201
2202             rc3  = fr->rvdw*fr->rvdw*fr->rvdw;
2203             rc9  = rc3*rc3*rc3;
2204             /* Contribution beyond the cut-off */
2205             eners[0] += -4.0*M_PI/(3.0*rc3);
2206             eners[1] +=  4.0*M_PI/(9.0*rc9);
2207             if (fr->vdw_modifier == eintmodPOTSHIFT)
2208             {
2209                 /* Contribution within the cut-off */
2210                 eners[0] += -4.0*M_PI/(3.0*rc3);
2211                 eners[1] +=  4.0*M_PI/(3.0*rc9);
2212             }
2213             /* Contribution beyond the cut-off */
2214             virs[0]  +=  8.0*M_PI/rc3;
2215             virs[1]  += -16.0*M_PI/(3.0*rc9);
2216         }
2217         else
2218         {
2219             gmx_fatal(FARGS,
2220                       "Dispersion correction is not implemented for vdw-type = %s",
2221                       evdw_names[fr->vdwtype]);
2222         }
2223
2224         /* When we deprecate the group kernels the code below can go too */
2225         if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
2226         {
2227             /* Calculate self-interaction coefficient (assuming that
2228              * the reciprocal-space contribution is constant in the
2229              * region that contributes to the self-interaction).
2230              */
2231             fr->enershiftsix = gmx::power6(fr->ewaldcoeff_lj) / 6.0;
2232
2233             eners[0] += -gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj)/3.0;
2234             virs[0]  +=  gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj);
2235         }
2236
2237         fr->enerdiffsix    = eners[0];
2238         fr->enerdifftwelve = eners[1];
2239         /* The 0.5 is due to the Gromacs definition of the virial */
2240         fr->virdiffsix     = 0.5*virs[0];
2241         fr->virdifftwelve  = 0.5*virs[1];
2242     }
2243 }
2244
2245 void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
2246                    matrix box, real lambda, tensor pres, tensor virial,
2247                    real *prescorr, real *enercorr, real *dvdlcorr)
2248 {
2249     gmx_bool bCorrAll, bCorrPres;
2250     real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2251     int      m;
2252
2253     *prescorr = 0;
2254     *enercorr = 0;
2255     *dvdlcorr = 0;
2256
2257     clear_mat(virial);
2258     clear_mat(pres);
2259
2260     if (ir->eDispCorr != edispcNO)
2261     {
2262         bCorrAll  = (ir->eDispCorr == edispcAllEner ||
2263                      ir->eDispCorr == edispcAllEnerPres);
2264         bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2265                      ir->eDispCorr == edispcAllEnerPres);
2266
2267         invvol = 1/det(box);
2268         if (fr->n_tpi)
2269         {
2270             /* Only correct for the interactions with the inserted molecule */
2271             dens   = (fr->numAtomsForDispersionCorrection - fr->n_tpi)*invvol;
2272             ninter = fr->n_tpi;
2273         }
2274         else
2275         {
2276             dens   = fr->numAtomsForDispersionCorrection*invvol;
2277             ninter = 0.5*fr->numAtomsForDispersionCorrection;
2278         }
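        /* In both cases ninter*dens is the pair density entering the standard
         * long-range correction: N*N/(2*V) for the homogeneous system, or
         * n_tpi*(N - n_tpi)/V when only the inserted molecule is corrected. */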
2279
2280         if (ir->efep == efepNO)
2281         {
2282             avcsix    = fr->avcsix[0];
2283             avctwelve = fr->avctwelve[0];
2284         }
2285         else
2286         {
2287             avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
2288             avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2289         }
2290
2291         enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2292         *enercorr += avcsix*enerdiff;
2293         dvdlambda  = 0.0;
2294         if (ir->efep != efepNO)
2295         {
2296             dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2297         }
2298         if (bCorrAll)
2299         {
2300             enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2301             *enercorr += avctwelve*enerdiff;
2302             if (fr->efep != efepNO)
2303             {
2304                 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2305             }
2306         }
2307
2308         if (bCorrPres)
2309         {
2310             svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2311             if (ir->eDispCorr == edispcAllEnerPres)
2312             {
2313                 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2314             }
2315             /* The factor 2 is because of the Gromacs virial definition */
2316             spres = -2.0*invvol*svir*PRESFAC;
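            /* PRESFAC converts the internal pressure unit (kJ mol^-1 nm^-3) to bar */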
2317
2318             for (m = 0; m < DIM; m++)
2319             {
2320                 virial[m][m] += svir;
2321                 pres[m][m]   += spres;
2322             }
2323             *prescorr += spres;
2324         }
2325
2326         /* Can't currently control when it prints; for now, just print when debugging */
2327         if (debug)
2328         {
2329             if (bCorrAll)
2330             {
2331                 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2332                         avcsix, avctwelve);
2333             }
2334             if (bCorrPres)
2335             {
2336                 fprintf(debug,
2337                         "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2338                         *enercorr, spres, svir);
2339             }
2340             else
2341             {
2342                 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2343             }
2344         }
2345
2346         if (fr->efep != efepNO)
2347         {
2348             *dvdlcorr += dvdlambda;
2349         }
2350     }
2351 }
2352
2353 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2354                   t_graph *graph, rvec x[])
2355 {
2356     if (fplog)
2357     {
2358         fprintf(fplog, "Removing pbc first time\n");
2359     }
2360     calc_shifts(box, fr->shift_vec);
2361     if (graph)
2362     {
2363         mk_mshift(fplog, graph, fr->ePBC, box, x);
2364         if (gmx_debug_at)
2365         {
2366             p_graph(debug, "do_pbc_first 1", graph);
2367         }
2368         shift_self(graph, box, x);
2369         /* By doing an extra mk_mshift the molecules that are broken
2370          * because they were e.g. imported from other software
2371          * will be made whole again. Such are the healing powers
2372          * of GROMACS.
2373          */
2374         mk_mshift(fplog, graph, fr->ePBC, box, x);
2375         if (gmx_debug_at)
2376         {
2377             p_graph(debug, "do_pbc_first 2", graph);
2378         }
2379     }
2380     if (fplog)
2381     {
2382         fprintf(fplog, "Done rmpbc\n");
2383     }
2384 }
2385
2386 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2387                             const gmx_mtop_t *mtop, rvec x[],
2388                             gmx_bool bFirst)
2389 {
2390     t_graph        *graph;
2391     int             mb, as, mol;
2392     gmx_molblock_t *molb;
2393
2394     if (bFirst && fplog)
2395     {
2396         fprintf(fplog, "Removing pbc first time\n");
2397     }
2398
2399     snew(graph, 1);
2400     as = 0;
2401     for (mb = 0; mb < mtop->nmolblock; mb++)
2402     {
2403         molb = &mtop->molblock[mb];
2404         if (molb->natoms_mol == 1 ||
2405             (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2406         {
2407             /* Just one atom or charge group in the molecule, no PBC required */
2408             as += molb->nmol*molb->natoms_mol;
2409         }
2410         else
2411         {
2412             /* Pass nullptr instead of fplog to avoid graph prints for each molecule type */
2413             mk_graph_ilist(nullptr, mtop->moltype[molb->type].ilist,
2414                            0, molb->natoms_mol, FALSE, FALSE, graph);
2415
2416             for (mol = 0; mol < molb->nmol; mol++)
2417             {
2418                 mk_mshift(fplog, graph, ePBC, box, x+as);
2419
2420                 shift_self(graph, box, x+as);
2421                 /* The molecule is whole now.
2422                  * We don't need the second mk_mshift call as in do_pbc_first,
2423                  * since we no longer need this graph.
2424                  */
2425
2426                 as += molb->natoms_mol;
2427             }
2428             done_graph(graph);
2429         }
2430     }
2431     sfree(graph);
2432 }
2433
2434 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2435                        const gmx_mtop_t *mtop, rvec x[])
2436 {
2437     low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2438 }
2439
2440 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2441                  gmx_mtop_t *mtop, rvec x[])
2442 {
2443     low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2444 }
2445
2446 void put_atoms_in_box_omp(int ePBC, const matrix box, int natoms, rvec x[])
2447 {
2448     int t, nth;
2449     nth = gmx_omp_nthreads_get(emntDefault);
2450
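    /* Each thread handles the half-open atom range [natoms*t/nth, natoms*(t+1)/nth);
     * this integer partition tiles 0..natoms exactly with no overlap between threads. */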
2451 #pragma omp parallel for num_threads(nth) schedule(static)
2452     for (t = 0; t < nth; t++)
2453     {
2454         try
2455         {
2456             int offset, len;
2457
2458             offset = (natoms*t    )/nth;
2459             len    = (natoms*(t + 1))/nth - offset;
2460             put_atoms_in_box(ePBC, box, len, x + offset);
2461         }
2462         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2463     }
2464 }
2465
2466 // TODO This can be cleaned up a lot, and moved back to runner.cpp
2467 void finish_run(FILE *fplog, const gmx::MDLogger &mdlog, t_commrec *cr,
2468                 t_inputrec *inputrec,
2469                 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2470                 gmx_walltime_accounting_t walltime_accounting,
2471                 nonbonded_verlet_t *nbv,
2472                 gmx_bool bWriteStat)
2473 {
2474     t_nrnb *nrnb_tot = nullptr;
2475     double  delta_t  = 0;
2476     double  nbfs     = 0, mflop = 0;
2477     double  elapsed_time,
2478             elapsed_time_over_all_ranks,
2479             elapsed_time_over_all_threads,
2480             elapsed_time_over_all_threads_over_all_ranks;
2481
2482     if (cr->nnodes > 1)
2483     {
2484         snew(nrnb_tot, 1);
2485 #if GMX_MPI
2486         MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2487                       cr->mpi_comm_mysim);
2488 #endif
2489     }
2490     else
2491     {
2492         nrnb_tot = nrnb;
2493     }
2494
2495     elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
2496     elapsed_time_over_all_ranks                  = elapsed_time;
2497     elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
2498     elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
2499 #if GMX_MPI
2500     if (cr->nnodes > 1)
2501     {
2502         /* reduce elapsed_time over all MPI ranks in the current simulation */
2503         MPI_Allreduce(&elapsed_time,
2504                       &elapsed_time_over_all_ranks,
2505                       1, MPI_DOUBLE, MPI_SUM,
2506                       cr->mpi_comm_mysim);
2507         elapsed_time_over_all_ranks /= cr->nnodes;
2508         /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
2509          * current simulation. */
2510         MPI_Allreduce(&elapsed_time_over_all_threads,
2511                       &elapsed_time_over_all_threads_over_all_ranks,
2512                       1, MPI_DOUBLE, MPI_SUM,
2513                       cr->mpi_comm_mysim);
2514     }
2515 #endif
2516
2517     if (SIMMASTER(cr))
2518     {
2519         print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2520     }
2521     if (cr->nnodes > 1)
2522     {
2523         sfree(nrnb_tot);
2524     }
2525
2526     if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2527     {
2528         print_dd_statistics(cr, inputrec, fplog);
2529     }
2530
2531     /* TODO Move the responsibility for any scaling by thread counts
2532      * to the code that handled the thread region, so that there's a
2533      * mechanism to keep cycle counting working during the transition
2534      * to task parallelism. */
2535     int nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
2536     int nthreads_pme = gmx_omp_nthreads_get(emntPME);
2537     wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
2538     auto cycle_sum(wallcycle_sum(cr, wcycle));
2539
2540     if (SIMMASTER(cr))
2541     {
2542         struct gmx_wallclock_gpu_t* gputimes = use_GPU(nbv) ? nbnxn_gpu_get_timings(nbv->gpu_nbv) : nullptr;
2543
2544         wallcycle_print(fplog, mdlog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
2545                         elapsed_time_over_all_ranks,
2546                         wcycle, cycle_sum, gputimes);
2547
2548         if (EI_DYNAMICS(inputrec->eI))
2549         {
2550             delta_t = inputrec->delta_t;
2551         }
2552
2553         if (fplog)
2554         {
2555             print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
2556                        elapsed_time_over_all_ranks,
2557                        walltime_accounting_get_nsteps_done(walltime_accounting),
2558                        delta_t, nbfs, mflop);
2559         }
2560         if (bWriteStat)
2561         {
2562             print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
2563                        elapsed_time_over_all_ranks,
2564                        walltime_accounting_get_nsteps_done(walltime_accounting),
2565                        delta_t, nbfs, mflop);
2566         }
2567     }
2568 }
2569
2570 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, std::vector<real> *lambda, double *lam0)
2571 {
2572     /* this function works, but could probably use a logic rewrite to keep all the different
2573        types of efep straight. */
2574
2575     if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2576     {
2577         return;
2578     }
2579
2580     t_lambda *fep = ir->fepvals;
2581     *fep_state    = fep->init_fep_state; /* this might overwrite the value read from the
2582                                             checkpoint if one is used -- a kludge is in place
2583                                             for now to prevent this. */
2584
2585     lambda->resize(efptNR);
2586
2587     for (int i = 0; i < efptNR; i++)
2588     {
2589         /* overwrite lambda state with init_lambda for now for backwards compatibility */
2590         if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2591         {
2592             (*lambda)[i] = fep->init_lambda;
2593             if (lam0)
2594             {
2595                 lam0[i] = (*lambda)[i];
2596             }
2597         }
2598         else
2599         {
2600             (*lambda)[i] = fep->all_lambda[i][*fep_state];
2601             if (lam0)
2602             {
2603                 lam0[i] = (*lambda)[i];
2604             }
2605         }
2606     }
2607     if (ir->bSimTemp)
2608     {
2609         /* need to rescale control temperatures to match current state */
2610         for (int i = 0; i < ir->opts.ngtc; i++)
2611         {
2612             if (ir->opts.ref_t[i] > 0)
2613             {
2614                 ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2615             }
2616         }
2617     }
2618
2619     /* Send to the log the information on the current lambdas */
2620     if (fplog != nullptr)
2621     {
2622         fprintf(fplog, "Initial vector of lambda components:[ ");
2623         for (int i = 0; i < efptNR; i++)
2624         {
2625             fprintf(fplog, "%10.4f ", (*lambda)[i]);
2626         }
2627         fprintf(fplog, "]\n");
2628     }
2629     return;
2630 }
2631
2632
2633 void init_md(FILE *fplog,
2634              t_commrec *cr, t_inputrec *ir, const gmx_output_env_t *oenv,
2635              double *t, double *t0,
2636              std::vector<real> *lambda, int *fep_state, double *lam0,
2637              t_nrnb *nrnb, gmx_mtop_t *mtop,
2638              gmx_update_t **upd,
2639              int nfile, const t_filenm fnm[],
2640              gmx_mdoutf_t *outf, t_mdebin **mdebin,
2641              tensor force_vir, tensor shake_vir, rvec mu_tot,
2642              gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
2643              gmx_wallcycle_t wcycle)
2644 {
2645     int  i;
2646
2647     /* Initial values */
2648     *t = *t0       = ir->init_t;
2649
2650     *bSimAnn = FALSE;
2651     for (i = 0; i < ir->opts.ngtc; i++)
2652     {
2653         /* set bSimAnn if any group is being annealed */
2654         if (ir->opts.annealing[i] != eannNO)
2655         {
2656             *bSimAnn = TRUE;
2657         }
2658     }
2659
2660     /* Initialize lambda variables */
2661     initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2662
2663     // TODO upd is never NULL in practice, but the analysers don't know that
2664     if (upd)
2665     {
2666         *upd = init_update(ir);
2667     }
2668     if (*bSimAnn)
2669     {
2670         update_annealing_target_temp(ir, ir->init_t, upd ? *upd : nullptr);
2671     }
2672
2673     if (vcm != nullptr)
2674     {
2675         *vcm = init_vcm(fplog, &mtop->groups, ir);
2676     }
2677
2678     if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2679     {
2680         if (ir->etc == etcBERENDSEN)
2681         {
2682             please_cite(fplog, "Berendsen84a");
2683         }
2684         if (ir->etc == etcVRESCALE)
2685         {
2686             please_cite(fplog, "Bussi2007a");
2687         }
2688         if (ir->eI == eiSD1)
2689         {
2690             please_cite(fplog, "Goga2012");
2691         }
2692     }
2693     init_nrnb(nrnb);
2694
2695     if (nfile != -1)
2696     {
2697         *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv, wcycle);
2698
2699         *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? nullptr : mdoutf_get_fp_ene(*outf),
2700                               mtop, ir, mdoutf_get_fp_dhdl(*outf));
2701     }
2702
2703     /* Initiate variables */
2704     clear_mat(force_vir);
2705     clear_mat(shake_vir);
2706     clear_rvec(mu_tot);
2707 }