1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 #include "gmxpre.h"
38
39 #include "sim_util.h"
40
41 #include "config.h"
42
43 #include <assert.h>
44 #include <math.h>
45 #include <stdio.h>
46 #include <string.h>
47
48 #include <array>
49
50 #include "gromacs/domdec/domdec.h"
51 #include "gromacs/domdec/domdec_struct.h"
52 #include "gromacs/essentialdynamics/edsam.h"
53 #include "gromacs/ewald/pme.h"
54 #include "gromacs/gmxlib/chargegroup.h"
55 #include "gromacs/gmxlib/network.h"
56 #include "gromacs/gmxlib/nrnb.h"
57 #include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
58 #include "gromacs/gmxlib/nonbonded/nb_kernel.h"
59 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
60 #include "gromacs/imd/imd.h"
61 #include "gromacs/listed-forces/bonded.h"
62 #include "gromacs/listed-forces/disre.h"
63 #include "gromacs/listed-forces/orires.h"
64 #include "gromacs/math/functions.h"
65 #include "gromacs/math/units.h"
66 #include "gromacs/math/vec.h"
67 #include "gromacs/math/vecdump.h"
68 #include "gromacs/mdlib/calcmu.h"
69 #include "gromacs/mdlib/constr.h"
70 #include "gromacs/mdlib/force.h"
71 #include "gromacs/mdlib/forcerec.h"
72 #include "gromacs/mdlib/genborn.h"
73 #include "gromacs/mdlib/gmx_omp_nthreads.h"
74 #include "gromacs/mdlib/mdrun.h"
75 #include "gromacs/mdlib/nb_verlet.h"
76 #include "gromacs/mdlib/nbnxn_atomdata.h"
77 #include "gromacs/mdlib/nbnxn_gpu_data_mgmt.h"
78 #include "gromacs/mdlib/nbnxn_grid.h"
79 #include "gromacs/mdlib/nbnxn_search.h"
80 #include "gromacs/mdlib/qmmm.h"
81 #include "gromacs/mdlib/update.h"
82 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
83 #include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
84 #include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
85 #include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
86 #include "gromacs/mdtypes/commrec.h"
87 #include "gromacs/mdtypes/inputrec.h"
88 #include "gromacs/mdtypes/md_enums.h"
89 #include "gromacs/pbcutil/ishift.h"
90 #include "gromacs/pbcutil/mshift.h"
91 #include "gromacs/pbcutil/pbc.h"
92 #include "gromacs/pulling/pull.h"
93 #include "gromacs/pulling/pull_rotation.h"
94 #include "gromacs/timing/cyclecounter.h"
95 #include "gromacs/timing/gpu_timing.h"
96 #include "gromacs/timing/wallcycle.h"
97 #include "gromacs/timing/wallcyclereporting.h"
98 #include "gromacs/timing/walltime_accounting.h"
99 #include "gromacs/utility/basedefinitions.h"
100 #include "gromacs/utility/cstringutil.h"
101 #include "gromacs/utility/exceptions.h"
102 #include "gromacs/utility/fatalerror.h"
103 #include "gromacs/utility/gmxassert.h"
104 #include "gromacs/utility/gmxmpi.h"
105 #include "gromacs/utility/pleasecite.h"
106 #include "gromacs/utility/smalloc.h"
107 #include "gromacs/utility/sysinfo.h"
108
109 #include "nbnxn_gpu.h"
110
111 void print_time(FILE                     *out,
112                 gmx_walltime_accounting_t walltime_accounting,
113                 gmx_int64_t               step,
114                 t_inputrec               *ir,
115                 t_commrec gmx_unused     *cr)
116 {
117     time_t finish;
118     char   timebuf[STRLEN];
119     double dt, elapsed_seconds, time_per_step;
120     char   buf[48];
121
122 #if !GMX_THREAD_MPI
123     if (!PAR(cr))
124 #endif
125     {
126         fprintf(out, "\r");
127     }
128     fprintf(out, "step %s", gmx_step_str(step, buf));
129     fflush(out);
130
131     if ((step >= ir->nstlist))
132     {
133         double seconds_since_epoch = gmx_gettime();
134         elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
135         time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
136         dt              = (ir->nsteps + ir->init_step - step) * time_per_step;
137
138         if (ir->nsteps >= 0)
139         {
140             if (dt >= 300)
141             {
142                 finish = (time_t) (seconds_since_epoch + dt);
143                 gmx_ctime_r(&finish, timebuf, STRLEN);
144                 sprintf(buf, "%s", timebuf);
145                 buf[strlen(buf)-1] = '\0';
146                 fprintf(out, ", will finish %s", buf);
147             }
148             else
149             {
150                 fprintf(out, ", remaining wall clock time: %5d s          ", (int)dt);
151             }
152         }
153         else
154         {
155             fprintf(out, " performance: %.1f ns/day    ",
156                     ir->delta_t/1000*24*60*60/time_per_step);
157         }
158     }
159 #if !GMX_THREAD_MPI
160     if (PAR(cr))
161     {
162         fprintf(out, "\n");
163     }
164 #endif
165
166     fflush(out);
167 }
168
169 void print_date_and_time(FILE *fplog, int nodeid, const char *title,
170                          double the_time)
171 {
172     char   time_string[STRLEN];
173
174     if (!fplog)
175     {
176         return;
177     }
178
179     {
180         int    i;
181         char   timebuf[STRLEN];
182         time_t temp_time = (time_t) the_time;
183
184         gmx_ctime_r(&temp_time, timebuf, STRLEN);
185         for (i = 0; timebuf[i] >= ' '; i++)
186         {
187             time_string[i] = timebuf[i];
188         }
189         time_string[i] = '\0';
190     }
191
192     fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
193 }
194
195 void print_start(FILE *fplog, t_commrec *cr,
196                  gmx_walltime_accounting_t walltime_accounting,
197                  const char *name)
198 {
199     char buf[STRLEN];
200
201     sprintf(buf, "Started %s", name);
202     print_date_and_time(fplog, cr->nodeid, buf,
203                         walltime_accounting_get_start_time_stamp(walltime_accounting));
204 }
205
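/*! \brief Adds the forces in \p forceToAdd to \p f, OpenMP-parallelized over
 * the default module's thread count.
 */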
206 static void sum_forces(rvec f[], const PaddedRVecVector *forceToAdd)
207 {
208     /* TODO: remove this - 1 when padding is properly implemented */
209     int         end  = forceToAdd->size() - 1;
210     const rvec *fAdd = as_rvec_array(forceToAdd->data());
211
212     // cppcheck-suppress unreadVariable
213     int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
214 #pragma omp parallel for num_threads(nt) schedule(static)
215     for (int i = 0; i < end; i++)
216     {
217         rvec_inc(f[i], fAdd[i]);
218     }
219 }
220
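/*! \brief Computes the partial (short-range, local-atom) virial into \p vir_part.
 *
 * Includes the shift-force, position-restraint and wall contributions;
 * the total virial is summed later in global_stat(), called from do_md().
 */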
221 static void calc_virial(int start, int homenr, rvec x[], rvec f[],
222                         tensor vir_part, t_graph *graph, matrix box,
223                         t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
224 {
225     int    i;
226
227     /* The short-range virial from surrounding boxes */
228     clear_mat(vir_part);
229     calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
230     inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
231
232     /* Calculate partial virial, for local atoms only, based on short range.
233      * Total virial is computed in global_stat, called from do_md
234      */
235     f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
236     inc_nrnb(nrnb, eNR_VIRIAL, homenr);
237
238     /* Add position restraint contribution */
239     for (i = 0; i < DIM; i++)
240     {
241         vir_part[i][i] += fr->vir_diag_posres[i];
242     }
243
244     /* Add wall contribution */
245     for (i = 0; i < DIM; i++)
246     {
247         vir_part[i][ZZ] += fr->vir_wall_z[i];
248     }
249
250     if (debug)
251     {
252         pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
253     }
254 }
255
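/*! \brief Computes the COM pull potential and forces via pull_potential(),
 * adding the energy to enerd->term[F_COM_PULL] and the restraint
 * dV/dlambda contribution to enerd->dvdl_lin[efptRESTRAINT].
 */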
256 static void pull_potential_wrapper(t_commrec *cr,
257                                    t_inputrec *ir,
258                                    matrix box, rvec x[],
259                                    rvec f[],
260                                    tensor vir_force,
261                                    t_mdatoms *mdatoms,
262                                    gmx_enerdata_t *enerd,
263                                    real *lambda,
264                                    double t,
265                                    gmx_wallcycle_t wcycle)
266 {
267     t_pbc  pbc;
268     real   dvdl;
269
270     /* Calculate the center of mass forces; this requires communication,
271      * which is why pull_potential is called close to other communication.
272      * The virial contribution is calculated directly,
273      * which is why we call pull_potential after calc_virial.
274      */
275     wallcycle_start(wcycle, ewcPULLPOT);
276     set_pbc(&pbc, ir->ePBC, box);
277     dvdl                     = 0;
278     enerd->term[F_COM_PULL] +=
279         pull_potential(ir->pull_work, mdatoms, &pbc,
280                        cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
281     enerd->dvdl_lin[efptRESTRAINT] += dvdl;
282     wallcycle_stop(wcycle, ewcPULLPOT);
283 }
284
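/*! \brief With separate PME ranks, receives the long-range forces, virial,
 * energies and dV/dlambda contributions from the PME ranks and accumulates
 * them, adding the measured cycle counts for dynamic load balancing.
 */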
285 static void pme_receive_force_ener(t_commrec      *cr,
286                                    gmx_wallcycle_t wcycle,
287                                    gmx_enerdata_t *enerd,
288                                    t_forcerec     *fr)
289 {
290     real   e_q, e_lj, dvdl_q, dvdl_lj;
291     float  cycles_ppdpme, cycles_seppme;
292
293     cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
294     dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);
295
296     /* In case of node-splitting, the PP nodes receive the long-range
297      * forces, virial and energy from the PME nodes here.
298      */
299     wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
300     dvdl_q  = 0;
301     dvdl_lj = 0;
302     gmx_pme_receive_f(cr, as_rvec_array(fr->f_novirsum->data()), fr->vir_el_recip, &e_q,
303                       fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
304                       &cycles_seppme);
305     enerd->term[F_COUL_RECIP] += e_q;
306     enerd->term[F_LJ_RECIP]   += e_lj;
307     enerd->dvdl_lin[efptCOUL] += dvdl_q;
308     enerd->dvdl_lin[efptVDW]  += dvdl_lj;
309
310     if (wcycle)
311     {
312         dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
313     }
314     wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
315 }
316
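/*! \brief Prints to \p fp the coordinates and force norm of every local atom
 * whose force norm exceeds \p pforce (or is NaN).
 */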
317 static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
318                                gmx_int64_t step, real pforce, rvec *x, rvec *f)
319 {
320     int  i;
321     real pf2, fn2;
322     char buf[STEPSTRSIZE];
323
324     pf2 = gmx::square(pforce);
325     for (i = 0; i < md->homenr; i++)
326     {
327         fn2 = norm2(f[i]);
328         /* We also catch NAN, if the compiler does not optimize this away. */
329         if (fn2 >= pf2 || fn2 != fn2)
330         {
331             fprintf(fp, "step %s  atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
332                     gmx_step_str(step, buf),
333                     ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(fn2));
334         }
335     }
336 }
337
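/*! \brief Post-processes the forces after the main force calculation.
 *
 * When separate no-virial-sum forces are used, spreads them from virtual
 * sites and, if the virial is requested, adds them to \p f together with
 * the mesh virial contributions. Optionally reports unusually large forces.
 */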
338 static void post_process_forces(t_commrec *cr,
339                                 gmx_int64_t step,
340                                 t_nrnb *nrnb, gmx_wallcycle_t wcycle,
341                                 gmx_localtop_t *top,
342                                 matrix box, rvec x[],
343                                 rvec f[],
344                                 tensor vir_force,
345                                 t_mdatoms *mdatoms,
346                                 t_graph *graph,
347                                 t_forcerec *fr, gmx_vsite_t *vsite,
348                                 int flags)
349 {
350     if (fr->bF_NoVirSum)
351     {
352         if (vsite)
353         {
354             /* Spread the mesh force on virtual sites to the other particles...
355              * This is parallelized. MPI communication is performed
356              * if the constructing atoms aren't local.
357              */
358             wallcycle_start(wcycle, ewcVSITESPREAD);
359             spread_vsite_f(vsite, x, as_rvec_array(fr->f_novirsum->data()), NULL,
360                            (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
361                            nrnb,
362                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
363             wallcycle_stop(wcycle, ewcVSITESPREAD);
364         }
365         if (flags & GMX_FORCE_VIRIAL)
366         {
367             /* Now add the forces, this is local */
368             sum_forces(f, fr->f_novirsum);
369
370             if (EEL_FULL(fr->eeltype))
371             {
372                 /* Add the mesh contribution to the virial */
373                 m_add(vir_force, fr->vir_el_recip, vir_force);
374             }
375             if (EVDW_PME(fr->vdwtype))
376             {
377                 /* Add the mesh contribution to the virial */
378                 m_add(vir_force, fr->vir_lj_recip, vir_force);
379             }
380             if (debug)
381             {
382                 pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
383             }
384         }
385     }
386
387     if (fr->print_force >= 0)
388     {
389         print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
390     }
391 }
392
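/*! \brief Launches or runs the non-bonded kernel for interaction locality \p ilocality.
 *
 * Dispatches to the plain-C reference, SIMD 4xN, SIMD 2xNN, GPU or
 * GPU-emulation kernel and updates the non-bonded flop counters in \p nrnb.
 */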
393 static void do_nb_verlet(t_forcerec *fr,
394                          interaction_const_t *ic,
395                          gmx_enerdata_t *enerd,
396                          int flags, int ilocality,
397                          int clearF,
398                          t_nrnb *nrnb,
399                          gmx_wallcycle_t wcycle)
400 {
401     int                        enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
402     nonbonded_verlet_group_t  *nbvg;
403     gmx_bool                   bUsingGpuKernels;
404
405     if (!(flags & GMX_FORCE_NONBONDED))
406     {
407         /* skip non-bonded calculation */
408         return;
409     }
410
411     nbvg = &fr->nbv->grp[ilocality];
412
413     /* GPU kernel launch overhead is already timed separately */
414     if (fr->cutoff_scheme != ecutsVERLET)
415     {
416         gmx_incons("Invalid cut-off scheme passed!");
417     }
418
419     bUsingGpuKernels = (nbvg->kernel_type == nbnxnk8x8x8_GPU);
420
421     if (!bUsingGpuKernels)
422     {
423         wallcycle_sub_start(wcycle, ewcsNONBONDED);
424     }
425     switch (nbvg->kernel_type)
426     {
427         case nbnxnk4x4_PlainC:
428             nbnxn_kernel_ref(&nbvg->nbl_lists,
429                              nbvg->nbat, ic,
430                              fr->shift_vec,
431                              flags,
432                              clearF,
433                              fr->fshift[0],
434                              enerd->grpp.ener[egCOULSR],
435                              fr->bBHAM ?
436                              enerd->grpp.ener[egBHAMSR] :
437                              enerd->grpp.ener[egLJSR]);
438             break;
439
440         case nbnxnk4xN_SIMD_4xN:
441             nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
442                                   nbvg->nbat, ic,
443                                   nbvg->ewald_excl,
444                                   fr->shift_vec,
445                                   flags,
446                                   clearF,
447                                   fr->fshift[0],
448                                   enerd->grpp.ener[egCOULSR],
449                                   fr->bBHAM ?
450                                   enerd->grpp.ener[egBHAMSR] :
451                                   enerd->grpp.ener[egLJSR]);
452             break;
453         case nbnxnk4xN_SIMD_2xNN:
454             nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
455                                    nbvg->nbat, ic,
456                                    nbvg->ewald_excl,
457                                    fr->shift_vec,
458                                    flags,
459                                    clearF,
460                                    fr->fshift[0],
461                                    enerd->grpp.ener[egCOULSR],
462                                    fr->bBHAM ?
463                                    enerd->grpp.ener[egBHAMSR] :
464                                    enerd->grpp.ener[egLJSR]);
465             break;
466
467         case nbnxnk8x8x8_GPU:
468             nbnxn_gpu_launch_kernel(fr->nbv->gpu_nbv, nbvg->nbat, flags, ilocality);
469             break;
470
471         case nbnxnk8x8x8_PlainC:
472             nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
473                                  nbvg->nbat, ic,
474                                  fr->shift_vec,
475                                  flags,
476                                  clearF,
477                                  nbvg->nbat->out[0].f,
478                                  fr->fshift[0],
479                                  enerd->grpp.ener[egCOULSR],
480                                  fr->bBHAM ?
481                                  enerd->grpp.ener[egBHAMSR] :
482                                  enerd->grpp.ener[egLJSR]);
483             break;
484
485         default:
486             gmx_incons("Invalid nonbonded kernel type passed!");
487
488     }
489     if (!bUsingGpuKernels)
490     {
491         wallcycle_sub_stop(wcycle, ewcsNONBONDED);
492     }
493
494     if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
495     {
496         enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
497     }
498     else if ((!bUsingGpuKernels && nbvg->ewald_excl == ewaldexclAnalytical) ||
499              (bUsingGpuKernels && nbnxn_gpu_is_kernel_ewald_analytical(fr->nbv->gpu_nbv)))
500     {
501         enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
502     }
503     else
504     {
505         enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
506     }
507     enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
508     if (flags & GMX_FORCE_ENERGY)
509     {
510         /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
511         enr_nbnxn_kernel_ljc += 1;
512         enr_nbnxn_kernel_lj  += 1;
513     }
514
515     inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
516              nbvg->nbl_lists.natpair_ljq);
517     inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
518              nbvg->nbl_lists.natpair_lj);
519     /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
520     inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
521              nbvg->nbl_lists.natpair_q);
522
523     if (ic->vdw_modifier == eintmodFORCESWITCH)
524     {
525         /* We add up the switch cost separately */
526         inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
527                  nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
528     }
529     if (ic->vdw_modifier == eintmodPOTSWITCH)
530     {
531         /* We add up the switch cost separately */
532         inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
533                  nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
534     }
535     if (ic->vdwtype == evdwPME)
536     {
537         /* We add up the LJ Ewald cost separately */
538         inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
539                  nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
540     }
541 }
542
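/*! \brief Computes the perturbed (free-energy) non-bonded interactions with
 * the CPU free-energy kernel.
 *
 * Accumulates dV/dlambda and, when soft-core is used and dH/dlambda output
 * is requested, also the foreign-lambda energy contributions.
 */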
543 static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
544                              t_forcerec           *fr,
545                              rvec                  x[],
546                              rvec                  f[],
547                              t_mdatoms            *mdatoms,
548                              t_lambda             *fepvals,
549                              real                 *lambda,
550                              gmx_enerdata_t       *enerd,
551                              int                   flags,
552                              t_nrnb               *nrnb,
553                              gmx_wallcycle_t       wcycle)
554 {
555     int              donb_flags;
556     nb_kernel_data_t kernel_data;
557     real             lam_i[efptNR];
558     real             dvdl_nb[efptNR];
559     int              th;
560     int              i, j;
561
562     donb_flags = 0;
563     /* Add short-range interactions */
564     donb_flags |= GMX_NONBONDED_DO_SR;
565
566     /* Currently all group scheme kernels always calculate (shift-)forces */
567     if (flags & GMX_FORCE_FORCES)
568     {
569         donb_flags |= GMX_NONBONDED_DO_FORCE;
570     }
571     if (flags & GMX_FORCE_VIRIAL)
572     {
573         donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
574     }
575     if (flags & GMX_FORCE_ENERGY)
576     {
577         donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
578     }
579
580     kernel_data.flags  = donb_flags;
581     kernel_data.lambda = lambda;
582     kernel_data.dvdl   = dvdl_nb;
583
584     kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
585     kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];
586
587     /* reset free energy components */
588     for (i = 0; i < efptNR; i++)
589     {
590         dvdl_nb[i]  = 0;
591     }
592
593     assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);
594
595     wallcycle_sub_start(wcycle, ewcsNONBONDED);
596 #pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
597     for (th = 0; th < nbl_lists->nnbl; th++)
598     {
599         try
600         {
601             gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
602                                       x, f, fr, mdatoms, &kernel_data, nrnb);
603         }
604         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
605     }
606
607     if (fepvals->sc_alpha != 0)
608     {
609         enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
610         enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
611     }
612     else
613     {
614         enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
615         enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
616     }
617
618     /* If we do foreign lambda and we have soft-core interactions
619      * we have to recalculate the (non-linear) energy contributions.
620      */
621     if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
622     {
623         kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
624         kernel_data.lambda         = lam_i;
625         kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
626         kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
627         /* Note that we add to kernel_data.dvdl, but ignore the result */
628
629         for (i = 0; i < enerd->n_lambda; i++)
630         {
631             for (j = 0; j < efptNR; j++)
632             {
633                 lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
634             }
635             reset_foreign_enerdata(enerd);
636 #pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
637             for (th = 0; th < nbl_lists->nnbl; th++)
638             {
639                 try
640                 {
641                     gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
642                                               x, f, fr, mdatoms, &kernel_data, nrnb);
643                 }
644                 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
645             }
646
647             sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
648             enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
649         }
650     }
651
652     wallcycle_sub_stop(wcycle, ewcsNONBONDED);
653 }
654
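/*! \brief Returns whether \p nbv is set up to run the non-bonded calculations on a GPU. */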
655 gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
656 {
657     return nbv != NULL && nbv->bUseGPU;
658 }
659
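/*! \brief Zeroes the first \p n elements of \p v, using OpenMP when the
 * default module assigns more than one thread to the task.
 */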
660 static gmx_inline void clear_rvecs_omp(int n, rvec v[])
661 {
662     int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);
663
664     /* Note that we would like to avoid this conditional by putting it
665      * into the omp pragma instead, but then we still take the full
666      * omp parallel for overhead (at least with gcc5).
667      */
668     if (nth == 1)
669     {
670         for (int i = 0; i < n; i++)
671         {
672             clear_rvec(v[i]);
673         }
674     }
675     else
676     {
677 #pragma omp parallel for num_threads(nth) schedule(static)
678         for (int i = 0; i < n; i++)
679         {
680             clear_rvec(v[i]);
681         }
682     }
683 }
684
685 /*! \brief  This routine checks if the potential energy is finite.
686  *
687  * Note that passing this check does not guarantee finite forces,
688  * since those use slightly different arithmetic. But in most cases
689  * there is just a narrow coordinate range where forces are not finite
690  * and energies are finite.
691  *
692  * \param[in] enerd  The energy data; the non-bonded group energies need to be added in here before calling this routine
693  */
694 static void checkPotentialEnergyValidity(const gmx_enerdata_t *enerd)
695 {
696     if (!std::isfinite(enerd->term[F_EPOT]))
697     {
698         gmx_fatal(FARGS, "The total potential energy is %g, which is not finite. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A non-finite potential energy can be caused by overlapping interactions in bonded interactions or very large or NaN coordinate values. Usually this is caused by a badly or non-equilibrated initial configuration or incorrect interactions or parameters in the topology.",
699                   enerd->term[F_EPOT],
700                   enerd->term[F_LJ],
701                   enerd->term[F_COUL_SR]);
702     }
703 }
704
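/*! \brief Computes forces, and energies and virial as requested in \p flags,
 * with the Verlet cut-off scheme.
 *
 * Performs pair search when needed, launches GPU non-bonded work and the
 * corresponding transfers, runs the CPU non-bonded, free-energy and
 * bonded/long-range work, and reduces and post-processes the resulting forces.
 */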
705 void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
706                          t_inputrec *inputrec,
707                          gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
708                          gmx_localtop_t *top,
709                          gmx_groups_t gmx_unused *groups,
710                          matrix box, rvec x[], history_t *hist,
711                          PaddedRVecVector *force,
712                          tensor vir_force,
713                          t_mdatoms *mdatoms,
714                          gmx_enerdata_t *enerd, t_fcdata *fcd,
715                          real *lambda, t_graph *graph,
716                          t_forcerec *fr, interaction_const_t *ic,
717                          gmx_vsite_t *vsite, rvec mu_tot,
718                          double t, gmx_edsam_t ed,
719                          gmx_bool bBornRadii,
720                          int flags)
721 {
722     int                 cg1, i, j;
723     double              mu[2*DIM];
724     gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM;
725     gmx_bool            bDoForces, bUseGPU, bUseOrEmulGPU;
726     gmx_bool            bDiffKernels = FALSE;
727     rvec                vzero, box_diag;
728     float               cycles_pme, cycles_force, cycles_wait_gpu;
729     /* TODO To avoid loss of precision, float can't be used for a
730      * cycle count. Build an object that can do this right and perhaps
731      * also be used by gmx_wallcycle_t */
732     gmx_cycles_t        cycleCountBeforeLocalWorkCompletes = 0;
733     nonbonded_verlet_t *nbv;
734
735     cycles_force    = 0;
736     cycles_wait_gpu = 0;
737     nbv             = fr->nbv;
738
739     const int start  = 0;
740     const int homenr = mdatoms->homenr;
741
742     clear_mat(vir_force);
743
744     if (DOMAINDECOMP(cr))
745     {
746         cg1 = cr->dd->ncg_tot;
747     }
748     else
749     {
750         cg1 = top->cgs.nr;
751     }
752     if (fr->n_tpi > 0)
753     {
754         cg1--;
755     }
756
757     bStateChanged = (flags & GMX_FORCE_STATECHANGED);
758     bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
759     bFillGrid     = (bNS && bStateChanged);
760     bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
761     bDoForces     = (flags & GMX_FORCE_FORCES);
762     bUseGPU       = fr->nbv->bUseGPU;
763     bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
764
765     if (bStateChanged)
766     {
767         update_forcerec(fr, box);
768
769         if (inputrecNeedMutot(inputrec))
770         {
771             /* Calculate total (local) dipole moment in a temporary common array.
772              * This makes it possible to sum them over nodes faster.
773              */
774             calc_mu(start, homenr,
775                     x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
776                     mu, mu+DIM);
777         }
778     }
779
780     if (fr->ePBC != epbcNONE)
781     {
782         /* Compute shift vectors every step,
783          * because of pressure coupling or box deformation!
784          */
785         if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
786         {
787             calc_shifts(box, fr->shift_vec);
788         }
789
790         if (bCalcCGCM)
791         {
792             put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
793             inc_nrnb(nrnb, eNR_SHIFTX, homenr);
794         }
795         else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
796         {
797             unshift_self(graph, box, x);
798         }
799     }
800
801     nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
802                                  fr->shift_vec, nbv->grp[0].nbat);
803
804 #if GMX_MPI
805     if (!(cr->duty & DUTY_PME))
806     {
807         gmx_bool bBS;
808         matrix   boxs;
809
810         /* Send particle coordinates to the pme nodes.
811          * Since this is only implemented for domain decomposition
812          * and domain decomposition does not use the graph,
813          * we do not need to worry about shifting.
814          */
815
816         wallcycle_start(wcycle, ewcPP_PMESENDX);
817
818         bBS = (inputrec->nwall == 2);
819         if (bBS)
820         {
821             copy_mat(box, boxs);
822             svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
823         }
824
825         gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
826                                  lambda[efptCOUL], lambda[efptVDW],
827                                  (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
828                                  step);
829
830         wallcycle_stop(wcycle, ewcPP_PMESENDX);
831     }
832 #endif /* GMX_MPI */
833
834     /* do gridding for pair search */
835     if (bNS)
836     {
837         if (graph && bStateChanged)
838         {
839             /* Calculate intramolecular shift vectors to make molecules whole */
840             mk_mshift(fplog, graph, fr->ePBC, box, x);
841         }
842
843         clear_rvec(vzero);
844         box_diag[XX] = box[XX][XX];
845         box_diag[YY] = box[YY][YY];
846         box_diag[ZZ] = box[ZZ][ZZ];
847
848         wallcycle_start(wcycle, ewcNS);
849         if (!fr->bDomDec)
850         {
851             wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
852             nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
853                               0, vzero, box_diag,
854                               0, mdatoms->homenr, -1, fr->cginfo, x,
855                               0, NULL,
856                               nbv->grp[eintLocal].kernel_type,
857                               nbv->grp[eintLocal].nbat);
858             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
859         }
860         else
861         {
862             wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
863             nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
864                                        fr->cginfo, x,
865                                        nbv->grp[eintNonlocal].kernel_type,
866                                        nbv->grp[eintNonlocal].nbat);
867             wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
868         }
869
870         if (nbv->ngrp == 1 ||
871             nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
872         {
873             nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
874                                nbv->nbs, mdatoms, fr->cginfo);
875         }
876         else
877         {
878             nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
879                                nbv->nbs, mdatoms, fr->cginfo);
880             nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
881                                nbv->nbs, mdatoms, fr->cginfo);
882         }
883         wallcycle_stop(wcycle, ewcNS);
884     }
885
886     /* initialize the GPU atom data and copy shift vector */
887     if (bUseGPU)
888     {
889         if (bNS)
890         {
891             wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
892             nbnxn_gpu_init_atomdata(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
893             wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
894         }
895
896         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
897         nbnxn_gpu_upload_shiftvec(nbv->gpu_nbv, nbv->grp[eintLocal].nbat);
898         wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
899     }
900
901     /* do local pair search */
902     if (bNS)
903     {
904         wallcycle_start_nocount(wcycle, ewcNS);
905         wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
906         nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
907                             &top->excls,
908                             ic->rlist,
909                             nbv->min_ci_balanced,
910                             &nbv->grp[eintLocal].nbl_lists,
911                             eintLocal,
912                             nbv->grp[eintLocal].kernel_type,
913                             nrnb);
914         wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
915
916         if (bUseGPU)
917         {
918             /* initialize local pair-list on the GPU */
919             nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
920                                     nbv->grp[eintLocal].nbl_lists.nbl[0],
921                                     eintLocal);
922         }
923         wallcycle_stop(wcycle, ewcNS);
924     }
925     else
926     {
927         wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
928         wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
929         nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
930                                         nbv->grp[eintLocal].nbat);
931         wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
932         wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
933     }
934
935     if (bUseGPU)
936     {
937         wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
938         /* launch local nonbonded F on GPU */
939         do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
940                      nrnb, wcycle);
941         wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
942     }
943
944     /* Communicate coordinates and sum dipole if necessary +
945        do non-local pair search */
946     if (DOMAINDECOMP(cr))
947     {
948         bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
949                         nbv->grp[eintLocal].kernel_type);
950
951         if (bDiffKernels)
952         {
953             /* With GPU+CPU non-bonded calculations we need to copy
954              * the local coordinates to the non-local nbat struct
955              * (in CPU format) as the non-local kernel call also
956              * calculates the local - non-local interactions.
957              */
958             wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
959             wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
960             nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
961                                             nbv->grp[eintNonlocal].nbat);
962             wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
963             wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
964         }
965
966         if (bNS)
967         {
968             wallcycle_start_nocount(wcycle, ewcNS);
969             wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
970
971             if (bDiffKernels)
972             {
973                 nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
974             }
975
976             nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
977                                 &top->excls,
978                                 ic->rlist,
979                                 nbv->min_ci_balanced,
980                                 &nbv->grp[eintNonlocal].nbl_lists,
981                                 eintNonlocal,
982                                 nbv->grp[eintNonlocal].kernel_type,
983                                 nrnb);
984
985             wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
986
987             if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_GPU)
988             {
989                 /* initialize non-local pair-list on the GPU */
990                 nbnxn_gpu_init_pairlist(nbv->gpu_nbv,
991                                         nbv->grp[eintNonlocal].nbl_lists.nbl[0],
992                                         eintNonlocal);
993             }
994             wallcycle_stop(wcycle, ewcNS);
995         }
996         else
997         {
998             wallcycle_start(wcycle, ewcMOVEX);
999             dd_move_x(cr->dd, box, x);
1000             wallcycle_stop(wcycle, ewcMOVEX);
1001
1002             wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1003             wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
1004             nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
1005                                             nbv->grp[eintNonlocal].nbat);
1006             wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
1007             cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1008         }
1009
1010         if (bUseGPU && !bDiffKernels)
1011         {
1012             wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
1013             /* launch non-local nonbonded F on GPU */
1014             do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
1015                          nrnb, wcycle);
1016             cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1017         }
1018     }
1019
1020     if (bUseGPU)
1021     {
1022         /* launch D2H copy-back F */
1023         wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1024         if (DOMAINDECOMP(cr) && !bDiffKernels)
1025         {
1026             nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintNonlocal].nbat,
1027                                      flags, eatNonlocal);
1028         }
1029         nbnxn_gpu_launch_cpyback(nbv->gpu_nbv, nbv->grp[eintLocal].nbat,
1030                                  flags, eatLocal);
1031         cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1032     }
1033
1034     if (bStateChanged && inputrecNeedMutot(inputrec))
1035     {
1036         if (PAR(cr))
1037         {
1038             gmx_sumd(2*DIM, mu, cr);
1039         }
1040
1041         for (i = 0; i < 2; i++)
1042         {
1043             for (j = 0; j < DIM; j++)
1044             {
1045                 fr->mu_tot[i][j] = mu[i*DIM + j];
1046             }
1047         }
1048     }
1049     if (fr->efep == efepNO)
1050     {
1051         copy_rvec(fr->mu_tot[0], mu_tot);
1052     }
1053     else
1054     {
1055         for (j = 0; j < DIM; j++)
1056         {
1057             mu_tot[j] =
1058                 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1059                 lambda[efptCOUL]*fr->mu_tot[1][j];
1060         }
1061     }
1062
1063     /* Reset energies */
1064     reset_enerdata(enerd);
1065     clear_rvecs(SHIFTS, fr->fshift);
1066
1067     if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
1068     {
1069         wallcycle_start(wcycle, ewcPPDURINGPME);
1070         dd_force_flop_start(cr->dd, nrnb);
1071     }
1072
1073     if (inputrec->bRot)
1074     {
1075         /* Enforced rotation has its own cycle counter that starts after the collective
1076          * coordinates have been communicated. It is added to ddCyclF to allow
1077          * for proper load-balancing */
1078         wallcycle_start(wcycle, ewcROT);
1079         do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1080         wallcycle_stop(wcycle, ewcROT);
1081     }
1082
1083     /* Temporary solution until all routines take PaddedRVecVector */
1084     rvec *f = as_rvec_array(force->data());
1085
1086     /* Start the force cycle counter.
1087      * This counter is stopped after do_force_lowlevel.
1088      * No parallel communication should occur while this counter is running,
1089      * since that will interfere with the dynamic load balancing.
1090      */
1091     wallcycle_start(wcycle, ewcFORCE);
1092     if (bDoForces)
1093     {
1094         /* Reset forces for which the virial is calculated separately:
1095          * PME/Ewald forces if necessary */
1096         if (fr->bF_NoVirSum)
1097         {
1098             if (flags & GMX_FORCE_VIRIAL)
1099             {
1100                 fr->f_novirsum = fr->forceBufferNoVirialSummation;
1101             }
1102             else
1103             {
1104                 /* We are not calculating the pressure so we do not need
1105                  * a separate array for forces that do not contribute
1106                  * to the pressure.
1107                  */
1108                 fr->f_novirsum = force;
1109             }
1110         }
1111
1112         if (fr->bF_NoVirSum)
1113         {
1114             if (flags & GMX_FORCE_VIRIAL)
1115             {
1116                 /* TODO: remove this - 1 when padding is properly implemented */
1117                 clear_rvecs_omp(fr->f_novirsum->size() - 1,
1118                                 as_rvec_array(fr->f_novirsum->data()));
1119             }
1120         }
1121         /* Clear the short- and long-range forces */
1122         clear_rvecs_omp(fr->natoms_force_constr, f);
1123
1124         clear_rvec(fr->vir_diag_posres);
1125     }
1126
1127     if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
1128     {
1129         clear_pull_forces(inputrec->pull_work);
1130     }
1131
1132     /* We calculate the non-bonded forces, when done on the CPU, here.
1133      * We do this before calling do_force_lowlevel, because in that
1134      * function, the listed forces are calculated before PME, which
1135      * does communication.  With this order, non-bonded and listed
1136      * force calculation imbalance can be balanced out by the domain
1137      * decomposition load balancing.
1138      */
1139
1140     if (!bUseOrEmulGPU)
1141     {
1142         /* Maybe we should move this into do_force_lowlevel */
1143         do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
1144                      nrnb, wcycle);
1145     }
1146
1147     if (fr->efep != efepNO)
1148     {
1149         /* Calculate the local and non-local free energy interactions here.
1150          * Happens here on the CPU both with and without GPU.
1151          */
1152         if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
1153         {
1154             do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
1155                              fr, x, f, mdatoms,
1156                              inputrec->fepvals, lambda,
1157                              enerd, flags, nrnb, wcycle);
1158         }
1159
1160         if (DOMAINDECOMP(cr) &&
1161             fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
1162         {
1163             do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
1164                              fr, x, f, mdatoms,
1165                              inputrec->fepvals, lambda,
1166                              enerd, flags, nrnb, wcycle);
1167         }
1168     }
1169
1170     if (!bUseOrEmulGPU || bDiffKernels)
1171     {
1172         int aloc;
1173
1174         if (DOMAINDECOMP(cr))
1175         {
1176             do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
1177                          bDiffKernels ? enbvClearFYes : enbvClearFNo,
1178                          nrnb, wcycle);
1179         }
1180
1181         if (!bUseOrEmulGPU)
1182         {
1183             aloc = eintLocal;
1184         }
1185         else
1186         {
1187             aloc = eintNonlocal;
1188         }
1189
1190         /* Add all the non-bonded forces to the normal force array.
1191          * This can be split into a local and a non-local part when overlapping
1192          * communication with calculation with domain decomposition.
1193          */
1194         cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1195         wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1196         wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1197         nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
1198         wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1199         cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1200         wallcycle_start_nocount(wcycle, ewcFORCE);
1201
1202         /* if there are multiple fshift output buffers reduce them */
1203         if ((flags & GMX_FORCE_VIRIAL) &&
1204             nbv->grp[aloc].nbl_lists.nnbl > 1)
1205         {
1206             /* This is not in a subcounter because it takes a
1207                negligible and constant-sized amount of time */
1208             nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
1209                                                      fr->fshift);
1210         }
1211     }
1212
1213     /* update QMMMrec, if necessary */
1214     if (fr->bQMMM)
1215     {
1216         update_QMMMrec(cr, fr, x, mdatoms, box, top);
1217     }
1218
1219     /* Compute the bonded and non-bonded energies and optionally forces */
1220     do_force_lowlevel(fr, inputrec, &(top->idef),
1221                       cr, nrnb, wcycle, mdatoms,
1222                       x, hist, f, enerd, fcd, top, fr->born,
1223                       bBornRadii, box,
1224                       inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
1225                       flags, &cycles_pme);
1226
1227     cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1228
1229     if (ed)
1230     {
1231         do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1232     }
1233
1234     if (bUseOrEmulGPU && !bDiffKernels)
1235     {
1236         /* wait for non-local forces (or calculate in emulation mode) */
1237         if (DOMAINDECOMP(cr))
1238         {
1239             if (bUseGPU)
1240             {
1241                 float cycles_tmp;
1242
1243                 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1244                 nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
1245                                        flags, eatNonlocal,
1246                                        enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1247                                        fr->fshift);
1248                 cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1249                 cycles_wait_gpu += cycles_tmp;
1250                 cycles_force    += cycles_tmp;
1251             }
1252             else
1253             {
1254                 wallcycle_start_nocount(wcycle, ewcFORCE);
1255                 do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
1256                              nrnb, wcycle);
1257                 cycles_force += wallcycle_stop(wcycle, ewcFORCE);
1258             }
1259             wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1260             wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1261             /* skip the reduction if there was no non-local work to do */
1262             if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
1263             {
1264                 nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
1265                                                nbv->grp[eintNonlocal].nbat, f);
1266             }
1267             wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1268             cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1269         }
1270     }
1271
1272     if (bDoForces && DOMAINDECOMP(cr))
1273     {
1274         if (bUseGPU)
1275         {
1276             /* We are done with the CPU compute, but the GPU local non-bonded
1277              * kernel can still be running while we communicate the forces.
1278              * We start a counter here, so we can, hopefully, time the rest
1279              * of the GPU kernel execution and data transfer.
1280              */
1281             cycleCountBeforeLocalWorkCompletes = gmx_cycles_read();
1282         }
1283
1284         /* Communicate the forces */
1285         wallcycle_start(wcycle, ewcMOVEF);
1286         dd_move_f(cr->dd, f, fr->fshift);
1287         wallcycle_stop(wcycle, ewcMOVEF);
1288     }
1289
1290     if (bUseOrEmulGPU)
1291     {
1292         /* wait for local forces (or calculate in emulation mode) */
1293         if (bUseGPU)
1294         {
1295             float       cycles_tmp, cycles_wait_est;
1296             /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
1297              * is between 0.5 and 1.5 Mcycles. So 2 MCycles is an overestimate,
1298              * but even with a step of 0.1 ms the difference is less than 1%
1299              * of the step time.
1300              */
1301             const float gpuWaitApiOverheadMargin = 2e6f; /* cycles */
1302
1303             wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1304             nbnxn_gpu_wait_for_gpu(nbv->gpu_nbv,
1305                                    flags, eatLocal,
1306                                    enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
1307                                    fr->fshift);
1308             cycles_tmp      = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1309
1310             if (bDoForces && DOMAINDECOMP(cr))
1311             {
1312                 cycles_wait_est = gmx_cycles_read() - cycleCountBeforeLocalWorkCompletes;
1313
1314                 if (cycles_tmp < gpuWaitApiOverheadMargin)
1315                 {
1316                     /* We measured only a few cycles; it could be that the kernel
1317                      * and transfer finished earlier and there was no actual
1318                      * wait time, only API call overhead.
1319                      * Then the actual time could be anywhere between 0 and
1320                      * cycles_wait_est. As a compromise, we use half the time.
1321                      */
1322                     cycles_wait_est *= 0.5f;
1323                 }
1324             }
1325             else
1326             {
1327                 /* No force communication so we actually timed the wait */
1328                 cycles_wait_est = cycles_tmp;
1329             }
1330             /* Even though this is after dd_move_f, the actual task we are
1331              * waiting for runs asynchronously with dd_move_f and we usually
1332              * have nothing to balance it with, so we can and should add
1333              * the time to the force time for load balancing.
1334              */
1335             cycles_force    += cycles_wait_est;
1336             cycles_wait_gpu += cycles_wait_est;
1337
1338             /* now clear the GPU outputs while we finish the step on the CPU */
1339             wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
1340             nbnxn_gpu_clear_outputs(nbv->gpu_nbv, flags);
1341             wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
1342         }
1343         else
1344         {
1345             wallcycle_start_nocount(wcycle, ewcFORCE);
1346             do_nb_verlet(fr, ic, enerd, flags, eintLocal,
1347                          DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1348                          nrnb, wcycle);
1349             wallcycle_stop(wcycle, ewcFORCE);
1350         }
1351         wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
1352         wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
1353         nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
1354                                        nbv->grp[eintLocal].nbat, f);
1355         wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1356         wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1357     }
1358
1359     if (DOMAINDECOMP(cr))
1360     {
1361         dd_force_flop_stop(cr->dd, nrnb);
1362         if (wcycle)
1363         {
1364             dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1365             if (bUseGPU)
1366             {
1367                 dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
1368             }
1369         }
1370     }
1371
1372     if (bDoForces)
1373     {
1374         /* Compute forces due to electric field */
1375         if (fr->efield != nullptr)
1376         {
1377             fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
1378         }
1379
1380         /* If we have NoVirSum forces, but we do not calculate the virial,
1381          * fr->f_novirsum is the same buffer as f and the spreading is done later in post_process_forces().
1382          */
1383         if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1384         {
1385             wallcycle_start(wcycle, ewcVSITESPREAD);
1386             spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1387                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1388             wallcycle_stop(wcycle, ewcVSITESPREAD);
1389         }
1390
1391         if (flags & GMX_FORCE_VIRIAL)
1392         {
1393             /* Calculation of the virial must be done after vsites! */
1394             calc_virial(0, mdatoms->homenr, x, f,
1395                         vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1396         }
1397     }
1398
1399     if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
1400     {
1401         /* Since the COM pulling is always done mass-weighted, no forces are
1402          * applied to vsites and this call can be done after vsite spreading.
1403          */
1404         pull_potential_wrapper(cr, inputrec, box, x,
1405                                f, vir_force, mdatoms, enerd, lambda, t,
1406                                wcycle);
1407     }
1408
1409     /* Add the forces from enforced rotation potentials (if any) */
1410     if (inputrec->bRot)
1411     {
1412         wallcycle_start(wcycle, ewcROTadd);
1413         enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1414         wallcycle_stop(wcycle, ewcROTadd);
1415     }
1416
1417     /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1418     IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
1419
1420     if (PAR(cr) && !(cr->duty & DUTY_PME))
1421     {
1422         /* In case of node-splitting, the PP nodes receive the long-range
1423          * forces, virial and energy from the PME nodes here.
1424          */
1425         pme_receive_force_ener(cr, wcycle, enerd, fr);
1426     }
1427
1428     if (bDoForces)
1429     {
1430         post_process_forces(cr, step, nrnb, wcycle,
1431                             top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1432                             flags);
1433     }
1434
1435     if (flags & GMX_FORCE_ENERGY)
1436     {
1437         /* Sum the potential energy terms from group contributions */
1438         sum_epot(&(enerd->grpp), enerd->term);
1439
1440         if (!EI_TPI(inputrec->eI))
1441         {
1442             checkPotentialEnergyValidity(enerd);
1443         }
1444     }
1445 }
1446
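/* Group cut-off scheme version of the force calculation. The flow mirrors
 * do_force_cutsVERLET() above: optional neighbour searching, sending the
 * coordinates to separate PME ranks, bonded and non-bonded evaluation in
 * do_force_lowlevel(), then virtual-site force spreading, virial
 * accumulation, COM pulling, enforced rotation and IMD forces, and finally
 * reception of the PME mesh forces on PP-only ranks.
 */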
1447 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1448                         t_inputrec *inputrec,
1449                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1450                         gmx_localtop_t *top,
1451                         gmx_groups_t *groups,
1452                         matrix box, rvec x[], history_t *hist,
1453                         PaddedRVecVector *force,
1454                         tensor vir_force,
1455                         t_mdatoms *mdatoms,
1456                         gmx_enerdata_t *enerd, t_fcdata *fcd,
1457                         real *lambda, t_graph *graph,
1458                         t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1459                         double t, gmx_edsam_t ed,
1460                         gmx_bool bBornRadii,
1461                         int flags)
1462 {
1463     int        cg0, cg1, i, j;
1464     double     mu[2*DIM];
1465     gmx_bool   bStateChanged, bNS, bFillGrid, bCalcCGCM;
1466     gmx_bool   bDoForces;
1467     float      cycles_pme, cycles_force;
1468
1469     const int  start  = 0;
1470     const int  homenr = mdatoms->homenr;
1471
1472     clear_mat(vir_force);
1473
1474     cg0 = 0;
1475     if (DOMAINDECOMP(cr))
1476     {
1477         cg1 = cr->dd->ncg_tot;
1478     }
1479     else
1480     {
1481         cg1 = top->cgs.nr;
1482     }
1483     if (fr->n_tpi > 0)
1484     {
1485         cg1--;
1486     }
1487
1488     bStateChanged  = (flags & GMX_FORCE_STATECHANGED);
1489     bNS            = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1490     /* Should we fill the neighborsearching grid on this step? */
1491     bFillGrid      = (bNS && bStateChanged);
1492     bCalcCGCM      = (bFillGrid && !DOMAINDECOMP(cr));
1493     bDoForces      = (flags & GMX_FORCE_FORCES);
1494
1495     if (bStateChanged)
1496     {
1497         update_forcerec(fr, box);
1498
1499         if (inputrecNeedMutot(inputrec))
1500         {
1501             /* Calculate total (local) dipole moment in a temporary common array.
1502              * This makes it possible to sum them over nodes faster.
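             * mu[0..DIM-1] holds the state A dipole and mu[DIM..2*DIM-1] the
             * state B dipole; they are copied into fr->mu_tot further down.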
1503              */
1504             calc_mu(start, homenr,
1505                     x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1506                     mu, mu+DIM);
1507         }
1508     }
1509
1510     if (fr->ePBC != epbcNONE)
1511     {
1512         /* Compute shift vectors every step,
1513          * because of pressure coupling or box deformation!
1514          */
1515         if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1516         {
1517             calc_shifts(box, fr->shift_vec);
1518         }
1519
1520         if (bCalcCGCM)
1521         {
1522             put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1523                                      &(top->cgs), x, fr->cg_cm);
1524             inc_nrnb(nrnb, eNR_CGCM, homenr);
1525             inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1526         }
1527         else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1528         {
1529             unshift_self(graph, box, x);
1530         }
1531     }
1532     else if (bCalcCGCM)
1533     {
1534         calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1535         inc_nrnb(nrnb, eNR_CGCM, homenr);
1536     }
1537
1538     if (bCalcCGCM && gmx_debug_at)
1539     {
1540         pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1541     }
1542
1543 #if GMX_MPI
1544     if (!(cr->duty & DUTY_PME))
1545     {
1546         gmx_bool bBS;
1547         matrix   boxs;
1548
1549         /* Send particle coordinates to the pme nodes.
1550          * Since this is only implemented for domain decomposition
1551          * and domain decomposition does not use the graph,
1552          * we do not need to worry about shifting.
1553          */
1554
1555         wallcycle_start(wcycle, ewcPP_PMESENDX);
1556
1557         bBS = (inputrec->nwall == 2);
1558         if (bBS)
1559         {
1560             copy_mat(box, boxs);
1561             svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1562         }
1563
1564         gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
1565                                  lambda[efptCOUL], lambda[efptVDW],
1566                                  (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
1567                                  step);
1568
1569         wallcycle_stop(wcycle, ewcPP_PMESENDX);
1570     }
1571 #endif /* GMX_MPI */
1572
1573     /* Communicate coordinates and sum dipole if necessary */
1574     if (DOMAINDECOMP(cr))
1575     {
1576         wallcycle_start(wcycle, ewcMOVEX);
1577         dd_move_x(cr->dd, box, x);
1578         wallcycle_stop(wcycle, ewcMOVEX);
1579     }
1580
1581     if (inputrecNeedMutot(inputrec))
1582     {
1583         if (bStateChanged)
1584         {
1585             if (PAR(cr))
1586             {
1587                 gmx_sumd(2*DIM, mu, cr);
1588             }
1589             for (i = 0; i < 2; i++)
1590             {
1591                 for (j = 0; j < DIM; j++)
1592                 {
1593                     fr->mu_tot[i][j] = mu[i*DIM + j];
1594                 }
1595             }
1596         }
1597         if (fr->efep == efepNO)
1598         {
1599             copy_rvec(fr->mu_tot[0], mu_tot);
1600         }
1601         else
1602         {
1603             for (j = 0; j < DIM; j++)
1604             {
1605                 mu_tot[j] =
1606                     (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1607             }
1608         }
1609     }
1610
1611     /* Reset energies */
1612     reset_enerdata(enerd);
1613     clear_rvecs(SHIFTS, fr->fshift);
1614
1615     if (bNS)
1616     {
1617         wallcycle_start(wcycle, ewcNS);
1618
1619         if (graph && bStateChanged)
1620         {
1621             /* Calculate intramolecular shift vectors to make molecules whole */
1622             mk_mshift(fplog, graph, fr->ePBC, box, x);
1623         }
1624
1625         /* Do the actual neighbour searching */
1626         ns(fplog, fr, box,
1627            groups, top, mdatoms,
1628            cr, nrnb, bFillGrid);
1629
1630         wallcycle_stop(wcycle, ewcNS);
1631     }
1632
1633     if (inputrec->implicit_solvent && bNS)
1634     {
1635         make_gb_nblist(cr, inputrec->gb_algorithm,
1636                        x, box, fr, &top->idef, graph, fr->born);
1637     }
1638
1639     if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
1640     {
1641         wallcycle_start(wcycle, ewcPPDURINGPME);
1642         dd_force_flop_start(cr->dd, nrnb);
1643     }
1644
1645     if (inputrec->bRot)
1646     {
1647         /* Enforced rotation has its own cycle counter that starts after the collective
1648          * coordinates have been communicated. It is added to ddCyclF to allow
1649          * for proper load-balancing */
1650         wallcycle_start(wcycle, ewcROT);
1651         do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1652         wallcycle_stop(wcycle, ewcROT);
1653     }
1654
1655     /* Temporary solution until all routines take PaddedRVecVector */
1656     rvec *f = as_rvec_array(force->data());
1657
1658     /* Start the force cycle counter.
1659      * This counter is stopped after do_force_lowlevel.
1660      * No parallel communication should occur while this counter is running,
1661      * since that will interfere with the dynamic load balancing.
1662      */
1663     wallcycle_start(wcycle, ewcFORCE);
1664
1665     if (bDoForces)
1666     {
1667         /* Reset forces for which the virial is calculated separately:
1668          * PME/Ewald forces if necessary */
1669         if (fr->bF_NoVirSum)
1670         {
1671             if (flags & GMX_FORCE_VIRIAL)
1672             {
1673                 fr->f_novirsum = fr->forceBufferNoVirialSummation;
1674                 /* TODO: remove this "- 1" when padding is properly implemented */
1675                 clear_rvecs(fr->f_novirsum->size() - 1,
1676                             as_rvec_array(fr->f_novirsum->data()));
1677             }
1678             else
1679             {
1680                 /* We are not calculating the pressure so we do not need
1681                  * a separate array for forces that do not contribute
1682                  * to the pressure.
1683                  */
1684                 fr->f_novirsum = force;
1685             }
1686         }
1687
1688         /* Clear the short- and long-range forces */
1689         clear_rvecs(fr->natoms_force_constr, f);
1690
1691         clear_rvec(fr->vir_diag_posres);
1692     }
1693     if (inputrec->bPull && pull_have_constraint(inputrec->pull_work))
1694     {
1695         clear_pull_forces(inputrec->pull_work);
1696     }
1697
1698     /* update QMMMrec, if necessary */
1699     if (fr->bQMMM)
1700     {
1701         update_QMMMrec(cr, fr, x, mdatoms, box, top);
1702     }
1703
1704     /* Compute the bonded and non-bonded energies and optionally forces */
1705     do_force_lowlevel(fr, inputrec, &(top->idef),
1706                       cr, nrnb, wcycle, mdatoms,
1707                       x, hist, f, enerd, fcd, top, fr->born,
1708                       bBornRadii, box,
1709                       inputrec->fepvals, lambda,
1710                       graph, &(top->excls), fr->mu_tot,
1711                       flags,
1712                       &cycles_pme);
1713
1714     cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1715
1716     if (ed)
1717     {
1718         do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1719     }
1720
1721     if (DOMAINDECOMP(cr))
1722     {
1723         dd_force_flop_stop(cr->dd, nrnb);
1724         if (wcycle)
1725         {
1726             dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1727         }
1728     }
1729
1730     if (bDoForces)
1731     {
1732         /* Compute forces due to electric field */
1733         if (fr->efield != nullptr)
1734         {
1735             fr->efield->calculateForces(cr, mdatoms, fr->f_novirsum, t);
1736         }
1737
1738         /* Communicate the forces */
1739         if (DOMAINDECOMP(cr))
1740         {
1741             wallcycle_start(wcycle, ewcMOVEF);
1742             dd_move_f(cr->dd, f, fr->fshift);
1743             /* Do we need to communicate the separate force array
1744              * for terms that do not contribute to the single sum virial?
1745              * Position restraints and electric fields do not introduce
1746              * inter-cg forces, only full electrostatics methods do.
1747              * When we do not calculate the virial, fr->f_novirsum = f,
1748              * so we have already communicated these forces.
1749              */
1750             if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1751                 (flags & GMX_FORCE_VIRIAL))
1752             {
1753                 dd_move_f(cr->dd, as_rvec_array(fr->f_novirsum->data()), NULL);
1754             }
1755             wallcycle_stop(wcycle, ewcMOVEF);
1756         }
1757
1758         /* If we have NoVirSum forces, but we do not calculate the virial,
1759          * we sum fr->f_novirsum=f later.
1760          */
1761         if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1762         {
1763             wallcycle_start(wcycle, ewcVSITESPREAD);
1764             spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1765                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1766             wallcycle_stop(wcycle, ewcVSITESPREAD);
1767         }
1768
1769         if (flags & GMX_FORCE_VIRIAL)
1770         {
1771             /* Calculation of the virial must be done after vsites! */
1772             calc_virial(0, mdatoms->homenr, x, f,
1773                         vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1774         }
1775     }
1776
1777     if (inputrec->bPull && pull_have_potential(inputrec->pull_work))
1778     {
1779         pull_potential_wrapper(cr, inputrec, box, x,
1780                                f, vir_force, mdatoms, enerd, lambda, t,
1781                                wcycle);
1782     }
1783
1784     /* Add the forces from enforced rotation potentials (if any) */
1785     if (inputrec->bRot)
1786     {
1787         wallcycle_start(wcycle, ewcROTadd);
1788         enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1789         wallcycle_stop(wcycle, ewcROTadd);
1790     }
1791
1792     /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1793     IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
1794
1795     if (PAR(cr) && !(cr->duty & DUTY_PME))
1796     {
1797         /* In case of node-splitting, the PP nodes receive the long-range
1798          * forces, virial and energy from the PME nodes here.
1799          */
1800         pme_receive_force_ener(cr, wcycle, enerd, fr);
1801     }
1802
1803     if (bDoForces)
1804     {
1805         post_process_forces(cr, step, nrnb, wcycle,
1806                             top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1807                             flags);
1808     }
1809
1810     if (flags & GMX_FORCE_ENERGY)
1811     {
1812         /* Sum the potential energy terms from group contributions */
1813         sum_epot(&(enerd->grpp), enerd->term);
1814
1815         if (!EI_TPI(inputrec->eI))
1816         {
1817             checkPotentialEnergyValidity(enerd);
1818         }
1819     }
1820
1821 }
1822
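/* Main force driver: dispatches to the Verlet or group cut-off scheme
 * implementation selected by inputrec->cutoff_scheme.
 */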
1823 void do_force(FILE *fplog, t_commrec *cr,
1824               t_inputrec *inputrec,
1825               gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1826               gmx_localtop_t *top,
1827               gmx_groups_t *groups,
1828               matrix box, PaddedRVecVector *coordinates, history_t *hist,
1829               PaddedRVecVector *force,
1830               tensor vir_force,
1831               t_mdatoms *mdatoms,
1832               gmx_enerdata_t *enerd, t_fcdata *fcd,
1833               std::vector<real> *lambda, t_graph *graph,
1834               t_forcerec *fr,
1835               gmx_vsite_t *vsite, rvec mu_tot,
1836               double t, gmx_edsam_t ed,
1837               gmx_bool bBornRadii,
1838               int flags)
1839 {
1840     /* modify force flag if not doing nonbonded */
1841     if (!fr->bNonbonded)
1842     {
1843         flags &= ~GMX_FORCE_NONBONDED;
1844     }
1845
1846     GMX_ASSERT(coordinates->size() >= static_cast<unsigned int>(fr->natoms_force + 1), "We might need 1 element extra for SIMD");
1847     GMX_ASSERT(force->size() >= static_cast<unsigned int>(fr->natoms_force + 1), "We might need 1 element extra for SIMD");
1848
1849     rvec *x = as_rvec_array(coordinates->data());
1850
1851     switch (inputrec->cutoff_scheme)
1852     {
1853         case ecutsVERLET:
1854             do_force_cutsVERLET(fplog, cr, inputrec,
1855                                 step, nrnb, wcycle,
1856                                 top,
1857                                 groups,
1858                                 box, x, hist,
1859                                 force, vir_force,
1860                                 mdatoms,
1861                                 enerd, fcd,
1862                                 lambda->data(), graph,
1863                                 fr, fr->ic,
1864                                 vsite, mu_tot,
1865                                 t, ed,
1866                                 bBornRadii,
1867                                 flags);
1868             break;
1869         case ecutsGROUP:
1870             do_force_cutsGROUP(fplog, cr, inputrec,
1871                                step, nrnb, wcycle,
1872                                top,
1873                                groups,
1874                                box, x, hist,
1875                                force, vir_force,
1876                                mdatoms,
1877                                enerd, fcd,
1878                                lambda->data(), graph,
1879                                fr, vsite, mu_tot,
1880                                t, ed,
1881                                bBornRadii,
1882                                flags);
1883             break;
1884         default:
1885             gmx_incons("Invalid cut-off scheme passed!");
1886     }
1887 }
1888
1889
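/* Constrain the starting coordinates (and, for velocity Verlet, the initial
 * velocities). For the other integrators with velocities, the initial
 * velocities at t-dt/2 are constrained via the velocity-reversal trick below.
 */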
1890 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
1891                         t_inputrec *ir, t_mdatoms *md,
1892                         t_state *state, t_commrec *cr, t_nrnb *nrnb,
1893                         t_forcerec *fr, gmx_localtop_t *top)
1894 {
1895     int             i, m, start, end;
1896     gmx_int64_t     step;
1897     real            dt = ir->delta_t;
1898     real            dvdl_dum;
1899     rvec           *savex;
1900
1901     /* We need to allocate one element extra, since we might use
1902      * (unaligned) 4-wide SIMD loads to access rvec entries.
1903      */
1904     snew(savex, state->natoms + 1);
1905
1906     start = 0;
1907     end   = md->homenr;
1908
1909     if (debug)
1910     {
1911         fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
1912                 start, md->homenr, end);
1913     }
1914     /* Do a first constrain to reset particles... */
1915     step = ir->init_step;
1916     if (fplog)
1917     {
1918         char buf[STEPSTRSIZE];
1919         fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
1920                 gmx_step_str(step, buf));
1921     }
1922     dvdl_dum = 0;
1923
1924     /* constrain the current position */
1925     constrain(NULL, TRUE, FALSE, constr, &(top->idef),
1926               ir, cr, step, 0, 1.0, md,
1927               as_rvec_array(state->x.data()), as_rvec_array(state->x.data()), NULL,
1928               fr->bMolPBC, state->box,
1929               state->lambda[efptBONDED], &dvdl_dum,
1930               NULL, NULL, nrnb, econqCoord);
1931     if (EI_VV(ir->eI))
1932     {
1933         /* constrain the initial velocity, and save it */
1934         /* this may also be useful if we need the ekin from the halfstep for velocity verlet */
1935         constrain(NULL, TRUE, FALSE, constr, &(top->idef),
1936                   ir, cr, step, 0, 1.0, md,
1937                   as_rvec_array(state->x.data()), as_rvec_array(state->v.data()), as_rvec_array(state->v.data()),
1938                   fr->bMolPBC, state->box,
1939                   state->lambda[efptBONDED], &dvdl_dum,
1940                   NULL, NULL, nrnb, econqVeloc);
1941     }
1942     /* constrain the initial velocities at t-dt/2 */
1943     if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
1944     {
1945         for (i = start; (i < end); i++)
1946         {
1947             for (m = 0; (m < DIM); m++)
1948             {
1949                 /* Reverse the velocity */
1950                 state->v[i][m] = -state->v[i][m];
1951                 /* Store the position at t-dt in buf */
1952                 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
1953             }
1954         }
1955         /* Shake the positions at t=-dt with the positions at t=0
1956          * as reference coordinates.
1957          */
1958         if (fplog)
1959         {
1960             char buf[STEPSTRSIZE];
1961             fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
1962                     gmx_step_str(step, buf));
1963         }
1964         dvdl_dum = 0;
1965         constrain(NULL, TRUE, FALSE, constr, &(top->idef),
1966                   ir, cr, step, -1, 1.0, md,
1967                   as_rvec_array(state->x.data()), savex, NULL,
1968                   fr->bMolPBC, state->box,
1969                   state->lambda[efptBONDED], &dvdl_dum,
1970                   as_rvec_array(state->v.data()), NULL, nrnb, econqCoord);
1971
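        /* Passing state->v above lets constrain() correct the (still reversed)
         * velocities for the constraints as well; their sign is restored below.
         */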
1972         for (i = start; i < end; i++)
1973         {
1974             for (m = 0; m < DIM; m++)
1975             {
1976                 /* Re-reverse the velocities */
1977                 state->v[i][m] = -state->v[i][m];
1978             }
1979         }
1980     }
1981     sfree(savex);
1982 }
1983
1984
1985 static void
1986 integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
1987                 double *enerout, double *virout)
1988 {
1989     double enersum, virsum;
1990     double invscale, invscale2, invscale3;
1991     double r, ea, eb, ec, pa, pb, pc, pd;
1992     double y0, f, g, h;
1993     int    ri, offset;
1994     double tabfactor;
1995
1996     invscale  = 1.0/scale;
1997     invscale2 = invscale*invscale;
1998     invscale3 = invscale*invscale2;
1999
2000     /* Following summation derived from cubic spline definition,
2001      * Numerical Recipes in C, second edition, p. 113-116.  Exact for
2002      * the cubic spline.  We first calculate the negative of the
2003      * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
2004      * add the more standard, abrupt cutoff correction to that result,
2005      * yielding the long-range correction for a switched function.  We
2006      * perform both the pressure and energy loops at the same time for
2007      * simplicity, as the computational cost is low. */
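    /* Within one table bin the potential is the cubic spline
     * V(t) = y0 + f*t + g*t^2 + h*t^3, with t in [0,1] the fractional position
     * in the bin. With r = ri*invscale the bin's lower edge, r^2*dr expands to
     * (ea*t^2 + eb*t + ec)*dt and r^3 to pa*t^3 + pb*t^2 + pc*t + pd, so the
     * loop below accumulates the bin integrals of r^2*V(r) (enersum) and of
     * r^3*dV/dr (virsum).
     */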
2008
2009     if (offstart == 0)
2010     {
2011         /* Since the dispersion table has been scaled down by a factor of
2012          * 6.0 and the repulsion by a factor of 12.0 to compensate for the
2013          * c6/c12 parameters inside nbfp[] being scaled up (to save
2014          * flops in kernels), we need to correct for this.
2015          */
2016         tabfactor = 6.0;
2017     }
2018     else
2019     {
2020         tabfactor = 12.0;
2021     }
2022
2023     enersum = 0.0;
2024     virsum  = 0.0;
2025     for (ri = rstart; ri < rend; ++ri)
2026     {
2027         r  = ri*invscale;
2028         ea = invscale3;
2029         eb = 2.0*invscale2*r;
2030         ec = invscale*r*r;
2031
2032         pa = invscale3;
2033         pb = 3.0*invscale2*r;
2034         pc = 3.0*invscale*r*r;
2035         pd = r*r*r;
2036
2037         /* this "8" comes from the packing of the vdwtab array - perhaps
2038            it should be a named constant? */
2039
2040         offset = 8*ri + offstart;
2041         y0     = vdwtab[offset];
2042         f      = vdwtab[offset+1];
2043         g      = vdwtab[offset+2];
2044         h      = vdwtab[offset+3];
2045
2046         enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2047         virsum  +=  f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2048     }
2049     *enerout = 4.0*M_PI*enersum*tabfactor;
2050     *virout  = 4.0*M_PI*virsum*tabfactor;
2051 }
2052
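/* Precompute the energy and virial differences used for the long-range
 * dispersion correction: the constant potential shift inside the cut-off,
 * the table integral over the switching region, and the analytical 1/r^6
 * and 1/r^12 tails beyond the cut-off.
 */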
2053 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2054 {
2055     double   eners[2], virs[2], enersum, virsum;
2056     double   r0, rc3, rc9;
2057     int      ri0, ri1, i;
2058     real     scale, *vdwtab;
2059
2060     fr->enershiftsix    = 0;
2061     fr->enershifttwelve = 0;
2062     fr->enerdiffsix     = 0;
2063     fr->enerdifftwelve  = 0;
2064     fr->virdiffsix      = 0;
2065     fr->virdifftwelve   = 0;
2066
2067     if (eDispCorr != edispcNO)
2068     {
2069         for (i = 0; i < 2; i++)
2070         {
2071             eners[i] = 0;
2072             virs[i]  = 0;
2073         }
2074         if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
2075             (fr->vdw_modifier == eintmodPOTSWITCH) ||
2076             (fr->vdw_modifier == eintmodFORCESWITCH) ||
2077             (fr->vdwtype == evdwSHIFT) ||
2078             (fr->vdwtype == evdwSWITCH))
2079         {
2080             if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
2081                  (fr->vdw_modifier == eintmodFORCESWITCH) ||
2082                  (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
2083             {
2084                 gmx_fatal(FARGS,
2085                           "With dispersion correction rvdw-switch can not be zero "
2086                           "for vdw-type = %s", evdw_names[fr->vdwtype]);
2087             }
2088
2089             /* TODO This code depends on the logic in tables.c that
2090                constructs the table layout, which should be made
2091                explicit in future cleanup. */
2092             GMX_ASSERT(fr->dispersionCorrectionTable->interaction == GMX_TABLE_INTERACTION_VDWREP_VDWDISP,
2093                        "Dispersion-correction code needs a table with both repulsion and dispersion terms");
2094             scale  = fr->dispersionCorrectionTable->scale;
2095             vdwtab = fr->dispersionCorrectionTable->data;
2096
2097             /* Round the cut-offs to exact table values for precision */
2098             ri0  = static_cast<int>(floor(fr->rvdw_switch*scale));
2099             ri1  = static_cast<int>(ceil(fr->rvdw*scale));
2100
2101             /* The code below has some support for handling force-switching, i.e.
2102              * when the force (instead of potential) is switched over a limited
2103              * region. This leads to a constant shift in the potential inside the
2104              * switching region, which we can handle by adding a constant energy
2105              * term in the force-switch case just like when we do potential-shift.
2106              *
2107              * For now this is not enabled, but to keep the functionality in the
2108              * code we check separately for switch and shift. When we do force-switch
2109              * the shifting point is rvdw_switch, while it is the cutoff when we
2110              * have a classical potential-shift.
2111              *
2112              * For a pure potential-shift the potential has a constant shift
2113              * all the way out to the cutoff, and that is it. For other forms
2114              * we need to calculate the constant shift up to the point where we
2115              * start modifying the potential.
2116              */
2117             ri0  = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;
2118
2119             r0   = ri0/scale;
2120             rc3  = r0*r0*r0;
2121             rc9  = rc3*rc3*rc3;
2122
2123             if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
2124                 (fr->vdwtype == evdwSHIFT))
2125             {
2126                 /* Determine the constant energy shift below rvdw_switch.
2127                  * The table has been scaled down by the derivative factors (6 and 12) to compensate
2128                  * for the c6/c12 parameters being scaled up, which saves flops in the analytical kernels.
2129                  */
2130                 fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2131                 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2132             }
2133             else if (fr->vdw_modifier == eintmodPOTSHIFT)
2134             {
2135                 fr->enershiftsix    = (real)(-1.0/(rc3*rc3));
2136                 fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
2137             }
2138
2139             /* Add the constant part from 0 to rvdw_switch.
2140              * This integration from 0 to rvdw_switch overcounts the number
2141              * of interactions by 1, as it also counts the self interaction.
2142              * We will correct for this later.
2143              */
2144             eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2145             eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2146
2147             /* Calculate the contribution in the range [r0,r1] where we
2148              * modify the potential. For a pure potential-shift modifier we will
2149              * have ri0==ri1, and there will not be any contribution here.
2150              */
2151             for (i = 0; i < 2; i++)
2152             {
2153                 enersum = 0;
2154                 virsum  = 0;
2155                 integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
2156                 eners[i] -= enersum;
2157                 virs[i]  -= virsum;
2158             }
2159
2160             /* Alright: Above we compensated by REMOVING the parts outside r0
2161      * corresponding to the ideal VdW 1/r6 and 1/r12 potentials.
2162              *
2163              * Regardless of whether r0 is the point where we start switching,
2164              * or the cutoff where we calculated the constant shift, we include
2165              * all the parts we are missing out to infinity from r0 by
2166              * calculating the analytical dispersion correction.
2167              */
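            /* The tail terms below are the analytical integrals
             * int_{r0}^{inf} 4*pi*r^2*(-1/r^6)  dr = -4*pi/(3*rc3)  and
             * int_{r0}^{inf} 4*pi*r^2*( 1/r^12) dr =  4*pi/(9*rc9);
             * the virial terms use r*dV/dr in place of the potential.
             */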
2168             eners[0] += -4.0*M_PI/(3.0*rc3);
2169             eners[1] +=  4.0*M_PI/(9.0*rc9);
2170             virs[0]  +=  8.0*M_PI/rc3;
2171             virs[1]  += -16.0*M_PI/(3.0*rc9);
2172         }
2173         else if (fr->vdwtype == evdwCUT ||
2174                  EVDW_PME(fr->vdwtype) ||
2175                  fr->vdwtype == evdwUSER)
2176         {
2177             if (fr->vdwtype == evdwUSER && fplog)
2178             {
2179                 fprintf(fplog,
2180                         "WARNING: using dispersion correction with user tables\n");
2181             }
2182
2183             /* Note that with LJ-PME, the dispersion correction is multiplied
2184              * by the difference between the actual C6 and the value of C6
2185              * that would produce the combination rule.
2186              * This means the normal energy and virial difference formulas
2187              * can be used here.
2188              */
2189
2190             rc3  = fr->rvdw*fr->rvdw*fr->rvdw;
2191             rc9  = rc3*rc3*rc3;
2192             /* Contribution beyond the cut-off */
2193             eners[0] += -4.0*M_PI/(3.0*rc3);
2194             eners[1] +=  4.0*M_PI/(9.0*rc9);
2195             if (fr->vdw_modifier == eintmodPOTSHIFT)
2196             {
2197                 /* Contribution within the cut-off */
2198                 eners[0] += -4.0*M_PI/(3.0*rc3);
2199                 eners[1] +=  4.0*M_PI/(3.0*rc9);
2200             }
2201             /* Contribution beyond the cut-off */
2202             virs[0]  +=  8.0*M_PI/rc3;
2203             virs[1]  += -16.0*M_PI/(3.0*rc9);
2204         }
2205         else
2206         {
2207             gmx_fatal(FARGS,
2208                       "Dispersion correction is not implemented for vdw-type = %s",
2209                       evdw_names[fr->vdwtype]);
2210         }
2211
2212         /* When we deprecate the group kernels the code below can go too */
2213         if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
2214         {
2215             /* Calculate self-interaction coefficient (assuming that
2216              * the reciprocal-space contribution is constant in the
2217              * region that contributes to the self-interaction).
2218              */
2219             fr->enershiftsix = gmx::power6(fr->ewaldcoeff_lj) / 6.0;
2220
2221             eners[0] += -gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj)/3.0;
2222             virs[0]  +=  gmx::power3(std::sqrt(M_PI)*fr->ewaldcoeff_lj);
2223         }
2224
2225         fr->enerdiffsix    = eners[0];
2226         fr->enerdifftwelve = eners[1];
2227         /* The 0.5 is due to the Gromacs definition of the virial */
2228         fr->virdiffsix     = 0.5*virs[0];
2229         fr->virdifftwelve  = 0.5*virs[1];
2230     }
2231 }
2232
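/* Apply the dispersion correction precomputed in calc_enervirdiff() to the
 * energy, pressure/virial and dV/dlambda for the current box volume and
 * lambda value.
 */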
2233 void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
2234                    matrix box, real lambda, tensor pres, tensor virial,
2235                    real *prescorr, real *enercorr, real *dvdlcorr)
2236 {
2237     gmx_bool bCorrAll, bCorrPres;
2238     real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2239     int      m;
2240
2241     *prescorr = 0;
2242     *enercorr = 0;
2243     *dvdlcorr = 0;
2244
2245     clear_mat(virial);
2246     clear_mat(pres);
2247
2248     if (ir->eDispCorr != edispcNO)
2249     {
2250         bCorrAll  = (ir->eDispCorr == edispcAllEner ||
2251                      ir->eDispCorr == edispcAllEnerPres);
2252         bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2253                      ir->eDispCorr == edispcAllEnerPres);
2254
2255         invvol = 1/det(box);
2256         if (fr->n_tpi)
2257         {
2258             /* Only correct for the interactions with the inserted molecule */
2259             dens   = (fr->numAtomsForDispersionCorrection - fr->n_tpi)*invvol;
2260             ninter = fr->n_tpi;
2261         }
2262         else
2263         {
2264             dens   = fr->numAtomsForDispersionCorrection*invvol;
2265             ninter = 0.5*fr->numAtomsForDispersionCorrection;
2266         }
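        /* With ninter*dens = N*N/(2*V) this reproduces the usual homogeneous
         * N^2/2 pair count of the dispersion correction; for TPI only the
         * pairs involving the inserted molecule are counted.
         */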
2267
2268         if (ir->efep == efepNO)
2269         {
2270             avcsix    = fr->avcsix[0];
2271             avctwelve = fr->avctwelve[0];
2272         }
2273         else
2274         {
2275             avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
2276             avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2277         }
2278
2279         enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2280         *enercorr += avcsix*enerdiff;
2281         dvdlambda  = 0.0;
2282         if (ir->efep != efepNO)
2283         {
2284             dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2285         }
2286         if (bCorrAll)
2287         {
2288             enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2289             *enercorr += avctwelve*enerdiff;
2290             if (fr->efep != efepNO)
2291             {
2292                 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2293             }
2294         }
2295
2296         if (bCorrPres)
2297         {
2298             svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2299             if (ir->eDispCorr == edispcAllEnerPres)
2300             {
2301                 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2302             }
2303             /* The factor 2 is because of the Gromacs virial definition */
2304             spres = -2.0*invvol*svir*PRESFAC;
2305
2306             for (m = 0; m < DIM; m++)
2307             {
2308                 virial[m][m] += svir;
2309                 pres[m][m]   += spres;
2310             }
2311             *prescorr += spres;
2312         }
2313
2314         /* Can't currently control when it prints; for now, just print when debugging */
2315         if (debug)
2316         {
2317             if (bCorrAll)
2318             {
2319                 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2320                         avcsix, avctwelve);
2321             }
2322             if (bCorrPres)
2323             {
2324                 fprintf(debug,
2325                         "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2326                         *enercorr, spres, svir);
2327             }
2328             else
2329             {
2330                 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2331             }
2332         }
2333
2334         if (fr->efep != efepNO)
2335         {
2336             *dvdlcorr += dvdlambda;
2337         }
2338     }
2339 }
2340
2341 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2342                   t_graph *graph, rvec x[])
2343 {
2344     if (fplog)
2345     {
2346         fprintf(fplog, "Removing pbc first time\n");
2347     }
2348     calc_shifts(box, fr->shift_vec);
2349     if (graph)
2350     {
2351         mk_mshift(fplog, graph, fr->ePBC, box, x);
2352         if (gmx_debug_at)
2353         {
2354             p_graph(debug, "do_pbc_first 1", graph);
2355         }
2356         shift_self(graph, box, x);
2357         /* By doing an extra mk_mshift the molecules that are broken
2358          * because they were e.g. imported from other software
2359          * will be made whole again. Such are the healing powers
2360          * of GROMACS.
2361          */
2362         mk_mshift(fplog, graph, fr->ePBC, box, x);
2363         if (gmx_debug_at)
2364         {
2365             p_graph(debug, "do_pbc_first 2", graph);
2366         }
2367     }
2368     if (fplog)
2369     {
2370         fprintf(fplog, "Done rmpbc\n");
2371     }
2372 }
2373
2374 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2375                             const gmx_mtop_t *mtop, rvec x[],
2376                             gmx_bool bFirst)
2377 {
2378     t_graph        *graph;
2379     int             mb, as, mol;
2380     gmx_molblock_t *molb;
2381
2382     if (bFirst && fplog)
2383     {
2384         fprintf(fplog, "Removing pbc first time\n");
2385     }
2386
2387     snew(graph, 1);
2388     as = 0;
2389     for (mb = 0; mb < mtop->nmolblock; mb++)
2390     {
2391         molb = &mtop->molblock[mb];
2392         if (molb->natoms_mol == 1 ||
2393             (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2394         {
2395             /* Just one atom or charge group in the molecule, no PBC required */
2396             as += molb->nmol*molb->natoms_mol;
2397         }
2398         else
2399         {
2400             /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2401             mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2402                            0, molb->natoms_mol, FALSE, FALSE, graph);
2403
2404             for (mol = 0; mol < molb->nmol; mol++)
2405             {
2406                 mk_mshift(fplog, graph, ePBC, box, x+as);
2407
2408                 shift_self(graph, box, x+as);
2409                 /* The molecule is whole now.
2410                  * We don't need the second mk_mshift call as in do_pbc_first,
2411                  * since we no longer need this graph.
2412                  */
2413
2414                 as += molb->natoms_mol;
2415             }
2416             done_graph(graph);
2417         }
2418     }
2419     sfree(graph);
2420 }
2421
2422 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2423                        const gmx_mtop_t *mtop, rvec x[])
2424 {
2425     low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2426 }
2427
2428 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2429                  gmx_mtop_t *mtop, rvec x[])
2430 {
2431     low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2432 }
2433
2434 void put_atoms_in_box_omp(int ePBC, const matrix box, int natoms, rvec x[])
2435 {
2436     int t, nth;
2437     nth = gmx_omp_nthreads_get(emntDefault);
2438
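    /* Each thread puts a contiguous block of atoms back into the box */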
2439 #pragma omp parallel for num_threads(nth) schedule(static)
2440     for (t = 0; t < nth; t++)
2441     {
2442         try
2443         {
2444             int offset, len;
2445
2446             offset = (natoms*t    )/nth;
2447             len    = (natoms*(t + 1))/nth - offset;
2448             put_atoms_in_box(ePBC, box, len, x + offset);
2449         }
2450         GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
2451     }
2452 }
2453
2454 // TODO This can be cleaned up a lot, and moved back to runner.cpp
2455 void finish_run(FILE *fplog, const gmx::MDLogger &mdlog, t_commrec *cr,
2456                 t_inputrec *inputrec,
2457                 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2458                 gmx_walltime_accounting_t walltime_accounting,
2459                 nonbonded_verlet_t *nbv,
2460                 gmx_bool bWriteStat)
2461 {
2462     t_nrnb *nrnb_tot = NULL;
2463     double  delta_t  = 0;
2464     double  nbfs     = 0, mflop = 0;
2465     double  elapsed_time,
2466             elapsed_time_over_all_ranks,
2467             elapsed_time_over_all_threads,
2468             elapsed_time_over_all_threads_over_all_ranks;
2469
2470     if (cr->nnodes > 1)
2471     {
2472         snew(nrnb_tot, 1);
2473 #if GMX_MPI
2474         MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2475                       cr->mpi_comm_mysim);
2476 #endif
2477     }
2478     else
2479     {
2480         nrnb_tot = nrnb;
2481     }
2482
2483     elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
2484     elapsed_time_over_all_ranks                  = elapsed_time;
2485     elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
2486     elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
2487 #if GMX_MPI
2488     if (cr->nnodes > 1)
2489     {
2490         /* reduce elapsed_time over all MPI ranks in the current simulation */
2491         MPI_Allreduce(&elapsed_time,
2492                       &elapsed_time_over_all_ranks,
2493                       1, MPI_DOUBLE, MPI_SUM,
2494                       cr->mpi_comm_mysim);
2495         elapsed_time_over_all_ranks /= cr->nnodes;
2496         /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
2497          * current simulation. */
2498         MPI_Allreduce(&elapsed_time_over_all_threads,
2499                       &elapsed_time_over_all_threads_over_all_ranks,
2500                       1, MPI_DOUBLE, MPI_SUM,
2501                       cr->mpi_comm_mysim);
2502     }
2503 #endif
2504
2505     if (SIMMASTER(cr))
2506     {
2507         print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2508     }
2509     if (cr->nnodes > 1)
2510     {
2511         sfree(nrnb_tot);
2512     }
2513
2514     if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2515     {
2516         print_dd_statistics(cr, inputrec, fplog);
2517     }
2518
2519     /* TODO Move the responsibility for any scaling by thread counts
2520      * to the code that handled the thread region, so that there's a
2521      * mechanism to keep cycle counting working during the transition
2522      * to task parallelism. */
2523     int nthreads_pp  = gmx_omp_nthreads_get(emntNonbonded);
2524     int nthreads_pme = gmx_omp_nthreads_get(emntPME);
2525     wallcycle_scale_by_num_threads(wcycle, cr->duty == DUTY_PME, nthreads_pp, nthreads_pme);
2526     auto cycle_sum(wallcycle_sum(cr, wcycle));
2527
2528     if (SIMMASTER(cr))
2529     {
2530         struct gmx_wallclock_gpu_t* gputimes = use_GPU(nbv) ? nbnxn_gpu_get_timings(nbv->gpu_nbv) : NULL;
2531
2532         wallcycle_print(fplog, mdlog, cr->nnodes, cr->npmenodes, nthreads_pp, nthreads_pme,
2533                         elapsed_time_over_all_ranks,
2534                         wcycle, cycle_sum, gputimes);
2535
2536         if (EI_DYNAMICS(inputrec->eI))
2537         {
2538             delta_t = inputrec->delta_t;
2539         }
2540
2541         if (fplog)
2542         {
2543             print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
2544                        elapsed_time_over_all_ranks,
2545                        walltime_accounting_get_nsteps_done(walltime_accounting),
2546                        delta_t, nbfs, mflop);
2547         }
2548         if (bWriteStat)
2549         {
2550             print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
2551                        elapsed_time_over_all_ranks,
2552                        walltime_accounting_get_nsteps_done(walltime_accounting),
2553                        delta_t, nbfs, mflop);
2554         }
2555     }
2556 }
2557
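/* Fill the lambda vector (and optionally lam0) from init_lambda when it is
 * set, otherwise from all_lambda at the initial fep state; for simulated
 * tempering also reset the reference temperatures to match that state.
 */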
2558 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, std::vector<real> *lambda, double *lam0)
2559 {
2560     /* this function works, but could probably use a logic rewrite to keep all the different
2561        types of efep straight. */
2562
2563     if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2564     {
2565         return;
2566     }
2567
2568     t_lambda *fep = ir->fepvals;
2569     *fep_state    = fep->init_fep_state; /* this might overwrite the value read from a
2570                                             checkpoint, if one is set -- a kludge is in
2571                                             place for now to prevent this. */
2572
2573     lambda->resize(efptNR);
2574
2575     for (int i = 0; i < efptNR; i++)
2576     {
2577         /* overwrite lambda state with init_lambda for now for backwards compatibility */
2578         if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2579         {
2580             (*lambda)[i] = fep->init_lambda;
2581             if (lam0)
2582             {
2583                 lam0[i] = (*lambda)[i];
2584             }
2585         }
2586         else
2587         {
2588             (*lambda)[i] = fep->all_lambda[i][*fep_state];
2589             if (lam0)
2590             {
2591                 lam0[i] = (*lambda)[i];
2592             }
2593         }
2594     }
2595     if (ir->bSimTemp)
2596     {
2597         /* need to rescale control temperatures to match current state */
2598         for (int i = 0; i < ir->opts.ngtc; i++)
2599         {
2600             if (ir->opts.ref_t[i] > 0)
2601             {
2602                 ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2603             }
2604         }
2605     }
2606
2607     /* Send to the log the information on the current lambdas */
2608     if (fplog != NULL)
2609     {
2610         fprintf(fplog, "Initial vector of lambda components:[ ");
2611         for (int i = 0; i < efptNR; i++)
2612         {
2613             fprintf(fplog, "%10.4f ", (*lambda)[i]);
2614         }
2615         fprintf(fplog, "]\n");
2616     }
2617     return;
2618 }
2619
2620
2621 void init_md(FILE *fplog,
2622              t_commrec *cr, t_inputrec *ir, const gmx_output_env_t *oenv,
2623              double *t, double *t0,
2624              std::vector<real> *lambda, int *fep_state, double *lam0,
2625              t_nrnb *nrnb, gmx_mtop_t *mtop,
2626              gmx_update_t **upd,
2627              int nfile, const t_filenm fnm[],
2628              gmx_mdoutf_t *outf, t_mdebin **mdebin,
2629              tensor force_vir, tensor shake_vir, rvec mu_tot,
2630              gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
2631              gmx_wallcycle_t wcycle)
2632 {
2633     int  i;
2634
2635     /* Initial values */
2636     *t = *t0       = ir->init_t;
2637
2638     *bSimAnn = FALSE;
2639     for (i = 0; i < ir->opts.ngtc; i++)
2640     {
2641         /* set bSimAnn if any group is being annealed */
2642         if (ir->opts.annealing[i] != eannNO)
2643         {
2644             *bSimAnn = TRUE;
2645         }
2646     }
2647
2648     /* Initialize lambda variables */
2649     initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2650
2651     // TODO upd is never NULL in practice, but the analysers don't know that
2652     if (upd)
2653     {
2654         *upd = init_update(ir);
2655     }
2656     if (*bSimAnn)
2657     {
2658         update_annealing_target_temp(ir, ir->init_t, upd ? *upd : NULL);
2659     }
2660
2661     if (vcm != NULL)
2662     {
2663         *vcm = init_vcm(fplog, &mtop->groups, ir);
2664     }
2665
2666     if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2667     {
2668         if (ir->etc == etcBERENDSEN)
2669         {
2670             please_cite(fplog, "Berendsen84a");
2671         }
2672         if (ir->etc == etcVRESCALE)
2673         {
2674             please_cite(fplog, "Bussi2007a");
2675         }
2676         if (ir->eI == eiSD1)
2677         {
2678             please_cite(fplog, "Goga2012");
2679         }
2680     }
2681     init_nrnb(nrnb);
2682
2683     if (nfile != -1)
2684     {
2685         *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv, wcycle);
2686
2687         *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : mdoutf_get_fp_ene(*outf),
2688                               mtop, ir, mdoutf_get_fp_dhdl(*outf));
2689     }
2690
2691     /* Initiate variables */
2692     clear_mat(force_vir);
2693     clear_mat(shake_vir);
2694     clear_rvec(mu_tot);
2695 }