Encapsulate gmx_walltime_accounting_t into new timing module
[alexxy/gromacs.git] / src / gromacs / mdlib / sim_util.c
/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
 *
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 *          GROningen MAchine for Chemical Simulations
 *
 *                        VERSION 3.2.0
 * Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team,
 * check out http://www.gromacs.org for more information.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * If you want to redistribute modifications, please consider that
 * scientific software is very special. Version control is crucial -
 * bugs must be traceable. We will be happy to consider code for
 * inclusion in the official distribution, but derived work must not
 * be called official GROMACS. Details are found in the README & COPYING
 * files - if they are missing, get the official version at www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the papers on the package - you can find them in the top README file.
 *
 * For more info, check our website at http://www.gromacs.org
 *
 * And Hey:
 * GROwing Monsters And Cloning Shrimps
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <stdio.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <math.h>
#include "typedefs.h"
#include "string2.h"
#include "gmxfio.h"
#include "smalloc.h"
#include "names.h"
#include "confio.h"
#include "mvdata.h"
#include "txtdump.h"
#include "pbc.h"
#include "chargegroup.h"
#include "vec.h"
#include "nrnb.h"
#include "mshift.h"
#include "mdrun.h"
#include "sim_util.h"
#include "update.h"
#include "physics.h"
#include "main.h"
#include "mdatoms.h"
#include "force.h"
#include "bondf.h"
#include "pme.h"
#include "disre.h"
#include "orires.h"
#include "network.h"
#include "calcmu.h"
#include "constr.h"
#include "xvgr.h"
#include "trnio.h"
#include "xtcio.h"
#include "copyrite.h"
#include "pull_rotation.h"
#include "gmx_random.h"
#include "domdec.h"
#include "partdec.h"
#include "gmx_wallcycle.h"
#include "genborn.h"
#include "nbnxn_atomdata.h"
#include "nbnxn_search.h"
#include "nbnxn_kernels/nbnxn_kernel_ref.h"
#include "nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"

#include "gromacs/utility/gmxmpi.h"
#include "gromacs/timing/walltime_accounting.h"

#include "adress.h"
#include "qmmm.h"

#include "nbnxn_cuda_data_mgmt.h"
#include "nbnxn_cuda/nbnxn_cuda.h"
void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                gmx_large_int_t           step,
                t_inputrec               *ir,
                t_commrec gmx_unused     *cr)
{
    time_t finish;
    char   timebuf[STRLEN];
    double dt, elapsed_seconds, time_per_step;
    char   buf[48];

#ifndef GMX_THREAD_MPI
    if (!PAR(cr))
#endif
    {
        fprintf(out, "\r");
    }
    fprintf(out, "step %s", gmx_step_str(step, buf));
    if (step >= ir->nstlist)
    {
        double seconds_since_epoch = gmx_gettime();
        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step) * time_per_step;

        if (ir->nsteps >= 0)
        {
            if (dt >= 300)
            {
                finish = (time_t) (seconds_since_epoch + dt);
                gmx_ctime_r(&finish, timebuf, STRLEN);
                sprintf(buf, "%s", timebuf);
                buf[strlen(buf)-1] = '\0';
                fprintf(out, ", will finish %s", buf);
            }
            else
            {
                fprintf(out, ", remaining wall clock time: %5d s          ", (int)dt);
            }
        }
        else
        {
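            /* Unit bookkeeping for the estimate below: delta_t is in ps,
             * so delta_t/1000 is ns per step; multiplying by 24*60*60 s/day
             * and dividing by the measured wall-clock seconds per step
             * gives ns/day. */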
            fprintf(out, " performance: %.1f ns/day    ",
                    ir->delta_t/1000*24*60*60/time_per_step);
        }
    }
#ifndef GMX_THREAD_MPI
    if (PAR(cr))
    {
        fprintf(out, "\n");
    }
#endif

    fflush(out);
}

void print_date_and_time(FILE *fplog, int nodeid, const char *title,
                         const gmx_walltime_accounting_t walltime_accounting)
{
    int    i;
    char   timebuf[STRLEN];
    char   time_string[STRLEN];
    time_t tmptime;

    if (fplog)
    {
        if (walltime_accounting != NULL)
        {
            tmptime = (time_t) walltime_accounting_get_start_time_stamp(walltime_accounting);
            gmx_ctime_r(&tmptime, timebuf, STRLEN);
        }
        else
        {
            tmptime = (time_t) gmx_gettime();
            gmx_ctime_r(&tmptime, timebuf, STRLEN);
        }
        for (i = 0; timebuf[i] >= ' '; i++)
        {
            time_string[i] = timebuf[i];
        }
        time_string[i] = '\0';

        fprintf(fplog, "%s on node %d %s\n", title, nodeid, time_string);
    }
}
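/* Illustrative sketch (not part of this file) of how the printers above
 * are driven by the new timing module; the exact init/start/end call
 * names are assumptions based on gromacs/timing/walltime_accounting.h
 * and may differ:
 *
 *     gmx_walltime_accounting_t wta = walltime_accounting_init(nthreads);
 *     walltime_accounting_start(wta);
 *     print_date_and_time(fplog, cr->nodeid, "Started mdrun", wta);
 *     ... MD loop, periodically calling print_time(stderr, wta, step, ir, cr) ...
 *     walltime_accounting_end(wta);
 */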

static void sum_forces(int start, int end, rvec f[], rvec flr[])
{
    int i;

    if (gmx_debug_at)
    {
        pr_rvecs(debug, 0, "fsr", f+start, end-start);
        pr_rvecs(debug, 0, "flr", flr+start, end-start);
    }
    for (i = start; (i < end); i++)
    {
        rvec_inc(f[i], flr[i]);
    }
}

/*
 * calc_f_el calculates forces due to an electric field.
 *
 * Units: force [kJ mol^-1 nm^-1] = charge [e] * field [kJ mol^-1 nm^-1 e^-1].
 *
 * Et[] contains the parameters for the time-dependent
 * part of the field.
 * Ex[] contains the parameters for the spatially dependent part of the
 * field. Periodic fields are possible in principle, but only a constant
 * field is supported for now.
 * The function should return the energy due to the electric field
 * (if any), but for now it returns 0.
 *
 * WARNING:
 * There can be problems with the virial.
 * Since the field is not self-consistent this is unavoidable.
 * For neutral molecules the virial is correct within this approximation.
 * For neutral systems with many charged molecules the error is small.
 * But for systems with a net charge or a few charged molecules
 * the error can be significant when the field is high.
 * Solution: implement a self-consistent electric field into PME.
 */
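/* Worked example for the conversion below (illustrative, assuming
 * FIELDFAC = FARADAY/KILO as defined in physics.h): an applied field of
 * 1 V/nm exerts a force of about 96.485 kJ mol^-1 nm^-1 on a +1e charge.
 */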
static void calc_f_el(FILE *fp, int  start, int homenr,
                      real charge[], rvec f[],
                      t_cosines Ex[], t_cosines Et[], double t)
{
    rvec Ext;
    real t0;
    int  i, m;

    for (m = 0; (m < DIM); m++)
    {
        if (Et[m].n > 0)
        {
            if (Et[m].n == 3)
            {
                t0     = Et[m].a[1];
                Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
            }
            else
            {
                Ext[m] = cos(Et[m].a[0]*t);
            }
        }
        else
        {
            Ext[m] = 1.0;
        }
        if (Ex[m].n > 0)
        {
            /* Convert the field strength from V/nm to MD-units */
            Ext[m] *= Ex[m].a[0]*FIELDFAC;
            for (i = start; (i < start+homenr); i++)
            {
                f[i][m] += charge[i]*Ext[m];
            }
        }
        else
        {
            Ext[m] = 0;
        }
    }
    if (fp != NULL)
    {
        fprintf(fp, "%10g  %10g  %10g  %10g #FIELD\n", t,
                Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
    }
}

static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    int    i, j;
    tensor virtest;

    /* The short-range virial from surrounding boxes */
    clear_mat(vir_part);
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][i] += fr->vir_diag_posres[i];
    }

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][ZZ] += fr->vir_wall_z[i];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}
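/* For reference: the terms accumulated above implement the usual virial
 * definition Xi = -1/2 sum_i r_i (x) f_i, split into a shift-vector part
 * for periodic images (calc_vir) and a local-atom part (f_calc_vir).
 */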

static void posres_wrapper(FILE *fplog,
                           int flags,
                           gmx_bool bSepDVDL,
                           t_inputrec *ir,
                           t_nrnb *nrnb,
                           gmx_localtop_t *top,
                           matrix box, rvec x[],
                           gmx_enerdata_t *enerd,
                           real *lambda,
                           t_forcerec *fr)
{
    t_pbc pbc;
    real  v, dvdl;
    int   i;

    /* Position restraints always require full pbc */
    set_pbc(&pbc, ir->ePBC, box);
    dvdl = 0;
    v    = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
                  top->idef.iparams_posres,
                  (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
                  ir->ePBC == epbcNONE ? NULL : &pbc,
                  lambda[efptRESTRAINT], &dvdl,
                  fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
    if (bSepDVDL)
    {
        gmx_print_sepdvdl(fplog, interaction_function[F_POSRES].longname, v, dvdl);
    }
    enerd->term[F_POSRES] += v;
    /* If just the force constant k changes, the FEP term is linear in
     * lambda, but if the reference positions change, it is not: with
     * k_l = (1-l)*kA + l*kB and x0_l interpolated likewise,
     * V(l) = 0.5*k_l*(x - x0_l)^2 is quadratic in l unless x0A == x0B.
     */
    enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
    inc_nrnb(nrnb, eNR_POSRES, top->idef.il[F_POSRES].nr/2);

    if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
    {
        for (i = 0; i < enerd->n_lambda; i++)
        {
            real dvdl_dum, lambda_dum;

            lambda_dum = (i == 0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
            v          = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
                                top->idef.iparams_posres,
                                (const rvec*)x, NULL, NULL,
                                ir->ePBC == epbcNONE ? NULL : &pbc, lambda_dum, &dvdl,
                                fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
            enerd->enerpart_lambda[i] += v;
        }
    }
}

static void pull_potential_wrapper(FILE *fplog,
                                   gmx_bool bSepDVDL,
                                   t_commrec *cr,
                                   t_inputrec *ir,
                                   matrix box, rvec x[],
                                   rvec f[],
                                   tensor vir_force,
                                   t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   real *lambda,
                                   double t)
{
    t_pbc  pbc;
    real   dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    set_pbc(&pbc, ir->ePBC, box);
    dvdl                     = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->ePull, ir->pull, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    if (bSepDVDL)
    {
        gmx_print_sepdvdl(fplog, "Com pull", enerd->term[F_COM_PULL], dvdl);
    }
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
}

static void pme_receive_force_ener(FILE           *fplog,
                                   gmx_bool        bSepDVDL,
                                   t_commrec      *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
                                   t_forcerec     *fr)
{
    real   e, v, dvdl;
    float  cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl = 0;
    gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e, &dvdl,
                      &cycles_seppme);
    if (bSepDVDL)
    {
        gmx_print_sepdvdl(fplog, "PME mesh", e, dvdl);
    }
    enerd->term[F_COUL_RECIP] += e;
    enerd->dvdl_lin[efptCOUL] += dvdl;
    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}
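/* Note on the accounting above: both the PP cycles spent while the PME
 * ranks were working (ewcPPDURINGPME) and the PME cycle count reported
 * back (cycles_seppme) are fed into the domain-decomposition load
 * balancing through dd_cycles_add().
 */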

static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_large_int_t step, real pforce, rvec *x, rvec *f)
{
    int  i;
    real pf2, fn2;
    char buf[STEPSTRSIZE];

    pf2 = sqr(pforce);
    for (i = md->start; i < md->start+md->homenr; i++)
    {
        fn2 = norm2(f[i]);
        /* This also catches NaN: fn2 != fn2 holds only for NaN (IEEE-754),
         * unless the compiler optimizes the self-comparison away. */
        if (fn2 >= pf2 || fn2 != fn2)
        {
            fprintf(fp, "step %s  atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    gmx_step_str(step, buf),
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
        }
    }
}

static void post_process_forces(t_commrec *cr,
                                gmx_large_int_t step,
                                t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                                gmx_localtop_t *top,
                                matrix box, rvec x[],
                                rvec f[],
                                tensor vir_force,
                                t_mdatoms *mdatoms,
                                t_graph *graph,
                                t_forcerec *fr, gmx_vsite_t *vsite,
                                int flags)
{
    if (fr->bF_NoVirSum)
    {
        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles.
             * This is parallelized; MPI communication is performed
             * if the constructing atoms are not local.
             */
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
                           (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            if (fr->bDomDec)
            {
                sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
            }
            else
            {
                sum_forces(mdatoms->start, mdatoms->start+mdatoms->homenr,
                           f, fr->f_novirsum);
            }
            if (EEL_FULL(fr->eeltype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_el_recip, vir_force);
            }
            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}

static void do_nb_verlet(t_forcerec *fr,
                         interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         int flags, int ilocality,
                         int clearF,
                         t_nrnb *nrnb,
                         gmx_wallcycle_t wcycle)
{
    int                        nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    char                      *env;
    nonbonded_verlet_group_t  *nbvg;
    gmx_bool                   bCUDA;

    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nbvg = &fr->nbv->grp[ilocality];

    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    bCUDA = (nbvg->kernel_type == nbnxnk8x8x8_CUDA);

    /* CUDA kernel launch overhead is already timed separately,
     * so only time the CPU kernels here. */
    if (!bCUDA)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    }
    switch (nbvg->kernel_type)
    {
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             nbvg->nbat, ic,
                             fr->shift_vec,
                             flags,
                             clearF,
                             fr->fshift[0],
                             enerd->grpp.ener[egCOULSR],
                             fr->bBHAM ?
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  nbvg->nbat, ic,
                                  nbvg->ewald_excl,
                                  fr->shift_vec,
                                  flags,
                                  clearF,
                                  fr->fshift[0],
                                  enerd->grpp.ener[egCOULSR],
                                  fr->bBHAM ?
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
            break;
        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   nbvg->nbat, ic,
                                   nbvg->ewald_excl,
                                   fr->shift_vec,
                                   flags,
                                   clearF,
                                   fr->fshift[0],
                                   enerd->grpp.ener[egCOULSR],
                                   fr->bBHAM ?
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk8x8x8_CUDA:
            nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
            break;

        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 nbvg->nbat, ic,
                                 fr->shift_vec,
                                 flags,
                                 clearF,
                                 nbvg->nbat->out[0].f,
                                 fr->fshift[0],
                                 enerd->grpp.ener[egCOULSR],
                                 fr->bBHAM ?
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);
            break;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");

    }
    if (!bCUDA)
    {
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!bCUDA && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bCUDA && nbnxn_cuda_is_kernel_ewald_analytical(fr->nbv->cu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);
}

void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_localtop_t *top,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         rvec f[],
                         tensor vir_force,
                         t_mdatoms *mdatoms,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, FILE *field, gmx_edsam_t ed,
                         gmx_bool bBornRadii,
                         int flags)
{
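    /* Rough pipeline of a Verlet-scheme step as implemented below
     * (GPU + domain-decomposition case): launch the local non-bonded
     * work on the GPU early, overlap the non-local coordinate
     * communication and pair search with it, launch the non-local GPU
     * work and the device-to-host force copy-back, do bonded and PME
     * work on the CPU, then wait for and reduce the non-local and
     * finally the local GPU forces.
     */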
    int                 cg0, cg1, i, j;
    int                 start, homenr;
    int                 nb_kernel_type;
    double              mu[2*DIM];
    gmx_bool            bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
    gmx_bool            bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    matrix              boxs;
    rvec                vzero, box_diag;
    real                e, v, dvdl;
    float               cycles_pme, cycles_force;
    nonbonded_verlet_t *nbv;

    cycles_force   = 0;
    nbv            = fr->nbv;
    nb_kernel_type = fr->nbv->grp[0].kernel_type;

    start  = mdatoms->start;
    homenr = mdatoms->homenr;

    bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));

    clear_mat(vir_force);

    cg0 = 0;
    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }
    if (fr->n_tpi > 0)
    {
        cg1--;
    }

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoLongRange  = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bSepLRF       = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);

    if (bStateChanged)
    {
        update_forcerec(fr, box);

        if (NEED_MUTOT(*inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);

#ifdef GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */

        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        gmx_pme_send_x(cr, bBS ? boxs : box, x,
                       mdatoms->nChargePerturbed, lambda[efptCOUL],
                       (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)), step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        clear_rvec(vzero);
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!fr->bDomDec)
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                              0, vzero, box_diag,
                              0, mdatoms->homenr, -1, fr->cginfo, x,
                              0, NULL,
                              nbv->grp[eintLocal].kernel_type,
                              nbv->grp[eintLocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                                       fr->cginfo, x,
                                       nbv->grp[eintNonlocal].kernel_type,
                                       nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        if (nbv->ngrp == 1 ||
            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        else
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        wallcycle_stop(wcycle, ewcNS);
    }

    /* initialize the GPU atom data and copy shift vector */
    if (bUseGPU)
    {
        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }

        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* do local pair search */
    if (bNS)
    {
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                            &top->excls,
                            ic->rlist,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintLocal].nbl_lists,
                            eintLocal,
                            nbv->grp[eintLocal].kernel_type,
                            nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

        if (bUseGPU)
        {
            /* initialize local pair-list on the GPU */
            nbnxn_cuda_init_pairlist(nbv->cu_nbv,
                                     nbv->grp[eintLocal].nbl_lists.nbl[0],
                                     eintLocal);
        }
        wallcycle_stop(wcycle, ewcNS);
    }
    else
    {
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                        nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (bUseGPU)
    {
        wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
        /* launch local nonbonded F on GPU */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
                     nrnb, wcycle);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
    {
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

        if (bDiffKernels)
        {
            /* With GPU+CPU non-bonded calculations we need to copy
             * the local coordinates to the non-local nbat struct
             * (in CPU format) as the non-local kernel call also
             * calculates the local - non-local interactions.
             */
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (bDiffKernels)
            {
                nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
            }

            nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                                &top->excls,
                                ic->rlist,
                                nbv->min_ci_balanced,
                                &nbv->grp[eintNonlocal].nbl_lists,
                                eintNonlocal,
                                nbv->grp[eintNonlocal].kernel_type,
                                nrnb);

            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
            {
                /* initialize non-local pair-list on the GPU */
                nbnxn_cuda_init_pairlist(nbv->cu_nbv,
                                         nbv->grp[eintNonlocal].nbl_lists.nbl[0],
                                         eintNonlocal);
            }
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            wallcycle_start(wcycle, ewcMOVEX);
            dd_move_x(cr->dd, box, x);

            /* When we don't need the total dipole we sum it in global_stat */
            if (bStateChanged && NEED_MUTOT(*inputrec))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            wallcycle_stop(wcycle, ewcMOVEX);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bUseGPU && !bDiffKernels)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
                         nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        if (DOMAINDECOMP(cr) && !bDiffKernels)
        {
            nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
                                      flags, eatNonlocal);
        }
        nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
                                  flags, eatLocal);
        cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    if (bStateChanged && NEED_MUTOT(*inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
    if (fr->efep == efepNO)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(fr, bNS, enerd, MASTER(cr));
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr))
    {
        if (!(cr->duty & DUTY_PME))
        {
            wallcycle_start(wcycle, ewcPPDURINGPME);
            dd_force_flop_start(cr->dd, nrnb);
        }
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Start the force cycle counter.
     * This counter is stopped in do_forcelow_level.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);
    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->f_novirsum_alloc;
                if (fr->bDomDec)
                {
                    clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
                }
                else
                {
                    clear_rvecs(homenr, fr->f_novirsum+start);
                }
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = f;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);
        if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
        {
            clear_rvecs(fr->natoms_force_constr, fr->f_twin);
        }

        clear_rvec(fr->vir_diag_posres);
    }

    if (inputrec->ePull == epullCONSTRAINT)
    {
        clear_pull_forces(inputrec->pull);
    }

    /* When the non-bonded forces are computed on the CPU, we do that here,
     * before calling do_force_lowlevel, since there the bonded forces are
     * calculated before PME, which does communication.
     * With this order, non-bonded and bonded force calculation imbalance
     * can be balanced out by the domain decomposition load balancing.
     */

    if (!bUseOrEmulGPU)
    {
        /* Maybe we should move this into do_force_lowlevel */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
                     nrnb, wcycle);
    }

    if (!bUseOrEmulGPU || bDiffKernels)
    {
        int aloc;

        if (DOMAINDECOMP(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
                         nrnb, wcycle);
        }

        if (!bUseOrEmulGPU)
        {
            aloc = eintLocal;
        }
        else
        {
            aloc = eintNonlocal;
        }

        /* Add all the non-bonded forces to the normal force array.
         * This can be split into a local and a non-local part when
         * overlapping communication with calculation with domain
         * decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* if there are multiple fshift output buffers reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
        {
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
                                                     fr->fshift);
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
    {
        posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
                       enerd, lambda, fr);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
                      &(top->atomtypes), bBornRadii, box,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    if (bSepLRF)
    {
        if (do_per_step(step, inputrec->nstcalclr))
        {
            /* Add the long range forces to the short range forces */
            for (i = 0; i < fr->natoms_force_constr; i++)
            {
                rvec_add(fr->f_twin[i], f[i], f[i]);
            }
        }
    }

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (bUseOrEmulGPU && !bDiffKernels)
    {
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
        {
            if (bUseGPU)
            {
                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                nbnxn_cuda_wait_gpu(nbv->cu_nbv,
                                    nbv->grp[eintNonlocal].nbat,
                                    flags, eatNonlocal,
                                    enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                    fr->fshift);
                cycles_force += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                             nrnb, wcycle);
                cycles_force += wallcycle_stop(wcycle, ewcFORCE);
            }
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
            {
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            }
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }
    }

    if (bDoForces)
    {
        /* Communicate the forces */
        if (PAR(cr))
        {
            wallcycle_start(wcycle, ewcMOVEF);
            if (DOMAINDECOMP(cr))
            {
                dd_move_f(cr->dd, f, fr->fshift);
                /* Do we need to communicate the separate force array
                 * for terms that do not contribute to the single sum virial?
                 * Position restraints and electric fields do not introduce
                 * inter-cg forces, only full electrostatics methods do.
                 * When we do not calculate the virial, fr->f_novirsum = f,
                 * so we have already communicated these forces.
                 */
                if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
                    (flags & GMX_FORCE_VIRIAL))
                {
                    dd_move_f(cr->dd, fr->f_novirsum, NULL);
                }
                if (bSepLRF)
                {
                    /* We should not update the shift forces here,
                     * since f_twin is already included in f.
                     */
                    dd_move_f(cr->dd, fr->f_twin, NULL);
                }
            }
            wallcycle_stop(wcycle, ewcMOVEF);
        }
    }

    if (bUseOrEmulGPU)
    {
        /* wait for local forces (or calculate in emulation mode) */
        if (bUseGPU)
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
            nbnxn_cuda_wait_gpu(nbv->cu_nbv,
                                nbv->grp[eintLocal].nbat,
                                flags, eatLocal,
                                enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                fr->fshift);
            wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

            /* now clear the GPU outputs while we finish the step on the CPU */

            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
        else
        {
            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                         nrnb, wcycle);
            wallcycle_stop(wcycle, ewcFORCE);
        }
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        /* skip the reduction if there was no local work to do */
        if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
        {
            nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                           nbv->grp[eintLocal].nbat, f);
        }
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
        }
    }

    if (bDoForces)
    {
        if (IR_ELEC_FIELD(*inputrec))
        {
            /* Compute forces due to electric field */
            calc_f_el(MASTER(cr) ? field : NULL,
                      start, homenr, mdatoms->chargeA, fr->f_novirsum,
                      inputrec->ex, inputrec->et, t);
        }

        /* If we have NoVirSum forces, but we do not calculate the virial,
         * we sum fr->f_novirsum = f later.
         */
        if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
        {
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);

            if (bSepLRF)
            {
                wallcycle_start(wcycle, ewcVSITESPREAD);
                spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
                               nrnb,
                               &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
                wallcycle_stop(wcycle, ewcVSITESPREAD);
            }
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Calculation of the virial must be done after vsites! */
            calc_virial(mdatoms->start, mdatoms->homenr, x, f,
                        vir_force, graph, box, nrnb, fr, inputrec->ePBC);
        }
    }

    if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
    {
        pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
                               f, vir_force, mdatoms, enerd, lambda, t);
    }

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (PAR(cr) && !(cr->duty & DUTY_PME))
    {
        /* In case of node-splitting, the PP nodes receive the long-range
         * forces, virial and energy from the PME nodes here.
         */
        pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
    }

    if (bDoForces)
    {
        post_process_forces(cr, step, nrnb, wcycle,
                            top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
                            flags);
    }

    /* Sum the potential energy terms from group contributions */
    sum_epot(&(enerd->grpp), enerd->term);
}

void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
                        t_inputrec *inputrec,
                        gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                        gmx_localtop_t *top,
                        gmx_groups_t *groups,
                        matrix box, rvec x[], history_t *hist,
                        rvec f[],
                        tensor vir_force,
                        t_mdatoms *mdatoms,
                        gmx_enerdata_t *enerd, t_fcdata *fcd,
                        real *lambda, t_graph *graph,
                        t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
                        double t, FILE *field, gmx_edsam_t ed,
                        gmx_bool bBornRadii,
                        int flags)
{
    int        cg0, cg1, i, j;
    int        start, homenr;
    double     mu[2*DIM];
    gmx_bool   bSepDVDL, bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
    gmx_bool   bDoLongRangeNS, bDoForces, bDoPotential, bSepLRF;
    gmx_bool   bDoAdressWF;
    matrix     boxs;
    rvec       vzero, box_diag;
    real       e, v, dvdlambda[efptNR];
    t_pbc      pbc;
    float      cycles_pme, cycles_force;

    start  = mdatoms->start;
    homenr = mdatoms->homenr;

    bSepDVDL = (fr->bSepDVDL && do_per_step(step, inputrec->nstlog));

    clear_mat(vir_force);

    if (PARTDECOMP(cr))
    {
        pd_cg_range(cr, &cg0, &cg1);
    }
    else
    {
        cg0 = 0;
        if (DOMAINDECOMP(cr))
        {
            cg1 = cr->dd->ncg_tot;
        }
        else
        {
            cg1 = top->cgs.nr;
        }
        if (fr->n_tpi > 0)
        {
            cg1--;
        }
    }

    bStateChanged  = (flags & GMX_FORCE_STATECHANGED);
    bNS            = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    /* Should we update the long-range neighborlists at this step? */
    bDoLongRangeNS = fr->bTwinRange && bNS;
    /* Should we fill the neighborsearching grid at this step? */
1409     bFillGrid      = (bNS && bStateChanged);
1410     bCalcCGCM      = (bFillGrid && !DOMAINDECOMP(cr));
1411     bDoForces      = (flags & GMX_FORCE_FORCES);
1412     bDoPotential   = (flags & GMX_FORCE_ENERGY);
1413     bSepLRF        = ((inputrec->nstcalclr > 1) && bDoForces &&
1414                       (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));
1415
1416     /* should probably move this to the forcerec since it doesn't change */
1417     bDoAdressWF   = ((fr->adress_type != eAdressOff));
1418
1419     if (bStateChanged)
1420     {
1421         update_forcerec(fr, box);
1422
1423         if (NEED_MUTOT(*inputrec))
1424         {
1425             /* Calculate total (local) dipole moment in a temporary common array.
1426              * This makes it possible to sum them over nodes faster.
1427              */
1428             calc_mu(start, homenr,
1429                     x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1430                     mu, mu+DIM);
1431         }
1432     }
1433
1434     if (fr->ePBC != epbcNONE)
1435     {
1436         /* Compute shift vectors every step,
1437          * because of pressure coupling or box deformation!
1438          */
1439         if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1440         {
1441             calc_shifts(box, fr->shift_vec);
1442         }
1443
1444         if (bCalcCGCM)
1445         {
1446             put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1447                                      &(top->cgs), x, fr->cg_cm);
1448             inc_nrnb(nrnb, eNR_CGCM, homenr);
1449             inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1450         }
1451         else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1452         {
1453             unshift_self(graph, box, x);
1454         }
1455     }
1456     else if (bCalcCGCM)
1457     {
1458         calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1459         inc_nrnb(nrnb, eNR_CGCM, homenr);
1460     }
1461
1462     if (bCalcCGCM)
1463     {
1464         if (PAR(cr))
1465         {
1466             move_cgcm(fplog, cr, fr->cg_cm);
1467         }
1468         if (gmx_debug_at)
1469         {
1470             pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1471         }
1472     }
1473
1474 #ifdef GMX_MPI
1475     if (!(cr->duty & DUTY_PME))
1476     {
1477         /* Send particle coordinates to the pme nodes.
1478          * Since this is only implemented for domain decomposition
1479          * and domain decomposition does not use the graph,
1480          * we do not need to worry about shifting.
1481          */
1482
1483         wallcycle_start(wcycle, ewcPP_PMESENDX);
1484
1485         bBS = (inputrec->nwall == 2);
1486         if (bBS)
1487         {
1488             copy_mat(box, boxs);
1489             svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1490         }
1491
1492         gmx_pme_send_x(cr, bBS ? boxs : box, x,
1493                        mdatoms->nChargePerturbed, lambda[efptCOUL],
1494                        (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)), step);
1495
1496         wallcycle_stop(wcycle, ewcPP_PMESENDX);
1497     }
1498 #endif /* GMX_MPI */
1499
1500     /* Communicate coordinates and sum dipole if necessary */
1501     if (PAR(cr))
1502     {
1503         wallcycle_start(wcycle, ewcMOVEX);
1504         if (DOMAINDECOMP(cr))
1505         {
1506             dd_move_x(cr->dd, box, x);
1507         }
1508         else
1509         {
1510             move_x(cr, x, nrnb);
1511         }
1512         wallcycle_stop(wcycle, ewcMOVEX);
1513     }
1514
1515     /* Update the AdResS weights beforehand, if needed */
1516     if (bStateChanged && bDoAdressWF)
1517     {
1518         /* PBC is needed for the AdResS weight calculation with pbc_dx */
1519         set_pbc(&pbc, inputrec->ePBC, box);
1520         if (fr->adress_site == eAdressSITEcog)
1521         {
1522             update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
1523                                       inputrec->ePBC == epbcNONE ? NULL : &pbc);
1524         }
1525         else if (fr->adress_site == eAdressSITEcom)
1526         {
1527             update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
1528                                       inputrec->ePBC == epbcNONE ? NULL : &pbc);
1529         }
1530         else if (fr->adress_site == eAdressSITEatomatom)
1531         {
1532             update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1533                                                 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1534         }
1535         else
1536         {
1537             update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1538                                        inputrec->ePBC == epbcNONE ? NULL : &pbc);
1539         }
1540     }
1541
1542     if (NEED_MUTOT(*inputrec))
1543     {
1544
1545         if (bStateChanged)
1546         {
1547             if (PAR(cr))
1548             {
1549                 gmx_sumd(2*DIM, mu, cr);
1550             }
1551             for (i = 0; i < 2; i++)
1552             {
1553                 for (j = 0; j < DIM; j++)
1554                 {
1555                     fr->mu_tot[i][j] = mu[i*DIM + j];
1556                 }
1557             }
1558         }
1559         if (fr->efep == efepNO)
1560         {
1561             copy_rvec(fr->mu_tot[0], mu_tot);
1562         }
1563         else
1564         {
1565             for (j = 0; j < DIM; j++)
1566             {
1567                 mu_tot[j] =
1568                     (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1569             }
1570         }
1571     }
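    /* Stated as a formula, the branch above computes
     *
     *   mu_tot = (1 - lambda_coul) * mu_A + lambda_coul * mu_B
     *
     * with mu_A = fr->mu_tot[0] and mu_B = fr->mu_tot[1], i.e. the total
     * dipole is interpolated linearly between the A and B states.
     */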
1572
1573     /* Reset energies */
1574     reset_enerdata(fr, bNS, enerd, MASTER(cr));
1575     clear_rvecs(SHIFTS, fr->fshift);
1576
1577     if (bNS)
1578     {
1579         wallcycle_start(wcycle, ewcNS);
1580
1581         if (graph && bStateChanged)
1582         {
1583             /* Calculate intramolecular shift vectors to make molecules whole */
1584             mk_mshift(fplog, graph, fr->ePBC, box, x);
1585         }
1586
1587         /* Do the actual neighbor searching */
1588         ns(fplog, fr, box,
1589            groups, top, mdatoms,
1590            cr, nrnb, bFillGrid,
1591            bDoLongRangeNS);
1592
1593         wallcycle_stop(wcycle, ewcNS);
1594     }
1595
1596     if (inputrec->implicit_solvent && bNS)
1597     {
1598         make_gb_nblist(cr, inputrec->gb_algorithm,
1599                        x, box, fr, &top->idef, graph, fr->born);
1600     }
1601
1602     if (DOMAINDECOMP(cr))
1603     {
1604         if (!(cr->duty & DUTY_PME))
1605         {
1606             wallcycle_start(wcycle, ewcPPDURINGPME);
1607             dd_force_flop_start(cr->dd, nrnb);
1608         }
1609     }
1610
1611     if (inputrec->bRot)
1612     {
1613         /* Enforced rotation has its own cycle counter that starts after the collective
1614          * coordinates have been communicated. It is added to ddCyclF to allow
1615          * for proper load-balancing */
1616         wallcycle_start(wcycle, ewcROT);
1617         do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1618         wallcycle_stop(wcycle, ewcROT);
1619     }
1620
1621     /* Start the force cycle counter.
1622      * This counter is stopped in do_forcelow_level.
1623      * No parallel communication should occur while this counter is running,
1624      * since that will interfere with the dynamic load balancing.
1625      */
1626     wallcycle_start(wcycle, ewcFORCE);
1627
1628     if (bDoForces)
1629     {
1630         /* Reset forces for which the virial is calculated separately:
1631          * PME/Ewald forces if necessary */
1632         if (fr->bF_NoVirSum)
1633         {
1634             if (flags & GMX_FORCE_VIRIAL)
1635             {
1636                 fr->f_novirsum = fr->f_novirsum_alloc;
1637                 if (fr->bDomDec)
1638                 {
1639                     clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1640                 }
1641                 else
1642                 {
1643                     clear_rvecs(homenr, fr->f_novirsum+start);
1644                 }
1645             }
1646             else
1647             {
1648                 /* We are not calculating the pressure so we do not need
1649                  * a separate array for forces that do not contribute
1650                  * to the pressure.
1651                  */
1652                 fr->f_novirsum = f;
1653             }
1654         }
1655
1656         /* Clear the short- and long-range forces */
1657         clear_rvecs(fr->natoms_force_constr, f);
1658         if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1659         {
1660             clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1661         }
1662
1663         clear_rvec(fr->vir_diag_posres);
1664     }
1665     if (inputrec->ePull == epullCONSTRAINT)
1666     {
1667         clear_pull_forces(inputrec->pull);
1668     }
1669
1670     /* update QMMMrec, if necessary */
1671     if (fr->bQMMM)
1672     {
1673         update_QMMMrec(cr, fr, x, mdatoms, box, top);
1674     }
1675
1676     if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1677     {
1678         posres_wrapper(fplog, flags, bSepDVDL, inputrec, nrnb, top, box, x,
1679                        enerd, lambda, fr);
1680     }
1681
1682     if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
1683     {
1684         /* Flat-bottomed position restraints always require full pbc */
1685         if (!(bStateChanged && bDoAdressWF))
1686         {
1687             set_pbc(&pbc, inputrec->ePBC, box);
1688         }
1689         v = fbposres(top->idef.il[F_FBPOSRES].nr, top->idef.il[F_FBPOSRES].iatoms,
1690                      top->idef.iparams_fbposres,
1691                      (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
1692                      inputrec->ePBC == epbcNONE ? NULL : &pbc,
1693                      fr->rc_scaling, fr->ePBC, fr->posres_com);
1694         enerd->term[F_FBPOSRES] += v;
1695         inc_nrnb(nrnb, eNR_FBPOSRES, top->idef.il[F_FBPOSRES].nr/2);
1696     }
1697
1698     /* Compute the bonded and non-bonded energies and optionally forces */
1699     do_force_lowlevel(fplog, step, fr, inputrec, &(top->idef),
1700                       cr, nrnb, wcycle, mdatoms,
1701                       x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1702                       &(top->atomtypes), bBornRadii, box,
1703                       inputrec->fepvals, lambda,
1704                       graph, &(top->excls), fr->mu_tot,
1705                       flags,
1706                       &cycles_pme);
1707
1708     if (bSepLRF)
1709     {
1710         if (do_per_step(step, inputrec->nstcalclr))
1711         {
1712             /* Add the long range forces to the short range forces */
1713             for (i = 0; i < fr->natoms_force_constr; i++)
1714             {
1715                 rvec_add(fr->f_twin[i], f[i], f[i]);
1716             }
1717         }
1718     }
1719
1720     cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1721
1722     if (ed)
1723     {
1724         do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1725     }
1726
1727     if (DOMAINDECOMP(cr))
1728     {
1729         dd_force_flop_stop(cr->dd, nrnb);
1730         if (wcycle)
1731         {
1732             dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1733         }
1734     }
1735
1736     if (bDoForces)
1737     {
1738         if (IR_ELEC_FIELD(*inputrec))
1739         {
1740             /* Compute forces due to electric field */
1741             calc_f_el(MASTER(cr) ? field : NULL,
1742                       start, homenr, mdatoms->chargeA, fr->f_novirsum,
1743                       inputrec->ex, inputrec->et, t);
1744         }
1745
1746         if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
1747         {
1748             /* Compute thermodynamic force in hybrid AdResS region */
1749             adress_thermo_force(start, homenr, &(top->cgs), x, fr->f_novirsum, fr, mdatoms,
1750                                 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1751         }
1752
1753         /* Communicate the forces */
1754         if (PAR(cr))
1755         {
1756             wallcycle_start(wcycle, ewcMOVEF);
1757             if (DOMAINDECOMP(cr))
1758             {
1759                 dd_move_f(cr->dd, f, fr->fshift);
1760                 /* Do we need to communicate the separate force array
1761                  * for terms that do not contribute to the single sum virial?
1762                  * Position restraints and electric fields do not introduce
1763                  * inter-cg forces, only full electrostatics methods do.
1764                  * When we do not calculate the virial, fr->f_novirsum = f,
1765                  * so we have already communicated these forces.
1766                  */
1767                 if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1768                     (flags & GMX_FORCE_VIRIAL))
1769                 {
1770                     dd_move_f(cr->dd, fr->f_novirsum, NULL);
1771                 }
1772                 if (bSepLRF)
1773                 {
1774                     /* We should not update the shift forces here,
1775                      * since f_twin is already included in f.
1776                      */
1777                     dd_move_f(cr->dd, fr->f_twin, NULL);
1778                 }
1779             }
1780             else
1781             {
1782                 pd_move_f(cr, f, nrnb);
1783                 if (bSepLRF)
1784                 {
1785                     pd_move_f(cr, fr->f_twin, nrnb);
1786                 }
1787             }
1788             wallcycle_stop(wcycle, ewcMOVEF);
1789         }
1790
1791         /* If we have NoVirSum forces, but we do not calculate the virial,
1792          * we sum fr->f_novirsum = f later.
1793          */
1794         if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1795         {
1796             wallcycle_start(wcycle, ewcVSITESPREAD);
1797             spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1798                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1799             wallcycle_stop(wcycle, ewcVSITESPREAD);
1800
1801             if (bSepLRF)
1802             {
1803                 wallcycle_start(wcycle, ewcVSITESPREAD);
1804                 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1805                                nrnb,
1806                                &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1807                 wallcycle_stop(wcycle, ewcVSITESPREAD);
1808             }
1809         }
1810
1811         if (flags & GMX_FORCE_VIRIAL)
1812         {
1813             /* Calculation of the virial must be done after vsites! */
1814             calc_virial(mdatoms->start, mdatoms->homenr, x, f,
1815                         vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1816         }
1817     }
1818
1819     if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1820     {
1821         pull_potential_wrapper(fplog, bSepDVDL, cr, inputrec, box, x,
1822                                f, vir_force, mdatoms, enerd, lambda, t);
1823     }
1824
1825     /* Add the forces from enforced rotation potentials (if any) */
1826     if (inputrec->bRot)
1827     {
1828         wallcycle_start(wcycle, ewcROTadd);
1829         enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1830         wallcycle_stop(wcycle, ewcROTadd);
1831     }
1832
1833     if (PAR(cr) && !(cr->duty & DUTY_PME))
1834     {
1835         /* In case of node-splitting, the PP nodes receive the long-range
1836          * forces, virial and energy from the PME nodes here.
1837          */
1838         pme_receive_force_ener(fplog, bSepDVDL, cr, wcycle, enerd, fr);
1839     }
1840
1841     if (bDoForces)
1842     {
1843         post_process_forces(cr, step, nrnb, wcycle,
1844                             top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1845                             flags);
1846     }
1847
1848     /* Sum the potential energy terms from group contributions */
1849     sum_epot(&(enerd->grpp), enerd->term);
1850 }
1851
1852 void do_force(FILE *fplog, t_commrec *cr,
1853               t_inputrec *inputrec,
1854               gmx_large_int_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1855               gmx_localtop_t *top,
1856               gmx_groups_t *groups,
1857               matrix box, rvec x[], history_t *hist,
1858               rvec f[],
1859               tensor vir_force,
1860               t_mdatoms *mdatoms,
1861               gmx_enerdata_t *enerd, t_fcdata *fcd,
1862               real *lambda, t_graph *graph,
1863               t_forcerec *fr,
1864               gmx_vsite_t *vsite, rvec mu_tot,
1865               double t, FILE *field, gmx_edsam_t ed,
1866               gmx_bool bBornRadii,
1867               int flags)
1868 {
1869     /* Clear the nonbonded force flag if nonbonded interactions are disabled */
1870     if (!fr->bNonbonded)
1871     {
1872         flags &= ~GMX_FORCE_NONBONDED;
1873     }
1874
1875     switch (inputrec->cutoff_scheme)
1876     {
1877         case ecutsVERLET:
1878             do_force_cutsVERLET(fplog, cr, inputrec,
1879                                 step, nrnb, wcycle,
1880                                 top,
1881                                 groups,
1882                                 box, x, hist,
1883                                 f, vir_force,
1884                                 mdatoms,
1885                                 enerd, fcd,
1886                                 lambda, graph,
1887                                 fr, fr->ic,
1888                                 vsite, mu_tot,
1889                                 t, field, ed,
1890                                 bBornRadii,
1891                                 flags);
1892             break;
1893         case ecutsGROUP:
1894             do_force_cutsGROUP(fplog, cr, inputrec,
1895                                step, nrnb, wcycle,
1896                                top,
1897                                groups,
1898                                box, x, hist,
1899                                f, vir_force,
1900                                mdatoms,
1901                                enerd, fcd,
1902                                lambda, graph,
1903                                fr, vsite, mu_tot,
1904                                t, field, ed,
1905                                bBornRadii,
1906                                flags);
1907             break;
1908         default:
1909             gmx_incons("Invalid cut-off scheme passed!");
1910     }
1911 }
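/* A sketch of a typical invocation (hypothetical; the actual caller is the
 * MD loop): the flags select the work for this step, so a neighbor-search
 * step that also needs energies and the virial might look like
 *
 *   do_force(fplog, cr, inputrec, step, nrnb, wcycle, top, groups,
 *            state->box, state->x, &state->hist, f, vir_force, mdatoms,
 *            enerd, fcd, state->lambda, graph, fr, vsite, mu_tot, t,
 *            field, ed, bBornRadii,
 *            GMX_FORCE_STATECHANGED | GMX_FORCE_NS | GMX_FORCE_BONDED |
 *            GMX_FORCE_NONBONDED | GMX_FORCE_FORCES | GMX_FORCE_ENERGY |
 *            GMX_FORCE_VIRIAL);
 */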
1912
1913
1914 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
1915                         t_inputrec *ir, t_mdatoms *md,
1916                         t_state *state, t_commrec *cr, t_nrnb *nrnb,
1917                         t_forcerec *fr, gmx_localtop_t *top)
1918 {
1919     int             i, m, start, end;
1920     gmx_large_int_t step;
1921     real            dt = ir->delta_t;
1922     real            dvdl_dum;
1923     rvec           *savex;
1924
1925     snew(savex, state->natoms);
1926
1927     start = md->start;
1928     end   = md->homenr + start;
1929
1930     if (debug)
1931     {
1932         fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
1933                 start, md->homenr, end);
1934     }
1935     /* Do a first constraining run to reset particles... */
1936     step = ir->init_step;
1937     if (fplog)
1938     {
1939         char buf[STEPSTRSIZE];
1940         fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
1941                 gmx_step_str(step, buf));
1942     }
1943     dvdl_dum = 0;
1944
1945     /* constrain the current position */
1946     constrain(NULL, TRUE, FALSE, constr, &(top->idef),
1947               ir, NULL, cr, step, 0, md,
1948               state->x, state->x, NULL,
1949               fr->bMolPBC, state->box,
1950               state->lambda[efptBONDED], &dvdl_dum,
1951               NULL, NULL, nrnb, econqCoord,
1952               ir->epc == epcMTTK, state->veta, state->veta);
1953     if (EI_VV(ir->eI))
1954     {
1955         /* Constrain the initial velocity, and save it */
1956         /* May also be useful if we need the ekin from the halfstep for velocity verlet */
1957         /* Might not yet treat veta correctly */
1958         constrain(NULL, TRUE, FALSE, constr, &(top->idef),
1959                   ir, NULL, cr, step, 0, md,
1960                   state->x, state->v, state->v,
1961                   fr->bMolPBC, state->box,
1962                   state->lambda[efptBONDED], &dvdl_dum,
1963                   NULL, NULL, nrnb, econqVeloc,
1964                   ir->epc == epcMTTK, state->veta, state->veta);
1965     }
1966     /* Constrain the initial velocities at t-dt/2 */
1967     if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
1968     {
1969         for (i = start; (i < end); i++)
1970         {
1971             for (m = 0; (m < DIM); m++)
1972             {
1973                 /* Reverse the velocity */
1974                 state->v[i][m] = -state->v[i][m];
1975                 /* Store the position at t-dt in savex */
1976                 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
1977             }
1978         }
1979         /* Shake the positions at t=-dt with the positions at t=0
1980          * as reference coordinates.
1981          */
1982         if (fplog)
1983         {
1984             char buf[STEPSTRSIZE];
1985             fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
1986                     gmx_step_str(step, buf));
1987         }
1988         dvdl_dum = 0;
1989         constrain(NULL, TRUE, FALSE, constr, &(top->idef),
1990                   ir, NULL, cr, step, -1, md,
1991                   state->x, savex, NULL,
1992                   fr->bMolPBC, state->box,
1993                   state->lambda[efptBONDED], &dvdl_dum,
1994                   state->v, NULL, nrnb, econqCoord,
1995                   ir->epc == epcMTTK, state->veta, state->veta);
1996
1997         for (i = start; i < end; i++)
1998         {
1999             for (m = 0; m < DIM; m++)
2000             {
2001                 /* Re-reverse the velocities */
2002                 state->v[i][m] = -state->v[i][m];
2003             }
2004         }
2005     }
2006     sfree(savex);
2007 }
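/* A short derivation of the trick used above (a reconstruction, not part of
 * the original source): after the velocities are reversed,
 *
 *   savex = x(0) + dt * (-v(0)) = x(0) - dt * v(0)
 *
 * which is the leap-frog position one step back in time. Constraining savex
 * with x(0) as the reference then yields properly constrained coordinates
 * at t = -dt, and the velocities are reversed once more afterwards to
 * restore their original sign.
 */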
2008
2009 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2010 {
2011     double eners[2], virs[2], enersum, virsum, y0, f, g, h;
2012     double r0, r1, r, rc3, rc9, ea, eb, ec, pa, pb, pc, pd;
2013     double invscale, invscale2, invscale3;
2014     int    ri0, ri1, ri, i, offstart, offset;
2015     real   scale, *vdwtab, tabfactor, tmp;
2016
2017     fr->enershiftsix    = 0;
2018     fr->enershifttwelve = 0;
2019     fr->enerdiffsix     = 0;
2020     fr->enerdifftwelve  = 0;
2021     fr->virdiffsix      = 0;
2022     fr->virdifftwelve   = 0;
2023
2024     if (eDispCorr != edispcNO)
2025     {
2026         for (i = 0; i < 2; i++)
2027         {
2028             eners[i] = 0;
2029             virs[i]  = 0;
2030         }
2031         if ((fr->vdwtype == evdwSWITCH) || (fr->vdwtype == evdwSHIFT))
2032         {
2033             if (fr->rvdw_switch == 0)
2034             {
2035                 gmx_fatal(FARGS,
2036                           "With dispersion correction rvdw-switch cannot be zero "
2037                           "for vdw-type = %s", evdw_names[fr->vdwtype]);
2038             }
2039
2040             scale  = fr->nblists[0].table_elec_vdw.scale;
2041             vdwtab = fr->nblists[0].table_vdw.data;
2042
2043             /* Round the cut-offs to exact table values for precision */
2044             ri0  = floor(fr->rvdw_switch*scale);
2045             ri1  = ceil(fr->rvdw*scale);
2046             r0   = ri0/scale;
2047             r1   = ri1/scale;
2048             rc3  = r0*r0*r0;
2049             rc9  = rc3*rc3*rc3;
2050
2051             if (fr->vdwtype == evdwSHIFT)
2052             {
2053                 /* Determine the constant energy shift below rvdw_switch.
2054                  * The table carries a scale factor, since it was scaled down to compensate
2055                  * for the c6/c12 parameters being scaled up by the derivative factors to save flops in analytical kernels.
2056                  */
2057                 fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2058                 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2059             }
2060             /* Add the constant part from 0 to rvdw_switch.
2061              * This integration from 0 to rvdw_switch overcounts the number
2062              * of interactions by 1, as it also counts the self interaction.
2063              * We will correct for this later.
2064              */
2065             eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2066             eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2067
2068             invscale  = 1.0/(scale);
2069             invscale2 = invscale*invscale;
2070             invscale3 = invscale*invscale2;
2071
2072             /* The following summation is derived from the cubic spline
2073                definition, Numerical Recipes in C, second edition,
2074                pp. 113-116. It is exact for the cubic spline. We first
2075                calculate the negative of the energy from rvdw to
2076                rvdw_switch, assuming that g(r)=1, and then add the more
2077                standard, abrupt cut-off correction to that result,
2078                yielding the long-range correction for a switched
2079                function. We perform both the pressure and energy loops
2080                at the same time for simplicity, as the computational cost is low. */
2081
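            /* A reconstruction of the integrals evaluated below (not in the
             * original source): on table interval ri the spline is
             *   y(t) = y0 + f*t + g*t^2 + h*t^3,  t in [0,1],
             * at radius r' = r + t*d with d = 1/scale. The energy sum
             * accumulates int r'^2 y dr' over the interval, which expands
             * into the coefficients ea = d^3, eb = 2*d^2*r, ec = d*r^2,
             * so e.g. the y0 term contributes y0*(ea/3 + eb/2 + ec). The
             * pa..pd terms play the same role for the virial integrand
             * r'^3 * (dy/dr').
             */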
2082             for (i = 0; i < 2; i++)
2083             {
2084                 enersum = 0.0; virsum = 0.0;
2085                 if (i == 0)
2086                 {
2087                     offstart = 0;
2088                     /* Since the dispersion table has been scaled down by a factor of 6.0 and the
2089                      * repulsion by a factor of 12.0 to compensate for the c6/c12 parameters inside
2090                      * nbfp[] being scaled up (to save flops in kernels), we need to correct for this.
2091                      */
2092                     tabfactor = 6.0;
2093                 }
2094                 else
2095                 {
2096                     offstart  = 4;
2097                     tabfactor = 12.0;
2098                 }
2099                 for (ri = ri0; ri < ri1; ri++)
2100                 {
2101                     r  = ri*invscale;
2102                     ea = invscale3;
2103                     eb = 2.0*invscale2*r;
2104                     ec = invscale*r*r;
2105
2106                     pa = invscale3;
2107                     pb = 3.0*invscale2*r;
2108                     pc = 3.0*invscale*r*r;
2109                     pd = r*r*r;
2110
2111                     /* This '8' comes from the packing of the vdwtab array; it should perhaps be a #define */
2112                     offset = 8*ri + offstart;
2113                     y0     = vdwtab[offset];
2114                     f      = vdwtab[offset+1];
2115                     g      = vdwtab[offset+2];
2116                     h      = vdwtab[offset+3];
2117
2118                     enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2119                     virsum  += f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2120                 }
2121
2122                 enersum  *= 4.0*M_PI*tabfactor;
2123                 virsum   *= 4.0*M_PI*tabfactor;
2124                 eners[i] -= enersum;
2125                 virs[i]  -= virsum;
2126             }
2127
2128             /* Now add the correction from rvdw_switch to infinity */
2129             eners[0] += -4.0*M_PI/(3.0*rc3);
2130             eners[1] +=  4.0*M_PI/(9.0*rc9);
2131             virs[0]  +=  8.0*M_PI/rc3;
2132             virs[1]  += -16.0*M_PI/(3.0*rc9);
2133         }
2134         else if ((fr->vdwtype == evdwCUT) || (fr->vdwtype == evdwUSER))
2135         {
2136             if (fr->vdwtype == evdwUSER && fplog)
2137             {
2138                 fprintf(fplog,
2139                         "WARNING: using dispersion correction with user tables\n");
2140             }
2141             rc3  = fr->rvdw*fr->rvdw*fr->rvdw;
2142             rc9  = rc3*rc3*rc3;
2143             /* Contribution beyond the cut-off */
2144             eners[0] += -4.0*M_PI/(3.0*rc3);
2145             eners[1] +=  4.0*M_PI/(9.0*rc9);
2146             if (fr->vdw_modifier == eintmodPOTSHIFT)
2147             {
2148                 /* Contribution within the cut-off */
2149                 eners[0] += -4.0*M_PI/(3.0*rc3);
2150                 eners[1] +=  4.0*M_PI/(3.0*rc9);
2151             }
2152             /* Contribution beyond the cut-off */
2153             virs[0]  +=  8.0*M_PI/rc3;
2154             virs[1]  += -16.0*M_PI/(3.0*rc9);
2155         }
2156         else
2157         {
2158             gmx_fatal(FARGS,
2159                       "Dispersion correction is not implemented for vdw-type = %s",
2160                       evdw_names[fr->vdwtype]);
2161         }
2162         fr->enerdiffsix    = eners[0];
2163         fr->enerdifftwelve = eners[1];
2164         /* The 0.5 is due to the Gromacs definition of the virial */
2165         fr->virdiffsix     = 0.5*virs[0];
2166         fr->virdifftwelve  = 0.5*virs[1];
2167     }
2168 }
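/* A worked check of the abrupt cut-off tail terms used above (standard
 * results, assuming g(r) = 1 beyond the cut-off rc):
 *
 *   dispersion: int_rc^inf 4*pi*r^2 * (-1/r^6)  dr = -4*pi/(3*rc^3)
 *   repulsion:  int_rc^inf 4*pi*r^2 * (+1/r^12) dr = +4*pi/(9*rc^9)
 *
 * and for the virial integrand r*dV/dr:
 *
 *   dispersion: int_rc^inf 4*pi*r^2 * r*(+6/r^7)   dr = +8*pi/rc^3
 *   repulsion:  int_rc^inf 4*pi*r^2 * r*(-12/r^13) dr = -16*pi/(3*rc^9)
 *
 * which match the eners[] and virs[] increments above; the C6/C12 averages
 * and the particle density are applied later, in calc_dispcorr().
 */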
2169
2170 void calc_dispcorr(FILE *fplog, t_inputrec *ir, t_forcerec *fr,
2171                    gmx_large_int_t step, int natoms,
2172                    matrix box, real lambda, tensor pres, tensor virial,
2173                    real *prescorr, real *enercorr, real *dvdlcorr)
2174 {
2175     gmx_bool bCorrAll, bCorrPres;
2176     real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2177     int      m;
2178
2179     *prescorr = 0;
2180     *enercorr = 0;
2181     *dvdlcorr = 0;
2182
2183     clear_mat(virial);
2184     clear_mat(pres);
2185
2186     if (ir->eDispCorr != edispcNO)
2187     {
2188         bCorrAll  = (ir->eDispCorr == edispcAllEner ||
2189                      ir->eDispCorr == edispcAllEnerPres);
2190         bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2191                      ir->eDispCorr == edispcAllEnerPres);
2192
2193         invvol = 1/det(box);
2194         if (fr->n_tpi)
2195         {
2196             /* Only correct for the interactions with the inserted molecule */
2197             dens   = (natoms - fr->n_tpi)*invvol;
2198             ninter = fr->n_tpi;
2199         }
2200         else
2201         {
2202             dens   = natoms*invvol;
2203             ninter = 0.5*natoms;
2204         }
2205
2206         if (ir->efep == efepNO)
2207         {
2208             avcsix    = fr->avcsix[0];
2209             avctwelve = fr->avctwelve[0];
2210         }
2211         else
2212         {
2213             avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
2214             avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2215         }
2216
2217         enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2218         *enercorr += avcsix*enerdiff;
2219         dvdlambda  = 0.0;
2220         if (ir->efep != efepNO)
2221         {
2222             dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2223         }
2224         if (bCorrAll)
2225         {
2226             enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2227             *enercorr += avctwelve*enerdiff;
2228             if (fr->efep != efepNO)
2229             {
2230                 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2231             }
2232         }
2233
2234         if (bCorrPres)
2235         {
2236             svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2237             if (ir->eDispCorr == edispcAllEnerPres)
2238             {
2239                 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2240             }
2241             /* The factor 2 is because of the Gromacs virial definition */
2242             spres = -2.0*invvol*svir*PRESFAC;
2243
2244             for (m = 0; m < DIM; m++)
2245             {
2246                 virial[m][m] += svir;
2247                 pres[m][m]   += spres;
2248             }
2249             *prescorr += spres;
2250         }
2251
2252         /* We can't currently control when this prints; for now, just print when debugging */
2253         if (debug)
2254         {
2255             if (bCorrAll)
2256             {
2257                 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2258                         avcsix, avctwelve);
2259             }
2260             if (bCorrPres)
2261             {
2262                 fprintf(debug,
2263                         "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2264                         *enercorr, spres, svir);
2265             }
2266             else
2267             {
2268                 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2269             }
2270         }
2271
2272         if (fr->bSepDVDL && do_per_step(step, ir->nstlog))
2273         {
2274             gmx_print_sepdvdl(fplog, "Dispersion correction", *enercorr, dvdlambda);
2275         }
2276         if (fr->efep != efepNO)
2277         {
2278             *dvdlcorr += dvdlambda;
2279         }
2280     }
2281 }
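/* Restating the energy correction above as a formula: with rho the number
 * density and ninter the number of interactions to correct (natoms/2 in
 * the normal case, counting each pair once), the six-power term is
 *
 *   E_corr = <C6> * ninter * (rho * enerdiffsix - enershiftsix)
 *
 * with an analogous C12 term when eDispCorr requests it. The pressure
 * correction follows as P_corr = -2 * svir * PRESFAC / V, where
 * svir = ninter * rho * <C6> * virdiffsix / 3 (plus the C12 term).
 */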
2282
2283 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2284                   t_graph *graph, rvec x[])
2285 {
2286     if (fplog)
2287     {
2288         fprintf(fplog, "Removing pbc first time\n");
2289     }
2290     calc_shifts(box, fr->shift_vec);
2291     if (graph)
2292     {
2293         mk_mshift(fplog, graph, fr->ePBC, box, x);
2294         if (gmx_debug_at)
2295         {
2296             p_graph(debug, "do_pbc_first 1", graph);
2297         }
2298         shift_self(graph, box, x);
2299         /* By doing an extra mk_mshift the molecules that are broken
2300          * because they were e.g. imported from another software
2301          * will be made whole again. Such are the healing powers
2302          * of GROMACS.
2303          */
2304         mk_mshift(fplog, graph, fr->ePBC, box, x);
2305         if (gmx_debug_at)
2306         {
2307             p_graph(debug, "do_pbc_first 2", graph);
2308         }
2309     }
2310     if (fplog)
2311     {
2312         fprintf(fplog, "Done rmpbc\n");
2313     }
2314 }
2315
2316 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2317                             gmx_mtop_t *mtop, rvec x[],
2318                             gmx_bool bFirst)
2319 {
2320     t_graph        *graph;
2321     int             mb, as, mol;
2322     gmx_molblock_t *molb;
2323
2324     if (bFirst && fplog)
2325     {
2326         fprintf(fplog, "Removing pbc first time\n");
2327     }
2328
2329     snew(graph, 1);
2330     as = 0;
2331     for (mb = 0; mb < mtop->nmolblock; mb++)
2332     {
2333         molb = &mtop->molblock[mb];
2334         if (molb->natoms_mol == 1 ||
2335             (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2336         {
2337             /* Just one atom or charge group in the molecule, no PBC required */
2338             as += molb->nmol*molb->natoms_mol;
2339         }
2340         else
2341         {
2342             /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2343             mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2344                            0, molb->natoms_mol, FALSE, FALSE, graph);
2345
2346             for (mol = 0; mol < molb->nmol; mol++)
2347             {
2348                 mk_mshift(fplog, graph, ePBC, box, x+as);
2349
2350                 shift_self(graph, box, x+as);
2351                 /* The molecule is whole now.
2352                  * We don't need the second mk_mshift call as in do_pbc_first,
2353                  * since we no longer need this graph.
2354                  */
2355
2356                 as += molb->natoms_mol;
2357             }
2358             done_graph(graph);
2359         }
2360     }
2361     sfree(graph);
2362 }
2363
2364 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2365                        gmx_mtop_t *mtop, rvec x[])
2366 {
2367     low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2368 }
2369
2370 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2371                  gmx_mtop_t *mtop, rvec x[])
2372 {
2373     low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2374 }
2375
2376 void finish_run(FILE *fplog, t_commrec *cr,
2377                 t_inputrec *inputrec,
2378                 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2379                 gmx_walltime_accounting_t walltime_accounting,
2380                 wallclock_gpu_t *gputimes,
2381                 gmx_bool bWriteStat)
2382 {
2383     int     i, j;
2384     t_nrnb *nrnb_tot = NULL;
2385     real    delta_t;
2386     double  nbfs, mflop;
2387     double  elapsed_time,
2388             elapsed_time_over_all_ranks,
2389             elapsed_time_over_all_threads,
2390             elapsed_time_over_all_threads_over_all_ranks;
2391     wallcycle_sum(cr, wcycle);
2392
2393     if (cr->nnodes > 1)
2394     {
2395         snew(nrnb_tot, 1);
2396 #ifdef GMX_MPI
2397         MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2398                       cr->mpi_comm_mysim);
2399 #endif
2400     }
2401     else
2402     {
2403         nrnb_tot = nrnb;
2404     }
2405
2406     elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
2407     elapsed_time_over_all_ranks                  = elapsed_time;
2408     elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
2409     elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
2410 #ifdef GMX_MPI
2411     if (cr->nnodes > 1)
2412     {
2413         /* reduce elapsed_time over all MPI ranks in the current simulation */
2414         MPI_Allreduce(&elapsed_time,
2415                       &elapsed_time_over_all_ranks,
2416                       1, MPI_DOUBLE, MPI_SUM,
2417                       cr->mpi_comm_mysim);
2418         elapsed_time_over_all_ranks /= cr->nnodes;
2419         /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
2420          * current simulation. */
2421         MPI_Allreduce(&elapsed_time_over_all_threads,
2422                       &elapsed_time_over_all_threads_over_all_ranks,
2423                       1, MPI_DOUBLE, MPI_SUM,
2424                       cr->mpi_comm_mysim);
2425     }
2426 #endif
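    /* Note that after the reductions above, elapsed_time_over_all_ranks
     * holds the average wall time per rank, while the over-all-threads
     * variant is left as the plain sum over all threads of all ranks.
     */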
2427
2428     if (SIMMASTER(cr))
2429     {
2430         print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2431     }
2432     if (cr->nnodes > 1)
2433     {
2434         sfree(nrnb_tot);
2435     }
2436
2437     if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2438     {
2439         print_dd_statistics(cr, inputrec, fplog);
2440     }
2441
2442 #ifdef GMX_MPI
2443     if (PARTDECOMP(cr))
2444     {
2445         if (MASTER(cr))
2446         {
2447             t_nrnb     *nrnb_all;
2448             int         s;
2449             MPI_Status  stat;
2450
2451             snew(nrnb_all, cr->nnodes);
2452             nrnb_all[0] = *nrnb;
2453             for (s = 1; s < cr->nnodes; s++)
2454             {
2455                 MPI_Recv(nrnb_all[s].n, eNRNB, MPI_DOUBLE, s, 0,
2456                          cr->mpi_comm_mysim, &stat);
2457             }
2458             pr_load(fplog, cr, nrnb_all);
2459             sfree(nrnb_all);
2460         }
2461         else
2462         {
2463             MPI_Send(nrnb->n, eNRNB, MPI_DOUBLE, MASTERRANK(cr), 0,
2464                      cr->mpi_comm_mysim);
2465         }
2466     }
2467 #endif
2468
2469     if (SIMMASTER(cr))
2470     {
2471         wallcycle_print(fplog, cr->nnodes, cr->npmenodes,
2472                         elapsed_time_over_all_ranks,
2473                         wcycle, gputimes);
2474
2475         if (EI_DYNAMICS(inputrec->eI))
2476         {
2477             delta_t = inputrec->delta_t;
2478         }
2479         else
2480         {
2481             delta_t = 0;
2482         }
2483
2484         if (fplog)
2485         {
2486             print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
2487                        elapsed_time_over_all_ranks,
2488                        walltime_accounting_get_nsteps_done(walltime_accounting),
2489                        delta_t, nbfs, mflop);
2490         }
2491         if (bWriteStat)
2492         {
2493             print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
2494                        elapsed_time_over_all_ranks,
2495                        walltime_accounting_get_nsteps_done(walltime_accounting),
2496                        delta_t, nbfs, mflop);
2497         }
2498     }
2499 }
2500
2501 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
2502 {
2503     /* this function works, but could probably use a logic rewrite to keep all the different
2504        types of efep straight. */
2505
2506     int       i;
2507     t_lambda *fep = ir->fepvals;
2508
2509     if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2510     {
2511         for (i = 0; i < efptNR; i++)
2512         {
2513             lambda[i] = 0.0;
2514             if (lam0)
2515             {
2516                 lam0[i] = 0.0;
2517             }
2518         }
2519         return;
2520     }
2521     else
2522     {
2523         *fep_state = fep->init_fep_state; /* This might overwrite the value from the
2524                                              checkpoint if one is set; a kludge is in
2525                                              place for now to prevent this. */
2526         for (i = 0; i < efptNR; i++)
2527         {
2528             /* For now, overwrite the lambda state with init_lambda for backwards compatibility */
2529             if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2530             {
2531                 lambda[i] = fep->init_lambda;
2532                 if (lam0)
2533                 {
2534                     lam0[i] = lambda[i];
2535                 }
2536             }
2537             else
2538             {
2539                 lambda[i] = fep->all_lambda[i][*fep_state];
2540                 if (lam0)
2541                 {
2542                     lam0[i] = lambda[i];
2543                 }
2544             }
2545         }
2546         if (ir->bSimTemp)
2547         {
2548             /* Need to rescale the reference temperatures to match the current state */
2549             for (i = 0; i < ir->opts.ngtc; i++)
2550             {
2551                 if (ir->opts.ref_t[i] > 0)
2552                 {
2553                     ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2554                 }
2555             }
2556         }
2557     }
2558
2559     /* Write the information on the current lambdas to the log */
2560     if (fplog != NULL)
2561     {
2562         fprintf(fplog, "Initial vector of lambda components:[ ");
2563         for (i = 0; i < efptNR; i++)
2564         {
2565             fprintf(fplog, "%10.4f ", lambda[i]);
2566         }
2567         fprintf(fplog, "]\n");
2568     }
2569     return;
2570 }
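/* A small worked example (hypothetical mdp input): with init-lambda = -1,
 * init-lambda-state = 1 and fep-lambdas = "0.0 0.5 1.0" (filling every
 * component vector), the loop above sets lambda[i] = all_lambda[i][1] = 0.5
 * for all i; with init-lambda = 0.25 >= 0 instead, every component is set
 * to 0.25 regardless of the lambda state.
 */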
2571
2572
2573 void init_md(FILE *fplog,
2574              t_commrec *cr, t_inputrec *ir, const output_env_t oenv,
2575              double *t, double *t0,
2576              real *lambda, int *fep_state, double *lam0,
2577              t_nrnb *nrnb, gmx_mtop_t *mtop,
2578              gmx_update_t *upd,
2579              int nfile, const t_filenm fnm[],
2580              gmx_mdoutf_t **outf, t_mdebin **mdebin,
2581              tensor force_vir, tensor shake_vir, rvec mu_tot,
2582              gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags)
2583 {
2584     int  i, j, n;
2585     real tmpt, mod;
2586
2587     /* Initial values */
2588     *t = *t0       = ir->init_t;
2589
2590     *bSimAnn = FALSE;
2591     for (i = 0; i < ir->opts.ngtc; i++)
2592     {
2593         /* set bSimAnn if any group is being annealed */
2594         if (ir->opts.annealing[i] != eannNO)
2595         {
2596             *bSimAnn = TRUE;
2597         }
2598     }
2599     if (*bSimAnn)
2600     {
2601         update_annealing_target_temp(&(ir->opts), ir->init_t);
2602     }
2603
2604     /* Initialize lambda variables */
2605     initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2606
2607     if (upd)
2608     {
2609         *upd = init_update(ir);
2610     }
2611
2612
2613     if (vcm != NULL)
2614     {
2615         *vcm = init_vcm(fplog, &mtop->groups, ir);
2616     }
2617
2618     if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2619     {
2620         if (ir->etc == etcBERENDSEN)
2621         {
2622             please_cite(fplog, "Berendsen84a");
2623         }
2624         if (ir->etc == etcVRESCALE)
2625         {
2626             please_cite(fplog, "Bussi2007a");
2627         }
2628     }
2629
2630     init_nrnb(nrnb);
2631
2632     if (nfile != -1)
2633     {
2634         *outf = init_mdoutf(nfile, fnm, Flags, cr, ir, oenv);
2635
2636         *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : (*outf)->fp_ene,
2637                               mtop, ir, (*outf)->fp_dhdl);
2638     }
2639
2640     if (ir->bAdress)
2641     {
2642         please_cite(fplog, "Fritsch12");
2643         please_cite(fplog, "Junghans10");
2644     }
2645     /* Initialize variables */
2646     clear_mat(force_vir);
2647     clear_mat(shake_vir);
2648     clear_rvec(mu_tot);
2649
2650     debug_gmx();
2651 }