Use full path for legacyheaders
[alexxy/gromacs.git] / src / gromacs / mdlib / sim_util.c
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "config.h"

#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "gromacs/legacyheaders/typedefs.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/legacyheaders/names.h"
#include "gromacs/legacyheaders/txtdump.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/legacyheaders/chargegroup.h"
#include "gromacs/math/vec.h"
#include "gromacs/legacyheaders/nrnb.h"
#include "gromacs/legacyheaders/mdrun.h"
#include "gromacs/legacyheaders/sim_util.h"
#include "gromacs/legacyheaders/update.h"
#include "gromacs/math/units.h"
#include "gromacs/legacyheaders/mdatoms.h"
#include "gromacs/legacyheaders/force.h"
#include "gromacs/legacyheaders/bondf.h"
#include "gromacs/legacyheaders/pme.h"
#include "gromacs/legacyheaders/disre.h"
#include "gromacs/legacyheaders/orires.h"
#include "gromacs/legacyheaders/network.h"
#include "gromacs/legacyheaders/calcmu.h"
#include "gromacs/legacyheaders/constr.h"
#include "gromacs/legacyheaders/copyrite.h"
#include "gromacs/legacyheaders/domdec.h"
#include "gromacs/legacyheaders/genborn.h"
#include "nbnxn_atomdata.h"
#include "nbnxn_search.h"
#include "nbnxn_kernels/nbnxn_kernel_ref.h"
#include "nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "gromacs/legacyheaders/nonbonded.h"
#include "../gmxlib/nonbonded/nb_kernel.h"
#include "../gmxlib/nonbonded/nb_free_energy.h"

#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_data_mgmt.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/imd/imd.h"
#include "adress.h"
#include "gromacs/legacyheaders/qmmm.h"

#include "gromacs/legacyheaders/gmx_omp_nthreads.h"

#include "nbnxn_cuda/nbnxn_cuda.h"

#include "nb_verlet.h"

void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                gmx_int64_t               step,
                t_inputrec               *ir,
                t_commrec gmx_unused     *cr)
{
    time_t finish;
    char   timebuf[STRLEN];
    double dt, elapsed_seconds, time_per_step;
    char   buf[48];

#ifndef GMX_THREAD_MPI
    if (!PAR(cr))
#endif
    {
        fprintf(out, "\r");
    }
    fprintf(out, "step %s", gmx_step_str(step, buf));
    if (step >= ir->nstlist)
    {
        double seconds_since_epoch = gmx_gettime();
        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step) * time_per_step;

        if (ir->nsteps >= 0)
        {
            if (dt >= 300)
            {
                finish = (time_t) (seconds_since_epoch + dt);
                gmx_ctime_r(&finish, timebuf, STRLEN);
                sprintf(buf, "%s", timebuf);
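                /* gmx_ctime_r appends a newline; strip it before printing */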
                buf[strlen(buf)-1] = '\0';
                fprintf(out, ", will finish %s", buf);
            }
            else
            {
                fprintf(out, ", remaining wall clock time: %5d s          ", (int)dt);
            }
        }
        else
        {
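            /* When the number of steps is not known (nsteps < 0), report the
             * current rate instead: delta_t is in ps, so delta_t/1000 is the
             * ns advanced per step and 24*60*60 the seconds per day.
             */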
            fprintf(out, " performance: %.1f ns/day    ",
                    ir->delta_t/1000*24*60*60/time_per_step);
        }
    }
#ifndef GMX_THREAD_MPI
    if (PAR(cr))
    {
        fprintf(out, "\n");
    }
#endif

    fflush(out);
}

void print_date_and_time(FILE *fplog, int nodeid, const char *title,
                         double the_time)
{
    char   time_string[STRLEN];

    if (!fplog)
    {
        return;
    }

    {
        int    i;
        char   timebuf[STRLEN];
        time_t temp_time = (time_t) the_time;

        gmx_ctime_r(&temp_time, timebuf, STRLEN);
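        /* Copy the printable part of the timestamp; gmx_ctime_r terminates
         * the string with a newline, which we do not want here.
         */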
        for (i = 0; timebuf[i] >= ' '; i++)
        {
            time_string[i] = timebuf[i];
        }
        time_string[i] = '\0';
    }

    fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
}

void print_start(FILE *fplog, t_commrec *cr,
                 gmx_walltime_accounting_t walltime_accounting,
                 const char *name)
{
    char buf[STRLEN];

    sprintf(buf, "Started %s", name);
    print_date_and_time(fplog, cr->nodeid, buf,
                        walltime_accounting_get_start_time_stamp(walltime_accounting));
}

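/* Adds the long-range forces flr to the short-range forces f for
 * all atoms in the half-open range [start, end).
 */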
static void sum_forces(int start, int end, rvec f[], rvec flr[])
{
    int i;

    if (gmx_debug_at)
    {
        pr_rvecs(debug, 0, "fsr", f+start, end-start);
        pr_rvecs(debug, 0, "flr", flr+start, end-start);
    }
    for (i = start; (i < end); i++)
    {
        rvec_inc(f[i], flr[i]);
    }
}

/*
 * calc_f_el calculates forces due to an electric field.
 *
 * force is kJ mol^-1 nm^-1 = e * kJ mol^-1 nm^-1 / e
 *
 * Et[] contains the parameters for the time-dependent
 * part of the field (not yet used).
 * Ex[] contains the parameters for
 * the spatially dependent part of the field. You can have cool periodic
 * fields in principle, but only a constant field is supported
 * now.
 * The function should return the energy due to the electric field
 * (if any) but for now returns 0.
 *
 * WARNING:
 * There can be problems with the virial.
 * Since the field is not self-consistent this is unavoidable.
 * For neutral molecules the virial is correct within this approximation.
 * For neutral systems with many charged molecules the error is small.
 * But for systems with a net charge or a few charged molecules
 * the error can be significant when the field is high.
 * Solution: implement a self-consistent electric field into PME.
 */
static void calc_f_el(FILE *fp, int  start, int homenr,
                      real charge[], rvec f[],
                      t_cosines Ex[], t_cosines Et[], double t)
{
    rvec Ext;
    real t0;
    int  i, m;

    for (m = 0; (m < DIM); m++)
    {
        if (Et[m].n > 0)
        {
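            /* With three parameters the field is a pulse: a cosine with
             * angular frequency a[0], centered at t0 = a[1], under a
             * Gaussian envelope of width a[2].
             */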
            if (Et[m].n == 3)
            {
                t0     = Et[m].a[1];
                Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
            }
            else
            {
                Ext[m] = cos(Et[m].a[0]*t);
            }
        }
        else
        {
            Ext[m] = 1.0;
        }
        if (Ex[m].n > 0)
        {
            /* Convert the field strength from V/nm to MD-units */
            Ext[m] *= Ex[m].a[0]*FIELDFAC;
            for (i = start; (i < start+homenr); i++)
            {
                f[i][m] += charge[i]*Ext[m];
            }
        }
        else
        {
            Ext[m] = 0;
        }
    }
    if (fp != NULL)
    {
        fprintf(fp, "%10g  %10g  %10g  %10g #FIELD\n", t,
                Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
    }
}

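/* Computes the virial contribution of the local atoms from the
 * short-range forces, plus the position-restraint and wall terms;
 * the total virial is assembled later in global_stat.
 */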
static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    int    i, j;
    tensor virtest;

    /* The short-range virial from surrounding boxes */
    clear_mat(vir_part);
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][i] += fr->vir_diag_posres[i];
    }

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][ZZ] += fr->vir_wall_z[i];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}

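/* Computes position-restraint forces and energies, including the
 * foreign-lambda energies needed for free-energy calculations.
 */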
static void posres_wrapper(int flags,
                           t_inputrec *ir,
                           t_nrnb *nrnb,
                           gmx_localtop_t *top,
                           matrix box, rvec x[],
                           gmx_enerdata_t *enerd,
                           real *lambda,
                           t_forcerec *fr)
{
    t_pbc pbc;
    real  v, dvdl;
    int   i;

    /* Position restraints always require full pbc */
    set_pbc(&pbc, ir->ePBC, box);
    dvdl = 0;
    v    = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
                  top->idef.iparams_posres,
                  (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
                  ir->ePBC == epbcNONE ? NULL : &pbc,
                  lambda[efptRESTRAINT], &dvdl,
                  fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
    enerd->term[F_POSRES] += v;
    /* If just the force constant changes, the FEP term is linear,
     * but if the reference position changes, it is not.
     */
    enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
    inc_nrnb(nrnb, eNR_POSRES, top->idef.il[F_POSRES].nr/2);

    if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
    {
        for (i = 0; i < enerd->n_lambda; i++)
        {
            real dvdl_dum, lambda_dum;

            lambda_dum = (i == 0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
            v          = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
                                top->idef.iparams_posres,
                                (const rvec*)x, NULL, NULL,
                                ir->ePBC == epbcNONE ? NULL : &pbc, lambda_dum, &dvdl,
                                fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
            enerd->enerpart_lambda[i] += v;
        }
    }
}

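/* Computes flat-bottomed position-restraint forces and energies */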
static void fbposres_wrapper(t_inputrec *ir,
                             t_nrnb *nrnb,
                             gmx_localtop_t *top,
                             matrix box, rvec x[],
                             gmx_enerdata_t *enerd,
                             t_forcerec *fr)
{
    t_pbc pbc;
    real  v;

    /* Flat-bottomed position restraints always require full pbc */
    set_pbc(&pbc, ir->ePBC, box);
    v = fbposres(top->idef.il[F_FBPOSRES].nr, top->idef.il[F_FBPOSRES].iatoms,
                 top->idef.iparams_fbposres,
                 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
                 ir->ePBC == epbcNONE ? NULL : &pbc,
                 fr->rc_scaling, fr->ePBC, fr->posres_com);
    enerd->term[F_FBPOSRES] += v;
    inc_nrnb(nrnb, eNR_FBPOSRES, top->idef.il[F_FBPOSRES].nr/2);
}

static void pull_potential_wrapper(t_commrec *cr,
                                   t_inputrec *ir,
                                   matrix box, rvec x[],
                                   rvec f[],
                                   tensor vir_force,
                                   t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   real *lambda,
                                   double t)
{
    t_pbc  pbc;
    real   dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    set_pbc(&pbc, ir->ePBC, box);
    dvdl                     = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->ePull, ir->pull, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
}

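/* With separate PME ranks, receives the mesh forces, virial and
 * energies from the PME ranks and accumulates them on this PP rank.
 */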
static void pme_receive_force_ener(t_commrec      *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
                                   t_forcerec     *fr)
{
    real   e_q, e_lj, v, dvdl_q, dvdl_lj;
    float  cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e_q,
                      fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}

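/* Prints all atoms in the home range whose force exceeds pforce
 * (or is NaN), to help diagnose a blowing-up system.
 */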
static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_int64_t step, real pforce, rvec *x, rvec *f)
{
    int  i;
    real pf2, fn2;
    char buf[STEPSTRSIZE];

    pf2 = sqr(pforce);
    for (i = 0; i < md->homenr; i++)
    {
        fn2 = norm2(f[i]);
        /* We also catch NAN, if the compiler does not optimize this away. */
        if (fn2 >= pf2 || fn2 != fn2)
        {
            fprintf(fp, "step %s  atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    gmx_step_str(step, buf),
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
        }
    }
}

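/* Spreads the mesh forces on virtual sites, adds the no-virial-sum
 * forces into f and the mesh contributions into the virial, and
 * optionally prints large forces.
 */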
static void post_process_forces(t_commrec *cr,
                                gmx_int64_t step,
                                t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                                gmx_localtop_t *top,
                                matrix box, rvec x[],
                                rvec f[],
                                tensor vir_force,
                                t_mdatoms *mdatoms,
                                t_graph *graph,
                                t_forcerec *fr, gmx_vsite_t *vsite,
                                int flags)
{
    if (fr->bF_NoVirSum)
    {
        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
                           (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            if (fr->bDomDec)
            {
                sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
            }
            else
            {
                sum_forces(0, mdatoms->homenr,
                           f, fr->f_novirsum);
            }
            if (EEL_FULL(fr->eeltype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_el_recip, vir_force);
            }
            if (EVDW_PME(fr->vdwtype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_lj_recip, vir_force);
            }
            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}

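/* Dispatches the non-bonded Verlet-scheme kernel for one interaction
 * locality: plain C, SIMD 4xN/2xNN, CUDA launch, or the GPU reference
 * kernel, and books the corresponding flop counts in nrnb.
 */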
static void do_nb_verlet(t_forcerec *fr,
                         interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         int flags, int ilocality,
                         int clearF,
                         t_nrnb *nrnb,
                         gmx_wallcycle_t wcycle)
{
    int                        nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    char                      *env;
    nonbonded_verlet_group_t  *nbvg;
    gmx_bool                   bCUDA;

    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nbvg = &fr->nbv->grp[ilocality];

    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    bCUDA = (nbvg->kernel_type == nbnxnk8x8x8_CUDA);

    /* CUDA kernel launch overhead is already timed separately */
    if (!bCUDA)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    }
    switch (nbvg->kernel_type)
    {
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             nbvg->nbat, ic,
                             fr->shift_vec,
                             flags,
                             clearF,
                             fr->fshift[0],
                             enerd->grpp.ener[egCOULSR],
                             fr->bBHAM ?
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  nbvg->nbat, ic,
                                  nbvg->ewald_excl,
                                  fr->shift_vec,
                                  flags,
                                  clearF,
                                  fr->fshift[0],
                                  enerd->grpp.ener[egCOULSR],
                                  fr->bBHAM ?
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
            break;
        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   nbvg->nbat, ic,
                                   nbvg->ewald_excl,
                                   fr->shift_vec,
                                   flags,
                                   clearF,
                                   fr->fshift[0],
                                   enerd->grpp.ener[egCOULSR],
                                   fr->bBHAM ?
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk8x8x8_CUDA:
            nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
            break;

        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 nbvg->nbat, ic,
                                 fr->shift_vec,
                                 flags,
                                 clearF,
                                 nbvg->nbat->out[0].f,
                                 fr->fshift[0],
                                 enerd->grpp.ener[egCOULSR],
                                 fr->bBHAM ?
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);
            break;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");

    }
    if (!bCUDA)
    {
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!bCUDA && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bCUDA && nbnxn_cuda_is_kernel_ewald_analytical(fr->nbv->cu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);

    if (ic->vdw_modifier == eintmodFORCESWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdw_modifier == eintmodPOTSWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdwtype == evdwPME)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
}

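/* Computes the perturbed non-bonded interactions with the free-energy
 * kernel, OpenMP-parallel over the FEP pair lists, including foreign
 * lambda energies when soft-core requires recalculation.
 */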
static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
                             t_forcerec           *fr,
                             rvec                  x[],
                             rvec                  f[],
                             t_mdatoms            *mdatoms,
                             t_lambda             *fepvals,
                             real                 *lambda,
                             gmx_enerdata_t       *enerd,
                             int                   flags,
                             t_nrnb               *nrnb,
                             gmx_wallcycle_t       wcycle)
{
    int              donb_flags;
    nb_kernel_data_t kernel_data;
    real             lam_i[efptNR];
    real             dvdl_nb[efptNR];
    int              th;
    int              i, j;

    donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    /* Currently all group scheme kernels always calculate (shift-)forces */
    if (flags & GMX_FORCE_FORCES)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (flags & GMX_FORCE_VIRIAL)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (flags & GMX_FORCE_ENERGY)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }
    if (flags & GMX_FORCE_DO_LR)
    {
        donb_flags |= GMX_NONBONDED_DO_LR;
    }

    kernel_data.flags  = donb_flags;
    kernel_data.lambda = lambda;
    kernel_data.dvdl   = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
    kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i]  = 0;
    }

    assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);

    wallcycle_sub_start(wcycle, ewcsNONBONDED);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
    for (th = 0; th < nbl_lists->nnbl; th++)
    {
        gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                  x, f, fr, mdatoms, &kernel_data, nrnb);
    }

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energy contributions.
     */
    if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
    {
        kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda         = lam_i;
        kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
        kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
        /* Note that we add to kernel_data.dvdl, but ignore the result */

        for (i = 0; i < enerd->n_lambda; i++)
        {
            for (j = 0; j < efptNR; j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
            for (th = 0; th < nbl_lists->nnbl; th++)
            {
                gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                          x, f, fr, mdatoms, &kernel_data, nrnb);
            }

            sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
            enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
        }
    }

    wallcycle_sub_stop(wcycle, ewcsNONBONDED);
}

gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
{
    return nbv != NULL && nbv->bUseGPU;
}

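/* Computes forces and energies for the Verlet cut-off scheme.
 * The rough order of operations is: communicate x to PME ranks,
 * grid/pair-search when needed, launch GPU non-bonded work early,
 * run CPU bonded/PME work while the GPU computes, then wait, reduce
 * and communicate forces, and finally post-process (vsites, virial).
 */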
void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_localtop_t *top,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         rvec f[],
                         tensor vir_force,
                         t_mdatoms *mdatoms,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, FILE *field, gmx_edsam_t ed,
                         gmx_bool bBornRadii,
                         int flags)
{
    int                 cg0, cg1, i, j;
    int                 start, homenr;
    int                 nb_kernel_type;
    double              mu[2*DIM];
    gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
    gmx_bool            bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    matrix              boxs;
    rvec                vzero, box_diag;
    real                e, v, dvdl;
    float               cycles_pme, cycles_force, cycles_wait_gpu;
    nonbonded_verlet_t *nbv;

    cycles_force    = 0;
    cycles_wait_gpu = 0;
    nbv             = fr->nbv;
    nb_kernel_type  = fr->nbv->grp[0].kernel_type;

    start  = 0;
    homenr = mdatoms->homenr;

    clear_mat(vir_force);

    cg0 = 0;
    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }
    if (fr->n_tpi > 0)
    {
        cg1--;
    }

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoLongRange  = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bSepLRF       = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);

    if (bStateChanged)
    {
        update_forcerec(fr, box);

        if (NEED_MUTOT(*inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);

#ifdef GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */

        int pme_flags = 0;

        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        if (EEL_PME(fr->eeltype))
        {
            pme_flags |= GMX_PME_DO_COULOMB;
        }

        if (EVDW_PME(fr->vdwtype))
        {
            pme_flags |= GMX_PME_DO_LJ;
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 pme_flags, step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */

    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        clear_rvec(vzero);
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!fr->bDomDec)
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                              0, vzero, box_diag,
                              0, mdatoms->homenr, -1, fr->cginfo, x,
                              0, NULL,
                              nbv->grp[eintLocal].kernel_type,
                              nbv->grp[eintLocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                                       fr->cginfo, x,
                                       nbv->grp[eintNonlocal].kernel_type,
                                       nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        if (nbv->ngrp == 1 ||
            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        else
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        wallcycle_stop(wcycle, ewcNS);
    }

    /* initialize the GPU atom data and copy shift vector */
    if (bUseGPU)
    {
        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }

        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* do local pair search */
    if (bNS)
    {
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                            &top->excls,
                            ic->rlist,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintLocal].nbl_lists,
                            eintLocal,
                            nbv->grp[eintLocal].kernel_type,
                            nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

        if (bUseGPU)
        {
            /* initialize local pair-list on the GPU */
            nbnxn_cuda_init_pairlist(nbv->cu_nbv,
                                     nbv->grp[eintLocal].nbl_lists.nbl[0],
                                     eintLocal);
        }
        wallcycle_stop(wcycle, ewcNS);
    }
    else
    {
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                        nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (bUseGPU)
    {
        wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
        /* launch local nonbonded F on GPU */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
                     nrnb, wcycle);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
    {
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

        if (bDiffKernels)
        {
            /* With GPU+CPU non-bonded calculations we need to copy
             * the local coordinates to the non-local nbat struct
             * (in CPU format) as the non-local kernel call also
             * calculates the local - non-local interactions.
             */
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (bDiffKernels)
            {
                nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
            }

            nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                                &top->excls,
                                ic->rlist,
                                nbv->min_ci_balanced,
                                &nbv->grp[eintNonlocal].nbl_lists,
                                eintNonlocal,
                                nbv->grp[eintNonlocal].kernel_type,
                                nrnb);

            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
            {
                /* initialize non-local pair-list on the GPU */
                nbnxn_cuda_init_pairlist(nbv->cu_nbv,
                                         nbv->grp[eintNonlocal].nbl_lists.nbl[0],
                                         eintNonlocal);
            }
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            wallcycle_start(wcycle, ewcMOVEX);
            dd_move_x(cr->dd, box, x);

            /* When we don't need the total dipole we sum it in global_stat */
            if (bStateChanged && NEED_MUTOT(*inputrec))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            wallcycle_stop(wcycle, ewcMOVEX);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bUseGPU && !bDiffKernels)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
                         nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        if (DOMAINDECOMP(cr) && !bDiffKernels)
        {
            nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
                                      flags, eatNonlocal);
        }
        nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
                                  flags, eatLocal);
        cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    if (bStateChanged && NEED_MUTOT(*inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
    if (fr->efep == efepNO)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(fr, bNS, enerd, MASTER(cr));
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Start the force cycle counter.
     * This counter is stopped in do_forcelow_level.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);
    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->f_novirsum_alloc;
                if (fr->bDomDec)
                {
                    clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
                }
                else
                {
                    clear_rvecs(homenr, fr->f_novirsum+start);
                }
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = f;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);
        if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
        {
            clear_rvecs(fr->natoms_force_constr, fr->f_twin);
        }

        clear_rvec(fr->vir_diag_posres);
    }

    if (inputrec->ePull == epullCONSTRAINT)
    {
        clear_pull_forces(inputrec->pull);
    }

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, since there the bonded
     * forces are calculated before PME, which does communication.
     * With this order, non-bonded and bonded force calculation imbalance
     * can be balanced out by the domain decomposition load balancing.
     */

    if (!bUseOrEmulGPU)
    {
        /* Maybe we should move this into do_force_lowlevel */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
                     nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }

        if (DOMAINDECOMP(cr) &&
            fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }
    }

    if (!bUseOrEmulGPU || bDiffKernels)
    {
        int aloc;

        if (DOMAINDECOMP(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
                         nrnb, wcycle);
        }

        if (!bUseOrEmulGPU)
        {
            aloc = eintLocal;
        }
        else
        {
            aloc = eintNonlocal;
        }

        /* Add all the non-bonded forces to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with calculation with domain decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* if there are multiple fshift output buffers reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
        {
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
                                                     fr->fshift);
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
    {
        posres_wrapper(flags, inputrec, nrnb, top, box, x,
                       enerd, lambda, fr);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
    {
        fbposres_wrapper(inputrec, nrnb, top, box, x, enerd, fr);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
                      &(top->atomtypes), bBornRadii, box,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    if (bSepLRF)
    {
        if (do_per_step(step, inputrec->nstcalclr))
        {
            /* Add the long range forces to the short range forces */
            for (i = 0; i < fr->natoms_force_constr; i++)
            {
                rvec_add(fr->f_twin[i], f[i], f[i]);
            }
        }
    }

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (bUseOrEmulGPU && !bDiffKernels)
    {
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
        {
            if (bUseGPU)
            {
                float cycles_tmp;

                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                nbnxn_cuda_wait_gpu(nbv->cu_nbv,
                                    nbv->grp[eintNonlocal].nbat,
                                    flags, eatNonlocal,
                                    enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                    fr->fshift);
                cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
                cycles_wait_gpu += cycles_tmp;
                cycles_force    += cycles_tmp;
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                             nrnb, wcycle);
                cycles_force += wallcycle_stop(wcycle, ewcFORCE);
            }
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
            {
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            }
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }
    }

    if (bDoForces && DOMAINDECOMP(cr))
    {
        /* Communicate the forces */
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        /* Do we need to communicate the separate force array
         * for terms that do not contribute to the single sum virial?
         * Position restraints and electric fields do not introduce
         * inter-cg forces, only full electrostatics methods do.
         * When we do not calculate the virial, fr->f_novirsum = f,
         * so we have already communicated these forces.
         */
        if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
            (flags & GMX_FORCE_VIRIAL))
        {
            dd_move_f(cr->dd, fr->f_novirsum, NULL);
        }
        if (bSepLRF)
        {
            /* We should not update the shift forces here,
             * since f_twin is already included in f.
             */
            dd_move_f(cr->dd, fr->f_twin, NULL);
        }
        wallcycle_stop(wcycle, ewcMOVEF);
    }

    if (bUseOrEmulGPU)
    {
        /* wait for local forces (or calculate in emulation mode) */
        if (bUseGPU)
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
            nbnxn_cuda_wait_gpu(nbv->cu_nbv,
                                nbv->grp[eintLocal].nbat,
                                flags, eatLocal,
                                enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                fr->fshift);
            cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

            /* now clear the GPU outputs while we finish the step on the CPU */

            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
        else
        {
            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                         nrnb, wcycle);
            wallcycle_stop(wcycle, ewcFORCE);
        }
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        /* skip the reduction if there was no local work to do */
        if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
        {
            nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                           nbv->grp[eintLocal].nbat, f);
        }
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (DOMAINDECOMP(cr))
    {
        dd_force_flop_stop(cr->dd, nrnb);
        if (wcycle)
        {
            dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
            if (bUseGPU)
            {
                dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
            }
        }
    }

    if (bDoForces)
    {
        if (IR_ELEC_FIELD(*inputrec))
        {
            /* Compute forces due to electric field */
            calc_f_el(MASTER(cr) ? field : NULL,
                      start, homenr, mdatoms->chargeA, fr->f_novirsum,
                      inputrec->ex, inputrec->et, t);
        }

1484         /* If we have NoVirSum forces, but we do not calculate the virial,
1485          * we sum fr->f_novirsum = f later.
1486          */
1487         if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1488         {
1489             wallcycle_start(wcycle, ewcVSITESPREAD);
1490             spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1491                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1492             wallcycle_stop(wcycle, ewcVSITESPREAD);
1493
1494             if (bSepLRF)
1495             {
1496                 wallcycle_start(wcycle, ewcVSITESPREAD);
1497                 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1498                                nrnb,
1499                                &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1500                 wallcycle_stop(wcycle, ewcVSITESPREAD);
1501             }
1502         }
1503
1504         if (flags & GMX_FORCE_VIRIAL)
1505         {
1506             /* Calculation of the virial must be done after vsites! */
1507             calc_virial(0, mdatoms->homenr, x, f,
1508                         vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1509         }
1510     }
1511
1512     if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1513     {
1514         pull_potential_wrapper(cr, inputrec, box, x,
1515                                f, vir_force, mdatoms, enerd, lambda, t);
1516     }
1517
1518     /* Add the forces from enforced rotation potentials (if any) */
1519     if (inputrec->bRot)
1520     {
1521         wallcycle_start(wcycle, ewcROTadd);
1522         enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1523         wallcycle_stop(wcycle, ewcROTadd);
1524     }
1525
1526     /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1527     IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
1528
1529     if (PAR(cr) && !(cr->duty & DUTY_PME))
1530     {
1531         /* In case of node-splitting, the PP nodes receive the long-range
1532          * forces, virial and energy from the PME nodes here.
1533          */
1534         pme_receive_force_ener(cr, wcycle, enerd, fr);
1535     }
1536
1537     if (bDoForces)
1538     {
1539         post_process_forces(cr, step, nrnb, wcycle,
1540                             top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1541                             flags);
1542     }
1543
1544     /* Sum the potential energy terms from group contributions */
1545     sum_epot(&(enerd->grpp), enerd->term);
1546 }
1547
1548 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1549                         t_inputrec *inputrec,
1550                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1551                         gmx_localtop_t *top,
1552                         gmx_groups_t *groups,
1553                         matrix box, rvec x[], history_t *hist,
1554                         rvec f[],
1555                         tensor vir_force,
1556                         t_mdatoms *mdatoms,
1557                         gmx_enerdata_t *enerd, t_fcdata *fcd,
1558                         real *lambda, t_graph *graph,
1559                         t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1560                         double t, FILE *field, gmx_edsam_t ed,
1561                         gmx_bool bBornRadii,
1562                         int flags)
1563 {
1564     int        cg0, cg1, i, j;
1565     int        start, homenr;
1566     double     mu[2*DIM];
1567     gmx_bool   bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
1568     gmx_bool   bDoLongRangeNS, bDoForces, bDoPotential, bSepLRF;
1569     gmx_bool   bDoAdressWF;
1570     matrix     boxs;
1571     rvec       vzero, box_diag;
1572     real       e, v, dvdlambda[efptNR];
1573     t_pbc      pbc;
1574     float      cycles_pme, cycles_force;
1575
1576     start  = 0;
1577     homenr = mdatoms->homenr;
1578
1579     clear_mat(vir_force);
1580
1581     cg0 = 0;
1582     if (DOMAINDECOMP(cr))
1583     {
1584         cg1 = cr->dd->ncg_tot;
1585     }
1586     else
1587     {
1588         cg1 = top->cgs.nr;
1589     }
1590     if (fr->n_tpi > 0)
1591     {
1592         cg1--;
1593     }
1594
1595     bStateChanged  = (flags & GMX_FORCE_STATECHANGED);
1596     bNS            = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1597     /* Should we update the long-range neighborlists at this step? */
1598     bDoLongRangeNS = fr->bTwinRange && bNS;
1599     /* Should we put the charge groups on the neighborsearching grid this step? */
1600     bFillGrid      = (bNS && bStateChanged);
1601     bCalcCGCM      = (bFillGrid && !DOMAINDECOMP(cr));
1602     bDoForces      = (flags & GMX_FORCE_FORCES);
1603     bDoPotential   = (flags & GMX_FORCE_ENERGY);
1604     bSepLRF        = ((inputrec->nstcalclr > 1) && bDoForces &&
1605                       (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));
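    /* With bSepLRF the twin-range long-range forces are accumulated in a
     * separate array, fr->f_twin, and are only folded into f on the steps
     * where they are recalculated (every nstcalclr steps, see below).
     */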
1606
1607     /* should probably move this to the forcerec since it doesn't change */
1608     bDoAdressWF   = ((fr->adress_type != eAdressOff));
1609
1610     if (bStateChanged)
1611     {
1612         update_forcerec(fr, box);
1613
1614         if (NEED_MUTOT(*inputrec))
1615         {
1616             /* Calculate total (local) dipole moment in a temporary common array.
1617              * This makes it possible to sum them over nodes faster.
1618              */
1619             calc_mu(start, homenr,
1620                     x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1621                     mu, mu+DIM);
1622         }
1623     }
1624
1625     if (fr->ePBC != epbcNONE)
1626     {
1627         /* Compute shift vectors every step,
1628          * because of pressure coupling or box deformation!
1629          */
1630         if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1631         {
1632             calc_shifts(box, fr->shift_vec);
1633         }
1634
1635         if (bCalcCGCM)
1636         {
1637             put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1638                                      &(top->cgs), x, fr->cg_cm);
1639             inc_nrnb(nrnb, eNR_CGCM, homenr);
1640             inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1641         }
1642         else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1643         {
1644             unshift_self(graph, box, x);
1645         }
1646     }
1647     else if (bCalcCGCM)
1648     {
1649         calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1650         inc_nrnb(nrnb, eNR_CGCM, homenr);
1651     }
1652
1653     if (bCalcCGCM && gmx_debug_at)
1654     {
1655         pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1656     }
1657
1658 #ifdef GMX_MPI
1659     if (!(cr->duty & DUTY_PME))
1660     {
1661         /* Send particle coordinates to the pme nodes.
1662          * Since this is only implemented for domain decomposition
1663          * and domain decomposition does not use the graph,
1664          * we do not need to worry about shifting.
1665          */
1666
1667         int pme_flags = 0;
1668
1669         wallcycle_start(wcycle, ewcPP_PMESENDX);
1670
1671         bBS = (inputrec->nwall == 2);
1672         if (bBS)
1673         {
1674             copy_mat(box, boxs);
1675             svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1676         }
1677
1678         if (EEL_PME(fr->eeltype))
1679         {
1680             pme_flags |= GMX_PME_DO_COULOMB;
1681         }
1682
1683         if (EVDW_PME(fr->vdwtype))
1684         {
1685             pme_flags |= GMX_PME_DO_LJ;
1686         }
1687
1688         gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
1689                                  mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
1690                                  (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
1691                                  pme_flags, step);
1692
1693         wallcycle_stop(wcycle, ewcPP_PMESENDX);
1694     }
1695 #endif /* GMX_MPI */
1696
1697     /* Communicate coordinates and sum dipole if necessary */
1698     if (DOMAINDECOMP(cr))
1699     {
1700         wallcycle_start(wcycle, ewcMOVEX);
1701         dd_move_x(cr->dd, box, x);
1702         wallcycle_stop(wcycle, ewcMOVEX);
1703     }
1704
1705     /* update the AdResS weights beforehand */
1706     if (bStateChanged && bDoAdressWF)
1707     {
1708         /* need pbc for adress weight calculation with pbc_dx */
1709         set_pbc(&pbc, inputrec->ePBC, box);
1710         if (fr->adress_site == eAdressSITEcog)
1711         {
1712             update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
1713                                       inputrec->ePBC == epbcNONE ? NULL : &pbc);
1714         }
1715         else if (fr->adress_site == eAdressSITEcom)
1716         {
1717             update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
1718                                       inputrec->ePBC == epbcNONE ? NULL : &pbc);
1719         }
1720         else if (fr->adress_site == eAdressSITEatomatom)
1721         {
1722             update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1723                                                 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1724         }
1725         else
1726         {
1727             update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1728                                        inputrec->ePBC == epbcNONE ? NULL : &pbc);
1729         }
1730     }
1731
1732     if (NEED_MUTOT(*inputrec))
1733     {
1734
1735         if (bStateChanged)
1736         {
1737             if (PAR(cr))
1738             {
1739                 gmx_sumd(2*DIM, mu, cr);
1740             }
1741             for (i = 0; i < 2; i++)
1742             {
1743                 for (j = 0; j < DIM; j++)
1744                 {
1745                     fr->mu_tot[i][j] = mu[i*DIM + j];
1746                 }
1747             }
1748         }
1749         if (fr->efep == efepNO)
1750         {
1751             copy_rvec(fr->mu_tot[0], mu_tot);
1752         }
1753         else
1754         {
1755             for (j = 0; j < DIM; j++)
1756             {
1757                 mu_tot[j] =
1758                     (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1759             }
1760         }
1761     }
1762
1763     /* Reset energies */
1764     reset_enerdata(fr, bNS, enerd, MASTER(cr));
1765     clear_rvecs(SHIFTS, fr->fshift);
1766
1767     if (bNS)
1768     {
1769         wallcycle_start(wcycle, ewcNS);
1770
1771         if (graph && bStateChanged)
1772         {
1773             /* Calculate intramolecular shift vectors to make molecules whole */
1774             mk_mshift(fplog, graph, fr->ePBC, box, x);
1775         }
1776
1777         /* Do the actual neighbour searching */
1778         ns(fplog, fr, box,
1779            groups, top, mdatoms,
1780            cr, nrnb, bFillGrid,
1781            bDoLongRangeNS);
1782
1783         wallcycle_stop(wcycle, ewcNS);
1784     }
1785
1786     if (inputrec->implicit_solvent && bNS)
1787     {
1788         make_gb_nblist(cr, inputrec->gb_algorithm,
1789                        x, box, fr, &top->idef, graph, fr->born);
1790     }
1791
1792     if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
1793     {
1794         wallcycle_start(wcycle, ewcPPDURINGPME);
1795         dd_force_flop_start(cr->dd, nrnb);
1796     }
1797
1798     if (inputrec->bRot)
1799     {
1800         /* Enforced rotation has its own cycle counter that starts after the collective
1801          * coordinates have been communicated. It is added to ddCyclF to allow
1802          * for proper load-balancing */
1803         wallcycle_start(wcycle, ewcROT);
1804         do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1805         wallcycle_stop(wcycle, ewcROT);
1806     }
1807
1808     /* Start the force cycle counter.
1809      * This counter is stopped in do_forcelow_level.
1810      * No parallel communication should occur while this counter is running,
1811      * since that will interfere with the dynamic load balancing.
1812      */
1813     wallcycle_start(wcycle, ewcFORCE);
1814
1815     if (bDoForces)
1816     {
1817         /* Reset forces for which the virial is calculated separately:
1818          * PME/Ewald forces if necessary */
1819         if (fr->bF_NoVirSum)
1820         {
1821             if (flags & GMX_FORCE_VIRIAL)
1822             {
1823                 fr->f_novirsum = fr->f_novirsum_alloc;
1824                 if (fr->bDomDec)
1825                 {
1826                     clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1827                 }
1828                 else
1829                 {
1830                     clear_rvecs(homenr, fr->f_novirsum+start);
1831                 }
1832             }
1833             else
1834             {
1835                 /* We are not calculating the pressure so we do not need
1836                  * a separate array for forces that do not contribute
1837                  * to the pressure.
1838                  */
1839                 fr->f_novirsum = f;
1840             }
1841         }
1842
1843         /* Clear the short- and long-range forces */
1844         clear_rvecs(fr->natoms_force_constr, f);
1845         if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1846         {
1847             clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1848         }
1849
1850         clear_rvec(fr->vir_diag_posres);
1851     }
1852     if (inputrec->ePull == epullCONSTRAINT)
1853     {
1854         clear_pull_forces(inputrec->pull);
1855     }
1856
1857     /* update QMMMrec, if necessary */
1858     if (fr->bQMMM)
1859     {
1860         update_QMMMrec(cr, fr, x, mdatoms, box, top);
1861     }
1862
1863     if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1864     {
1865         posres_wrapper(flags, inputrec, nrnb, top, box, x,
1866                        enerd, lambda, fr);
1867     }
1868
1869     if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
1870     {
1871         fbposres_wrapper(inputrec, nrnb, top, box, x, enerd, fr);
1872     }
1873
1874     /* Compute the bonded and non-bonded energies and optionally forces */
1875     do_force_lowlevel(fr, inputrec, &(top->idef),
1876                       cr, nrnb, wcycle, mdatoms,
1877                       x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1878                       &(top->atomtypes), bBornRadii, box,
1879                       inputrec->fepvals, lambda,
1880                       graph, &(top->excls), fr->mu_tot,
1881                       flags,
1882                       &cycles_pme);
1883
1884     if (bSepLRF)
1885     {
1886         if (do_per_step(step, inputrec->nstcalclr))
1887         {
1888             /* Add the long range forces to the short range forces */
1889             for (i = 0; i < fr->natoms_force_constr; i++)
1890             {
1891                 rvec_add(fr->f_twin[i], f[i], f[i]);
1892             }
1893         }
1894     }
1895
1896     cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1897
1898     if (ed)
1899     {
1900         do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1901     }
1902
1903     if (DOMAINDECOMP(cr))
1904     {
1905         dd_force_flop_stop(cr->dd, nrnb);
1906         if (wcycle)
1907         {
1908             dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1909         }
1910     }
1911
1912     if (bDoForces)
1913     {
1914         if (IR_ELEC_FIELD(*inputrec))
1915         {
1916             /* Compute forces due to electric field */
1917             calc_f_el(MASTER(cr) ? field : NULL,
1918                       start, homenr, mdatoms->chargeA, fr->f_novirsum,
1919                       inputrec->ex, inputrec->et, t);
1920         }
1921
1922         if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
1923         {
1924             /* Compute thermodynamic force in hybrid AdResS region */
1925             adress_thermo_force(start, homenr, &(top->cgs), x, fr->f_novirsum, fr, mdatoms,
1926                                 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1927         }
1928
1929         /* Communicate the forces */
1930         if (DOMAINDECOMP(cr))
1931         {
1932             wallcycle_start(wcycle, ewcMOVEF);
1933             dd_move_f(cr->dd, f, fr->fshift);
1934             /* Do we need to communicate the separate force array
1935              * for terms that do not contribute to the single sum virial?
1936              * Position restraints and electric fields do not introduce
1937              * inter-cg forces, only full electrostatics methods do.
1938              * When we do not calculate the virial, fr->f_novirsum = f,
1939              * so we have already communicated these forces.
1940              */
1941             if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1942                 (flags & GMX_FORCE_VIRIAL))
1943             {
1944                 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1945             }
1946             if (bSepLRF)
1947             {
1948                 /* We should not update the shift forces here,
1949                  * since f_twin is already included in f.
1950                  */
1951                 dd_move_f(cr->dd, fr->f_twin, NULL);
1952             }
1953             wallcycle_stop(wcycle, ewcMOVEF);
1954         }
1955
1956         /* If we have NoVirSum forces, but we do not calculate the virial,
1957          * we sum fr->f_novirsum = f later.
1958          */
1959         if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1960         {
1961             wallcycle_start(wcycle, ewcVSITESPREAD);
1962             spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1963                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1964             wallcycle_stop(wcycle, ewcVSITESPREAD);
1965
1966             if (bSepLRF)
1967             {
1968                 wallcycle_start(wcycle, ewcVSITESPREAD);
1969                 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1970                                nrnb,
1971                                &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1972                 wallcycle_stop(wcycle, ewcVSITESPREAD);
1973             }
1974         }
1975
1976         if (flags & GMX_FORCE_VIRIAL)
1977         {
1978             /* Calculation of the virial must be done after vsites! */
1979             calc_virial(0, mdatoms->homenr, x, f,
1980                         vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1981         }
1982     }
1983
1984     if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1985     {
1986         pull_potential_wrapper(cr, inputrec, box, x,
1987                                f, vir_force, mdatoms, enerd, lambda, t);
1988     }
1989
1990     /* Add the forces from enforced rotation potentials (if any) */
1991     if (inputrec->bRot)
1992     {
1993         wallcycle_start(wcycle, ewcROTadd);
1994         enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1995         wallcycle_stop(wcycle, ewcROTadd);
1996     }
1997
1998     /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1999     IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
2000
2001     if (PAR(cr) && !(cr->duty & DUTY_PME))
2002     {
2003         /* In case of node-splitting, the PP nodes receive the long-range
2004          * forces, virial and energy from the PME nodes here.
2005          */
2006         pme_receive_force_ener(cr, wcycle, enerd, fr);
2007     }
2008
2009     if (bDoForces)
2010     {
2011         post_process_forces(cr, step, nrnb, wcycle,
2012                             top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
2013                             flags);
2014     }
2015
2016     /* Sum the potential energy terms from group contributions */
2017     sum_epot(&(enerd->grpp), enerd->term);
2018 }
2019
2020 void do_force(FILE *fplog, t_commrec *cr,
2021               t_inputrec *inputrec,
2022               gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
2023               gmx_localtop_t *top,
2024               gmx_groups_t *groups,
2025               matrix box, rvec x[], history_t *hist,
2026               rvec f[],
2027               tensor vir_force,
2028               t_mdatoms *mdatoms,
2029               gmx_enerdata_t *enerd, t_fcdata *fcd,
2030               real *lambda, t_graph *graph,
2031               t_forcerec *fr,
2032               gmx_vsite_t *vsite, rvec mu_tot,
2033               double t, FILE *field, gmx_edsam_t ed,
2034               gmx_bool bBornRadii,
2035               int flags)
2036 {
2037     /* modify force flag if not doing nonbonded */
2038     if (!fr->bNonbonded)
2039     {
2040         flags &= ~GMX_FORCE_NONBONDED;
2041     }
2042
2043     switch (inputrec->cutoff_scheme)
2044     {
2045         case ecutsVERLET:
2046             do_force_cutsVERLET(fplog, cr, inputrec,
2047                                 step, nrnb, wcycle,
2048                                 top,
2049                                 groups,
2050                                 box, x, hist,
2051                                 f, vir_force,
2052                                 mdatoms,
2053                                 enerd, fcd,
2054                                 lambda, graph,
2055                                 fr, fr->ic,
2056                                 vsite, mu_tot,
2057                                 t, field, ed,
2058                                 bBornRadii,
2059                                 flags);
2060             break;
2061         case ecutsGROUP:
2062             do_force_cutsGROUP(fplog, cr, inputrec,
2063                                step, nrnb, wcycle,
2064                                top,
2065                                groups,
2066                                box, x, hist,
2067                                f, vir_force,
2068                                mdatoms,
2069                                enerd, fcd,
2070                                lambda, graph,
2071                                fr, vsite, mu_tot,
2072                                t, field, ed,
2073                                bBornRadii,
2074                                flags);
2075             break;
2076         default:
2077             gmx_incons("Invalid cut-off scheme passed!");
2078     }
2079 }
2080
2081
2082 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
2083                         t_inputrec *ir, t_mdatoms *md,
2084                         t_state *state, t_commrec *cr, t_nrnb *nrnb,
2085                         t_forcerec *fr, gmx_localtop_t *top)
2086 {
2087     int             i, m, start, end;
2088     gmx_int64_t     step;
2089     real            dt = ir->delta_t;
2090     real            dvdl_dum;
2091     rvec           *savex;
2092
2093     snew(savex, state->natoms);
2094
2095     start = 0;
2096     end   = md->homenr;
2097
2098     if (debug)
2099     {
2100         fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
2101                 start, md->homenr, end);
2102     }
2103     /* Do a first constraint pass to reset the particles... */
2104     step = ir->init_step;
2105     if (fplog)
2106     {
2107         char buf[STEPSTRSIZE];
2108         fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
2109                 gmx_step_str(step, buf));
2110     }
2111     dvdl_dum = 0;
2112
2113     /* constrain the current position */
2114     constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2115               ir, NULL, cr, step, 0, 1.0, md,
2116               state->x, state->x, NULL,
2117               fr->bMolPBC, state->box,
2118               state->lambda[efptBONDED], &dvdl_dum,
2119               NULL, NULL, nrnb, econqCoord,
2120               ir->epc == epcMTTK, state->veta, state->veta);
2121     if (EI_VV(ir->eI))
2122     {
2123         /* constrain the initial velocity, and save it */
2124         /* also may be useful if we need the ekin from the halfstep for velocity verlet */
2125         /* might not yet treat veta correctly */
2126         constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2127                   ir, NULL, cr, step, 0, 1.0, md,
2128                   state->x, state->v, state->v,
2129                   fr->bMolPBC, state->box,
2130                   state->lambda[efptBONDED], &dvdl_dum,
2131                   NULL, NULL, nrnb, econqVeloc,
2132                   ir->epc == epcMTTK, state->veta, state->veta);
2133     }
2134     /* constrain the initial velocities at t-dt/2 */
2135     if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
2136     {
2137         for (i = start; (i < end); i++)
2138         {
2139             for (m = 0; (m < DIM); m++)
2140             {
2141                 /* Reverse the velocity */
2142                 state->v[i][m] = -state->v[i][m];
2143                 /* Store the position at t-dt in savex */
2144                 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
2145             }
2146         }
2147         /* Shake the positions at t=-dt with the positions at t=0
2148          * as reference coordinates.
2149          */
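        /* Because the velocities were negated above, x + dt*v here equals
         * x(0) - dt*v(0), i.e. the positions one step back in time.
         * Constraining savex with x(0) as reference gives positions at
         * t=-dt that satisfy the constraints, and passing state->v to
         * constrain() below applies the matching velocity correction.
         */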
2150         if (fplog)
2151         {
2152             char buf[STEPSTRSIZE];
2153             fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
2154                     gmx_step_str(step, buf));
2155         }
2156         dvdl_dum = 0;
2157         constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2158                   ir, NULL, cr, step, -1, 1.0, md,
2159                   state->x, savex, NULL,
2160                   fr->bMolPBC, state->box,
2161                   state->lambda[efptBONDED], &dvdl_dum,
2162                   state->v, NULL, nrnb, econqCoord,
2163                   ir->epc == epcMTTK, state->veta, state->veta);
2164
2165         for (i = start; i < end; i++)
2166         {
2167             for (m = 0; m < DIM; m++)
2168             {
2169                 /* Re-reverse the velocities */
2170                 state->v[i][m] = -state->v[i][m];
2171             }
2172         }
2173     }
2174     sfree(savex);
2175 }
2176
2177
2178 static void
2179 integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
2180                 double *enerout, double *virout)
2181 {
2182     double enersum, virsum;
2183     double invscale, invscale2, invscale3;
2184     double r, ea, eb, ec, pa, pb, pc, pd;
2185     double y0, f, g, h;
2186     int    ri, offset, tabfactor;
2187
2188     invscale  = 1.0/scale;
2189     invscale2 = invscale*invscale;
2190     invscale3 = invscale*invscale2;
2191
2192     /* Following summation derived from cubic spline definition,
2193      * Numerical Recipes in C, second edition, p. 113-116.  Exact for
2194      * the cubic spline.  We first calculate the negative of the
2195      * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
2196      * add the more standard, abrupt cutoff correction to that result,
2197      * yielding the long-range correction for a switched function.  We
2198      * perform both the pressure and energy loops at the same time for
2199      * simplicity, as the computational cost is low. */
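    /* Concretely: within table bin ri the spline is
     *   V(t) = y0 + f*t + g*t^2 + h*t^3,  t in [0,1],  r = (ri + t)/scale,
     * so that r^2 dr = (ec + eb*t + ea*t^2) dt with the coefficients set
     * in the loop below; integrating V(t) against this over one bin gives
     * the enersum terms. The virial integrand r^3 dV/dr expands in the
     * same way using pa..pd.
     */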
2200
2201     if (offstart == 0)
2202     {
2203         /* Since the dispersion table has been scaled down by a factor
2204          * of 6.0 and the repulsion by a factor of 12.0 to compensate for
2205          * the c6/c12 parameters inside nbfp[] being scaled up (to save
2206          * flops in kernels), we need to correct for this.
2207          */
2208         tabfactor = 6;
2209     }
2210     else
2211     {
2212         tabfactor = 12;
2213     }
2214
2215     enersum = 0.0;
2216     virsum  = 0.0;
2217     for (ri = rstart; ri < rend; ++ri)
2218     {
2219         r  = ri*invscale;
2220         ea = invscale3;
2221         eb = 2.0*invscale2*r;
2222         ec = invscale*r*r;
2223
2224         pa = invscale3;
2225         pb = 3.0*invscale2*r;
2226         pc = 3.0*invscale*r*r;
2227         pd = r*r*r;
2228
2229         /* this "8" is the vdwtab stride: Y,F,G,H spline data for
2230            dispersion, then for repulsion - perhaps should be defined? */
2231
2232         offset = 8*ri + offstart;
2233         y0     = vdwtab[offset];
2234         f      = vdwtab[offset+1];
2235         g      = vdwtab[offset+2];
2236         h      = vdwtab[offset+3];
2237
2238         enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2239         virsum  +=  f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2240     }
2241     *enerout = 4.0*M_PI*enersum*tabfactor;
2242     *virout  = 4.0*M_PI*virsum*tabfactor;
2243 }
2244
2245 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2246 {
2247     double   eners[2], virs[2], enersum, virsum, y0, f, g, h;
2248     double   r0, r1, r, rc3, rc9, ea, eb, ec, pa, pb, pc, pd;
2249     double   invscale, invscale2, invscale3;
2250     int      ri0, ri1, ri, i, offstart, offset;
2251     real     scale, *vdwtab, tabfactor, tmp;
2252
2253     fr->enershiftsix    = 0;
2254     fr->enershifttwelve = 0;
2255     fr->enerdiffsix     = 0;
2256     fr->enerdifftwelve  = 0;
2257     fr->virdiffsix      = 0;
2258     fr->virdifftwelve   = 0;
2259
2260     if (eDispCorr != edispcNO)
2261     {
2262         for (i = 0; i < 2; i++)
2263         {
2264             eners[i] = 0;
2265             virs[i]  = 0;
2266         }
2267         if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
2268             (fr->vdw_modifier == eintmodPOTSWITCH) ||
2269             (fr->vdw_modifier == eintmodFORCESWITCH) ||
2270             (fr->vdwtype == evdwSHIFT) ||
2271             (fr->vdwtype == evdwSWITCH))
2272         {
2273             if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
2274                  (fr->vdw_modifier == eintmodFORCESWITCH) ||
2275                  (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
2276             {
2277                 gmx_fatal(FARGS,
2278                           "With dispersion correction rvdw-switch can not be zero "
2279                           "for vdw-type = %s", evdw_names[fr->vdwtype]);
2280             }
2281
2282             scale  = fr->nblists[0].table_vdw.scale;
2283             vdwtab = fr->nblists[0].table_vdw.data;
2284
2285             /* Round the cut-offs to exact table values for precision */
2286             ri0  = floor(fr->rvdw_switch*scale);
2287             ri1  = ceil(fr->rvdw*scale);
2288
2289             /* The code below has some support for handling force-switching, i.e.
2290              * when the force (instead of potential) is switched over a limited
2291              * region. This leads to a constant shift in the potential inside the
2292              * switching region, which we can handle by adding a constant energy
2293              * term in the force-switch case just like when we do potential-shift.
2294              *
2295              * For now this is not enabled, but to keep the functionality in the
2296              * code we check separately for switch and shift. When we do force-switch
2297              * the shifting point is rvdw_switch, while it is the cutoff when we
2298              * have a classical potential-shift.
2299              *
2300              * For a pure potential-shift the potential has a constant shift
2301              * all the way out to the cutoff, and that is it. For other forms
2302              * we need to calculate the constant shift up to the point where we
2303              * start modifying the potential.
2304              */
2305             ri0  = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;
2306
2307             r0   = ri0/scale;
2308             r1   = ri1/scale;
2309             rc3  = r0*r0*r0;
2310             rc9  = rc3*rc3*rc3;
2311
2312             if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
2313                 (fr->vdwtype == evdwSHIFT))
2314             {
2315                 /* Determine the constant energy shift below rvdw_switch.
2316                  * The table carries a scale factor, since it was scaled down to compensate
2317                  * for scaling up c6/c12 with the derivative factors, to save flops in analytical kernels.
2318                  */
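                /* In other words, the shift is the difference at r0 between
                 * the plain -1/r^6 (or +1/r^12) potential and the tabulated,
                 * modified one; the factors 6 and 12 below undo the table
                 * down-scaling.
                 */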
2319                 fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2320                 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2321             }
2322             else if (fr->vdw_modifier == eintmodPOTSHIFT)
2323             {
2324                 fr->enershiftsix    = (real)(-1.0/(rc3*rc3));
2325                 fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
2326             }
2327
2328             /* Add the constant part from 0 to rvdw_switch.
2329              * This integration from 0 to rvdw_switch overcounts the number
2330              * of interactions by 1, as it also counts the self interaction.
2331              * We will correct for this later.
2332              */
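            /* A constant shift integrated over a sphere of radius r0
             * contributes 4*pi*shift*r0^3/3, which is what is added here.
             */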
2333             eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2334             eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2335
2336             /* Calculate the contribution in the range [r0,r1] where we
2337              * modify the potential. For a pure potential-shift modifier we will
2338              * have ri0==ri1, and there will not be any contribution here.
2339              */
2340             for (i = 0; i < 2; i++)
2341             {
2342                 enersum = 0;
2343                 virsum  = 0;
2344                 integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
2345                 eners[i] -= enersum;
2346                 virs[i]  -= virsum;
2347             }
2348
2349             /* Alright: Above we compensated by REMOVING the parts outside r0
2350              * corresponding to the ideal VdW 1/r6 and 1/r12 potentials.
2351              *
2352              * Regardless of whether r0 is the point where we start switching,
2353              * or the cutoff where we calculated the constant shift, we include
2354              * all the parts we are missing out to infinity from r0 by
2355              * calculating the analytical dispersion correction.
2356              */
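            /* Explicitly:
             *   eners[0]: -4*pi * int_{r0}^{inf} r^2 * r^-6  dr = -4*pi/(3*r0^3)
             *   eners[1]:  4*pi * int_{r0}^{inf} r^2 * r^-12 dr =  4*pi/(9*r0^9)
             * while the virial terms are 4*pi * int r^3 (dV/dr) dr, which
             * gives 8*pi/r0^3 and -16*pi/(3*r0^9).
             */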
2357             eners[0] += -4.0*M_PI/(3.0*rc3);
2358             eners[1] +=  4.0*M_PI/(9.0*rc9);
2359             virs[0]  +=  8.0*M_PI/rc3;
2360             virs[1]  += -16.0*M_PI/(3.0*rc9);
2361         }
2362         else if (fr->vdwtype == evdwCUT ||
2363                  EVDW_PME(fr->vdwtype) ||
2364                  fr->vdwtype == evdwUSER)
2365         {
2366             if (fr->vdwtype == evdwUSER && fplog)
2367             {
2368                 fprintf(fplog,
2369                         "WARNING: using dispersion correction with user tables\n");
2370             }
2371
2372             /* Note that with LJ-PME, the dispersion correction is multiplied
2373              * by the difference between the actual C6 and the value of C6
2374              * that would produce the combination rule.
2375              * This means the normal energy and virial difference formulas
2376              * can be used here.
2377              */
2378
2379             rc3  = fr->rvdw*fr->rvdw*fr->rvdw;
2380             rc9  = rc3*rc3*rc3;
2381             /* Contribution beyond the cut-off */
2382             eners[0] += -4.0*M_PI/(3.0*rc3);
2383             eners[1] +=  4.0*M_PI/(9.0*rc9);
2384             if (fr->vdw_modifier == eintmodPOTSHIFT)
2385             {
2386                 /* Contribution within the cut-off */
2387                 eners[0] += -4.0*M_PI/(3.0*rc3);
2388                 eners[1] +=  4.0*M_PI/(3.0*rc9);
2389             }
2390             /* Contribution beyond the cut-off */
2391             virs[0]  +=  8.0*M_PI/rc3;
2392             virs[1]  += -16.0*M_PI/(3.0*rc9);
2393         }
2394         else
2395         {
2396             gmx_fatal(FARGS,
2397                       "Dispersion correction is not implemented for vdw-type = %s",
2398                       evdw_names[fr->vdwtype]);
2399         }
2400
2401         /* When we deprecate the group kernels the code below can go too */
2402         if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
2403         {
2404             /* Calculate self-interaction coefficient (assuming that
2405              * the reciprocal-space contribution is constant in the
2406              * region that contributes to the self-interaction).
2407              */
2408             fr->enershiftsix = pow(fr->ewaldcoeff_lj, 6) / 6.0;
2409
2410             eners[0] += -pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3)/3.0;
2411             virs[0]  +=  pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3);
2412         }
2413
2414         fr->enerdiffsix    = eners[0];
2415         fr->enerdifftwelve = eners[1];
2416         /* The 0.5 is due to the Gromacs definition of the virial, Xi = -(1/2) sum_i r_i (x) F_i */
2417         fr->virdiffsix     = 0.5*virs[0];
2418         fr->virdifftwelve  = 0.5*virs[1];
2419     }
2420 }
2421
2422 void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
2423                    int natoms,
2424                    matrix box, real lambda, tensor pres, tensor virial,
2425                    real *prescorr, real *enercorr, real *dvdlcorr)
2426 {
2427     gmx_bool bCorrAll, bCorrPres;
2428     real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2429     int      m;
2430
2431     *prescorr = 0;
2432     *enercorr = 0;
2433     *dvdlcorr = 0;
2434
2435     clear_mat(virial);
2436     clear_mat(pres);
2437
2438     if (ir->eDispCorr != edispcNO)
2439     {
2440         bCorrAll  = (ir->eDispCorr == edispcAllEner ||
2441                      ir->eDispCorr == edispcAllEnerPres);
2442         bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2443                      ir->eDispCorr == edispcAllEnerPres);
2444
2445         invvol = 1/det(box);
2446         if (fr->n_tpi)
2447         {
2448             /* Only correct for the interactions with the inserted molecule */
2449             dens   = (natoms - fr->n_tpi)*invvol;
2450             ninter = fr->n_tpi;
2451         }
2452         else
2453         {
2454             dens   = natoms*invvol;
2455             ninter = 0.5*natoms;
2456         }
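        /* In this homogeneous approximation a system of N particles has
         * N*(N-1)/2 ~ N*N/2 interacting pairs, hence ninter = N/2 combined
         * with the full number density. The overcounted self interaction
         * is removed again via the enershift term in enerdiff below.
         */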
2457
2458         if (ir->efep == efepNO)
2459         {
2460             avcsix    = fr->avcsix[0];
2461             avctwelve = fr->avctwelve[0];
2462         }
2463         else
2464         {
2465             avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
2466             avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2467         }
2468
2469         enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2470         *enercorr += avcsix*enerdiff;
2471         dvdlambda  = 0.0;
2472         if (ir->efep != efepNO)
2473         {
2474             dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2475         }
2476         if (bCorrAll)
2477         {
2478             enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2479             *enercorr += avctwelve*enerdiff;
2480             if (fr->efep != efepNO)
2481             {
2482                 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2483             }
2484         }
2485
2486         if (bCorrPres)
2487         {
2488             svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2489             if (ir->eDispCorr == edispcAllEnerPres)
2490             {
2491                 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2492             }
2493             /* The factor 2 is because of the Gromacs virial definition, P = 2*(Ekin - Xi)/(3*V) */
2494             spres = -2.0*invvol*svir*PRESFAC;
2495
2496             for (m = 0; m < DIM; m++)
2497             {
2498                 virial[m][m] += svir;
2499                 pres[m][m]   += spres;
2500             }
2501             *prescorr += spres;
2502         }
2503
2504         /* Can't currently control when it prints, for now, just print when debugging */
2505         if (debug)
2506         {
2507             if (bCorrAll)
2508             {
2509                 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2510                         avcsix, avctwelve);
2511             }
2512             if (bCorrPres)
2513             {
2514                 fprintf(debug,
2515                         "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2516                         *enercorr, spres, svir);
2517             }
2518             else
2519             {
2520                 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2521             }
2522         }
2523
2524         if (fr->efep != efepNO)
2525         {
2526             *dvdlcorr += dvdlambda;
2527         }
2528     }
2529 }
2530
2531 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2532                   t_graph *graph, rvec x[])
2533 {
2534     if (fplog)
2535     {
2536         fprintf(fplog, "Removing pbc first time\n");
2537     }
2538     calc_shifts(box, fr->shift_vec);
2539     if (graph)
2540     {
2541         mk_mshift(fplog, graph, fr->ePBC, box, x);
2542         if (gmx_debug_at)
2543         {
2544             p_graph(debug, "do_pbc_first 1", graph);
2545         }
2546         shift_self(graph, box, x);
2547         /* By doing an extra mk_mshift the molecules that are broken
2548          * because they were e.g. imported from another software
2549          * will be made whole again. Such are the healing powers
2550          * of GROMACS.
2551          */
2552         mk_mshift(fplog, graph, fr->ePBC, box, x);
2553         if (gmx_debug_at)
2554         {
2555             p_graph(debug, "do_pbc_first 2", graph);
2556         }
2557     }
2558     if (fplog)
2559     {
2560         fprintf(fplog, "Done rmpbc\n");
2561     }
2562 }
2563
2564 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2565                             gmx_mtop_t *mtop, rvec x[],
2566                             gmx_bool bFirst)
2567 {
2568     t_graph        *graph;
2569     int             mb, as, mol;
2570     gmx_molblock_t *molb;
2571
2572     if (bFirst && fplog)
2573     {
2574         fprintf(fplog, "Removing pbc first time\n");
2575     }
2576
2577     snew(graph, 1);
2578     as = 0;
2579     for (mb = 0; mb < mtop->nmolblock; mb++)
2580     {
2581         molb = &mtop->molblock[mb];
2582         if (molb->natoms_mol == 1 ||
2583             (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2584         {
2585             /* Just one atom or charge group in the molecule, no PBC required */
2586             as += molb->nmol*molb->natoms_mol;
2587         }
2588         else
2589         {
2590             /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2591             mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2592                            0, molb->natoms_mol, FALSE, FALSE, graph);
2593
2594             for (mol = 0; mol < molb->nmol; mol++)
2595             {
2596                 mk_mshift(fplog, graph, ePBC, box, x+as);
2597
2598                 shift_self(graph, box, x+as);
2599                 /* The molecule is whole now.
2600                  * We don't need the second mk_mshift call as in do_pbc_first,
2601                  * since we no longer need this graph.
2602                  */
2603
2604                 as += molb->natoms_mol;
2605             }
2606             done_graph(graph);
2607         }
2608     }
2609     sfree(graph);
2610 }
2611
2612 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2613                        gmx_mtop_t *mtop, rvec x[])
2614 {
2615     low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2616 }
2617
2618 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2619                  gmx_mtop_t *mtop, rvec x[])
2620 {
2621     low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2622 }
2623
2624 void finish_run(FILE *fplog, t_commrec *cr,
2625                 t_inputrec *inputrec,
2626                 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2627                 gmx_walltime_accounting_t walltime_accounting,
2628                 nonbonded_verlet_t *nbv,
2629                 gmx_bool bWriteStat)
2630 {
2631     int     i, j;
2632     t_nrnb *nrnb_tot = NULL;
2633     real    delta_t;
2634     double  nbfs, mflop;
2635     double  elapsed_time,
2636             elapsed_time_over_all_ranks,
2637             elapsed_time_over_all_threads,
2638             elapsed_time_over_all_threads_over_all_ranks;
2639     wallcycle_sum(cr, wcycle);
2640
2641     if (cr->nnodes > 1)
2642     {
2643         snew(nrnb_tot, 1);
2644 #ifdef GMX_MPI
2645         MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2646                       cr->mpi_comm_mysim);
2647 #endif
2648     }
2649     else
2650     {
2651         nrnb_tot = nrnb;
2652     }
2653
2654     elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
2655     elapsed_time_over_all_ranks                  = elapsed_time;
2656     elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
2657     elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
2658 #ifdef GMX_MPI
2659     if (cr->nnodes > 1)
2660     {
2661         /* reduce elapsed_time over all MPI ranks in the current simulation */
2662         MPI_Allreduce(&elapsed_time,
2663                       &elapsed_time_over_all_ranks,
2664                       1, MPI_DOUBLE, MPI_SUM,
2665                       cr->mpi_comm_mysim);
2666         elapsed_time_over_all_ranks /= cr->nnodes;
2667         /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
2668          * current simulation. */
2669         MPI_Allreduce(&elapsed_time_over_all_threads,
2670                       &elapsed_time_over_all_threads_over_all_ranks,
2671                       1, MPI_DOUBLE, MPI_SUM,
2672                       cr->mpi_comm_mysim);
2673     }
2674 #endif
2675
2676     if (SIMMASTER(cr))
2677     {
2678         print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2679     }
2680     if (cr->nnodes > 1)
2681     {
2682         sfree(nrnb_tot);
2683     }
2684
2685     if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2686     {
2687         print_dd_statistics(cr, inputrec, fplog);
2688     }
2689
2690     if (SIMMASTER(cr))
2691     {
2692         wallclock_gpu_t *gputimes = use_GPU(nbv) ?
2693             nbnxn_cuda_get_timings(nbv->cu_nbv) : NULL;
2694         wallcycle_print(fplog, cr->nnodes, cr->npmenodes,
2695                         elapsed_time_over_all_ranks,
2696                         wcycle, gputimes);
2697
2698         if (EI_DYNAMICS(inputrec->eI))
2699         {
2700             delta_t = inputrec->delta_t;
2701         }
2702         else
2703         {
2704             delta_t = 0;
2705         }
2706
2707         if (fplog)
2708         {
2709             print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
2710                        elapsed_time_over_all_ranks,
2711                        walltime_accounting_get_nsteps_done(walltime_accounting),
2712                        delta_t, nbfs, mflop);
2713         }
2714         if (bWriteStat)
2715         {
2716             print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
2717                        elapsed_time_over_all_ranks,
2718                        walltime_accounting_get_nsteps_done(walltime_accounting),
2719                        delta_t, nbfs, mflop);
2720         }
2721     }
2722 }
2723
2724 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
2725 {
2726     /* this function works, but could probably use a logic rewrite to keep all the different
2727        types of efep straight. */
2728
2729     int       i;
2730     t_lambda *fep = ir->fepvals;
2731
2732     if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2733     {
2734         for (i = 0; i < efptNR; i++)
2735         {
2736             lambda[i] = 0.0;
2737             if (lam0)
2738             {
2739                 lam0[i] = 0.0;
2740             }
2741         }
2742         return;
2743     }
2744     else
2745     {
2746         *fep_state = fep->init_fep_state; /* this might overwrite the value read
2747                                              from a checkpoint -- a kludge is in
2748                                              place for now to prevent this. */
2749         for (i = 0; i < efptNR; i++)
2750         {
2751             /* overwrite lambda state with init_lambda for now for backwards compatibility */
2752             if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2753             {
2754                 lambda[i] = fep->init_lambda;
2755                 if (lam0)
2756                 {
2757                     lam0[i] = lambda[i];
2758                 }
2759             }
2760             else
2761             {
2762                 lambda[i] = fep->all_lambda[i][*fep_state];
2763                 if (lam0)
2764                 {
2765                     lam0[i] = lambda[i];
2766                 }
2767             }
2768         }
2769         if (ir->bSimTemp)
2770         {
2771             /* need to rescale control temperatures to match current state */
2772             for (i = 0; i < ir->opts.ngtc; i++)
2773             {
2774                 if (ir->opts.ref_t[i] > 0)
2775                 {
2776                     ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2777                 }
2778             }
2779         }
2780     }
2781
2782     /* Write the initial lambda components to the log */
2783     if (fplog != NULL)
2784     {
2785         fprintf(fplog, "Initial vector of lambda components:[ ");
2786         for (i = 0; i < efptNR; i++)
2787         {
2788             fprintf(fplog, "%10.4f ", lambda[i]);
2789         }
2790         fprintf(fplog, "]\n");
2791     }
2792     return;
2793 }
2794
2795
2796 void init_md(FILE *fplog,
2797              t_commrec *cr, t_inputrec *ir, const output_env_t oenv,
2798              double *t, double *t0,
2799              real *lambda, int *fep_state, double *lam0,
2800              t_nrnb *nrnb, gmx_mtop_t *mtop,
2801              gmx_update_t *upd,
2802              int nfile, const t_filenm fnm[],
2803              gmx_mdoutf_t *outf, t_mdebin **mdebin,
2804              tensor force_vir, tensor shake_vir, rvec mu_tot,
2805              gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags)
2806 {
2807     int  i, j, n;
2808     real tmpt, mod;
2809
2810     /* Initial values */
2811     *t = *t0       = ir->init_t;
2812
2813     *bSimAnn = FALSE;
2814     for (i = 0; i < ir->opts.ngtc; i++)
2815     {
2816         /* set bSimAnn if any group is being annealed */
2817         if (ir->opts.annealing[i] != eannNO)
2818         {
2819             *bSimAnn = TRUE;
2820         }
2821     }
2822     if (*bSimAnn)
2823     {
2824         update_annealing_target_temp(&(ir->opts), ir->init_t);
2825     }
2826
2827     /* Initialize lambda variables */
2828     initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2829
2830     if (upd)
2831     {
2832         *upd = init_update(ir);
2833     }
2834
2835
2836     if (vcm != NULL)
2837     {
2838         *vcm = init_vcm(fplog, &mtop->groups, ir);
2839     }
2840
2841     if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2842     {
2843         if (ir->etc == etcBERENDSEN)
2844         {
2845             please_cite(fplog, "Berendsen84a");
2846         }
2847         if (ir->etc == etcVRESCALE)
2848         {
2849             please_cite(fplog, "Bussi2007a");
2850         }
2851         if (ir->eI == eiSD1)
2852         {
2853             please_cite(fplog, "Goga2012");
2854         }
2855     }
2856
2857     init_nrnb(nrnb);
2858
2859     if (nfile != -1)
2860     {
2861         *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv);
2862
2863         *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : mdoutf_get_fp_ene(*outf),
2864                               mtop, ir, mdoutf_get_fp_dhdl(*outf));
2865     }
2866
2867     if (ir->bAdress)
2868     {
2869         please_cite(fplog, "Fritsch12");
2870         please_cite(fplog, "Junghans10");
2871     }
2872     /* Initialize variables */
2873     clear_mat(force_vir);
2874     clear_mat(shake_vir);
2875     clear_rvec(mu_tot);
2876
2877     debug_gmx();
2878 }