/* src/gromacs/mdlib/sim_util.c */
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "gromacs/legacyheaders/sim_util.h"

#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <string.h>

#include "config.h"

#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#endif

#include "gromacs/bonded/bonded.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/imd/imd.h"
#include "gromacs/legacyheaders/calcmu.h"
#include "gromacs/legacyheaders/chargegroup.h"
#include "gromacs/legacyheaders/constr.h"
#include "gromacs/legacyheaders/copyrite.h"
#include "gromacs/legacyheaders/disre.h"
#include "gromacs/legacyheaders/domdec.h"
#include "gromacs/legacyheaders/force.h"
#include "gromacs/legacyheaders/genborn.h"
#include "gromacs/legacyheaders/gmx_omp_nthreads.h"
#include "gromacs/legacyheaders/mdatoms.h"
#include "gromacs/legacyheaders/mdrun.h"
#include "gromacs/legacyheaders/names.h"
#include "gromacs/legacyheaders/network.h"
#include "gromacs/legacyheaders/nonbonded.h"
#include "gromacs/legacyheaders/nrnb.h"
#include "gromacs/legacyheaders/orires.h"
#include "gromacs/legacyheaders/pme.h"
#include "gromacs/legacyheaders/qmmm.h"
#include "gromacs/legacyheaders/txtdump.h"
#include "gromacs/legacyheaders/typedefs.h"
#include "gromacs/legacyheaders/update.h"
#include "gromacs/legacyheaders/types/commrec.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/mdlib/adress.h"
#include "gromacs/mdlib/nb_verlet.h"
#include "gromacs/mdlib/nbnxn_atomdata.h"
#include "gromacs/mdlib/nbnxn_search.h"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda.h"
#include "gromacs/mdlib/nbnxn_cuda/nbnxn_cuda_data_mgmt.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_gpu_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/nbnxn_kernel_ref.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn.h"
#include "gromacs/mdlib/nbnxn_kernels/simd_4xn/nbnxn_kernel_simd_4xn.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/smalloc.h"

void print_time(FILE                     *out,
                gmx_walltime_accounting_t walltime_accounting,
                gmx_int64_t               step,
                t_inputrec               *ir,
                t_commrec gmx_unused     *cr)
{
    time_t finish;
    char   timebuf[STRLEN];
    double dt, elapsed_seconds, time_per_step;
    char   buf[48];

#ifndef GMX_THREAD_MPI
    if (!PAR(cr))
#endif
    {
        fprintf(out, "\r");
    }
    fprintf(out, "step %s", gmx_step_str(step, buf));
    if ((step >= ir->nstlist))
    {
        double seconds_since_epoch = gmx_gettime();
        elapsed_seconds = seconds_since_epoch - walltime_accounting_get_start_time_stamp(walltime_accounting);
        time_per_step   = elapsed_seconds/(step - ir->init_step + 1);
        dt              = (ir->nsteps + ir->init_step - step) * time_per_step;

        if (ir->nsteps >= 0)
        {
            if (dt >= 300)
            {
                finish = (time_t) (seconds_since_epoch + dt);
                gmx_ctime_r(&finish, timebuf, STRLEN);
                sprintf(buf, "%s", timebuf);
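                /* gmx_ctime_r leaves a trailing newline in the buffer;
                 * it is overwritten below so the message stays on one line.
                 */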
                buf[strlen(buf)-1] = '\0';
                fprintf(out, ", will finish %s", buf);
            }
            else
            {
                fprintf(out, ", remaining wall clock time: %5d s          ", (int)dt);
            }
        }
        else
        {
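            /* ir->delta_t is in ps: delta_t/1000 is ns per step and
             * 24*60*60/time_per_step is steps per day, so the product
             * is the production rate in ns/day.
             */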
            fprintf(out, " performance: %.1f ns/day    ",
                    ir->delta_t/1000*24*60*60/time_per_step);
        }
    }
#ifndef GMX_THREAD_MPI
    if (PAR(cr))
    {
        fprintf(out, "\n");
    }
#endif

    fflush(out);
}

void print_date_and_time(FILE *fplog, int nodeid, const char *title,
                         double the_time)
{
    char   time_string[STRLEN];

    if (!fplog)
    {
        return;
    }

    {
        int    i;
        char   timebuf[STRLEN];
        time_t temp_time = (time_t) the_time;

        gmx_ctime_r(&temp_time, timebuf, STRLEN);
        for (i = 0; timebuf[i] >= ' '; i++)
        {
            time_string[i] = timebuf[i];
        }
        time_string[i] = '\0';
    }

    fprintf(fplog, "%s on rank %d %s\n", title, nodeid, time_string);
}

void print_start(FILE *fplog, t_commrec *cr,
                 gmx_walltime_accounting_t walltime_accounting,
                 const char *name)
{
    char buf[STRLEN];

    sprintf(buf, "Started %s", name);
    print_date_and_time(fplog, cr->nodeid, buf,
                        walltime_accounting_get_start_time_stamp(walltime_accounting));
}

static void sum_forces(int start, int end, rvec f[], rvec flr[])
{
    int i;

    if (gmx_debug_at)
    {
        pr_rvecs(debug, 0, "fsr", f+start, end-start);
        pr_rvecs(debug, 0, "flr", flr+start, end-start);
    }
    for (i = start; (i < end); i++)
    {
        rvec_inc(f[i], flr[i]);
    }
}

/*
 * calc_f_el calculates forces due to an electric field.
 *
 * force is kJ mol^-1 nm^-1 = e * kJ mol^-1 nm^-1 / e
 *
 * Et[] contains the parameters for the time-dependent
 * part of the field.
 * Ex[] contains the parameters for
 * the spatially dependent part of the field. You can have cool periodic
 * fields in principle, but only a constant field is supported
 * now.
 * The function should return the energy due to the electric field
 * (if any), but for now it returns nothing.
 *
 * WARNING:
 * There can be problems with the virial.
 * Since the field is not self-consistent this is unavoidable.
 * For neutral molecules the virial is correct within this approximation.
 * For neutral systems with many charged molecules the error is small.
 * But for systems with a net charge or a few charged molecules
 * the error can be significant when the field is high.
 * Solution: implement a self-consistent electric field into PME.
 */
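/* Time dependence of the applied field, as implemented below:
 * with three parameters (Et[m].n == 3) the field along dimension m is
 * modulated by a Gaussian-enveloped pulse
 *
 *   g_m(t) = cos(a[0]*(t - a[1])) * exp(-(t - a[1])^2 / (2*a[2]^2))
 *
 * where a[0] is the angular frequency, a[1] the peak time t0 and a[2]
 * the width sigma of the envelope; with any other non-zero Et[m].n the
 * modulation is cos(a[0]*t), and without time parameters it is 1. The
 * amplitude Ex[m].a[0] is given in V/nm and converted to MD units with
 * FIELDFAC.
 */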
static void calc_f_el(FILE *fp, int  start, int homenr,
                      real charge[], rvec f[],
                      t_cosines Ex[], t_cosines Et[], double t)
{
    rvec Ext;
    real t0;
    int  i, m;

    for (m = 0; (m < DIM); m++)
    {
        if (Et[m].n > 0)
        {
            if (Et[m].n == 3)
            {
                t0     = Et[m].a[1];
                Ext[m] = cos(Et[m].a[0]*(t-t0))*exp(-sqr(t-t0)/(2.0*sqr(Et[m].a[2])));
            }
            else
            {
                Ext[m] = cos(Et[m].a[0]*t);
            }
        }
        else
        {
            Ext[m] = 1.0;
        }
        if (Ex[m].n > 0)
        {
            /* Convert the field strength from V/nm to MD-units */
            Ext[m] *= Ex[m].a[0]*FIELDFAC;
            for (i = start; (i < start+homenr); i++)
            {
                f[i][m] += charge[i]*Ext[m];
            }
        }
        else
        {
            Ext[m] = 0;
        }
    }
    if (fp != NULL)
    {
        fprintf(fp, "%10g  %10g  %10g  %10g #FIELD\n", t,
                Ext[XX]/FIELDFAC, Ext[YY]/FIELDFAC, Ext[ZZ]/FIELDFAC);
    }
}

static void calc_virial(int start, int homenr, rvec x[], rvec f[],
                        tensor vir_part, t_graph *graph, matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    int    i, j;
    tensor virtest;

    /* The short-range virial from surrounding boxes */
    clear_mat(vir_part);
    calc_vir(SHIFTS, fr->shift_vec, fr->fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    /* Add position restraint contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][i] += fr->vir_diag_posres[i];
    }

    /* Add wall contribution */
    for (i = 0; i < DIM; i++)
    {
        vir_part[i][ZZ] += fr->vir_wall_z[i];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}

static void posres_wrapper(int flags,
                           t_inputrec *ir,
                           t_nrnb *nrnb,
                           gmx_localtop_t *top,
                           matrix box, rvec x[],
                           gmx_enerdata_t *enerd,
                           real *lambda,
                           t_forcerec *fr)
{
    t_pbc pbc;
    real  v, dvdl;
    int   i;

    /* Position restraints always require full pbc */
    set_pbc(&pbc, ir->ePBC, box);
    dvdl = 0;
    v    = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
                  top->idef.iparams_posres,
                  (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
                  ir->ePBC == epbcNONE ? NULL : &pbc,
                  lambda[efptRESTRAINT], &dvdl,
                  fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
    enerd->term[F_POSRES] += v;
    /* If just the force constant changes, the FEP term is linear,
     * but if the reference position changes, it is not.
     */
    enerd->dvdl_nonlin[efptRESTRAINT] += dvdl;
    inc_nrnb(nrnb, eNR_POSRES, top->idef.il[F_POSRES].nr/2);

    if ((ir->fepvals->n_lambda > 0) && (flags & GMX_FORCE_DHDL))
    {
        for (i = 0; i < enerd->n_lambda; i++)
        {
            real dvdl_dum, lambda_dum;

            lambda_dum = (i == 0 ? lambda[efptRESTRAINT] : ir->fepvals->all_lambda[efptRESTRAINT][i-1]);
            v          = posres(top->idef.il[F_POSRES].nr, top->idef.il[F_POSRES].iatoms,
                                top->idef.iparams_posres,
                                (const rvec*)x, NULL, NULL,
                                ir->ePBC == epbcNONE ? NULL : &pbc, lambda_dum, &dvdl_dum,
                                fr->rc_scaling, fr->ePBC, fr->posres_com, fr->posres_comB);
            enerd->enerpart_lambda[i] += v;
        }
    }
}

static void fbposres_wrapper(t_inputrec *ir,
                             t_nrnb *nrnb,
                             gmx_localtop_t *top,
                             matrix box, rvec x[],
                             gmx_enerdata_t *enerd,
                             t_forcerec *fr)
{
    t_pbc pbc;
    real  v;

    /* Flat-bottomed position restraints always require full pbc */
    set_pbc(&pbc, ir->ePBC, box);
    v = fbposres(top->idef.il[F_FBPOSRES].nr, top->idef.il[F_FBPOSRES].iatoms,
                 top->idef.iparams_fbposres,
                 (const rvec*)x, fr->f_novirsum, fr->vir_diag_posres,
                 ir->ePBC == epbcNONE ? NULL : &pbc,
                 fr->rc_scaling, fr->ePBC, fr->posres_com);
    enerd->term[F_FBPOSRES] += v;
    inc_nrnb(nrnb, eNR_FBPOSRES, top->idef.il[F_FBPOSRES].nr/2);
}

static void pull_potential_wrapper(t_commrec *cr,
                                   t_inputrec *ir,
                                   matrix box, rvec x[],
                                   rvec f[],
                                   tensor vir_force,
                                   t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   real *lambda,
                                   double t,
                                   gmx_wallcycle_t wcycle)
{
    t_pbc  pbc;
    real   dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     * The virial contribution is calculated directly,
     * which is why we call pull_potential after calc_virial.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    dvdl                     = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(ir->ePull, ir->pull, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], x, f, vir_force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}

static void pme_receive_force_ener(t_commrec      *cr,
                                   gmx_wallcycle_t wcycle,
                                   gmx_enerdata_t *enerd,
                                   t_forcerec     *fr)
{
    real   e_q, e_lj, v, dvdl_q, dvdl_lj;
    float  cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, fr->f_novirsum, fr->vir_el_recip, &e_q,
                      fr->vir_lj_recip, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}

static void print_large_forces(FILE *fp, t_mdatoms *md, t_commrec *cr,
                               gmx_int64_t step, real pforce, rvec *x, rvec *f)
{
    int  i;
    real pf2, fn2;
    char buf[STEPSTRSIZE];

    pf2 = sqr(pforce);
    for (i = 0; i < md->homenr; i++)
    {
        fn2 = norm2(f[i]);
        /* We also catch NAN, if the compiler does not optimize this away. */
        if (fn2 >= pf2 || fn2 != fn2)
        {
            fprintf(fp, "step %s  atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    gmx_step_str(step, buf),
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], sqrt(fn2));
        }
    }
}

static void post_process_forces(t_commrec *cr,
                                gmx_int64_t step,
                                t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                                gmx_localtop_t *top,
                                matrix box, rvec x[],
                                rvec f[],
                                tensor vir_force,
                                t_mdatoms *mdatoms,
                                t_graph *graph,
                                t_forcerec *fr, gmx_vsite_t *vsite,
                                int flags)
{
    if (fr->bF_NoVirSum)
    {
        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            wallcycle_start(wcycle, ewcVSITESPREAD);
            spread_vsite_f(vsite, x, fr->f_novirsum, NULL,
                           (flags & GMX_FORCE_VIRIAL), fr->vir_el_recip,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
            wallcycle_stop(wcycle, ewcVSITESPREAD);
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            if (fr->bDomDec)
            {
                sum_forces(0, fr->f_novirsum_n, f, fr->f_novirsum);
            }
            else
            {
                sum_forces(0, mdatoms->homenr,
                           f, fr->f_novirsum);
            }
            if (EEL_FULL(fr->eeltype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_el_recip, vir_force);
            }
            if (EVDW_PME(fr->vdwtype))
            {
                /* Add the mesh contribution to the virial */
                m_add(vir_force, fr->vir_lj_recip, vir_force);
            }
            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}

static void do_nb_verlet(t_forcerec *fr,
                         interaction_const_t *ic,
                         gmx_enerdata_t *enerd,
                         int flags, int ilocality,
                         int clearF,
                         t_nrnb *nrnb,
                         gmx_wallcycle_t wcycle)
{
    int                        nnbl, kernel_type, enr_nbnxn_kernel_ljc, enr_nbnxn_kernel_lj;
    char                      *env;
    nonbonded_verlet_group_t  *nbvg;
    gmx_bool                   bCUDA;

    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nbvg = &fr->nbv->grp[ilocality];

    /* CUDA kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    bCUDA = (nbvg->kernel_type == nbnxnk8x8x8_CUDA);

    if (!bCUDA)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);
    }
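
    /* Dispatch on the kernel flavor chosen at setup: the plain-C 4x4
     * reference kernel, the 4xN / 2xNN SIMD cluster kernels, the CUDA
     * 8x8x8 kernel (launched asynchronously; results are collected later
     * with nbnxn_cuda_wait_gpu) or the plain-C 8x8x8 GPU-emulation kernel.
     */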
    switch (nbvg->kernel_type)
    {
        case nbnxnk4x4_PlainC:
            nbnxn_kernel_ref(&nbvg->nbl_lists,
                             nbvg->nbat, ic,
                             fr->shift_vec,
                             flags,
                             clearF,
                             fr->fshift[0],
                             enerd->grpp.ener[egCOULSR],
                             fr->bBHAM ?
                             enerd->grpp.ener[egBHAMSR] :
                             enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk4xN_SIMD_4xN:
            nbnxn_kernel_simd_4xn(&nbvg->nbl_lists,
                                  nbvg->nbat, ic,
                                  nbvg->ewald_excl,
                                  fr->shift_vec,
                                  flags,
                                  clearF,
                                  fr->fshift[0],
                                  enerd->grpp.ener[egCOULSR],
                                  fr->bBHAM ?
                                  enerd->grpp.ener[egBHAMSR] :
                                  enerd->grpp.ener[egLJSR]);
            break;
        case nbnxnk4xN_SIMD_2xNN:
            nbnxn_kernel_simd_2xnn(&nbvg->nbl_lists,
                                   nbvg->nbat, ic,
                                   nbvg->ewald_excl,
                                   fr->shift_vec,
                                   flags,
                                   clearF,
                                   fr->fshift[0],
                                   enerd->grpp.ener[egCOULSR],
                                   fr->bBHAM ?
                                   enerd->grpp.ener[egBHAMSR] :
                                   enerd->grpp.ener[egLJSR]);
            break;

        case nbnxnk8x8x8_CUDA:
            nbnxn_cuda_launch_kernel(fr->nbv->cu_nbv, nbvg->nbat, flags, ilocality);
            break;

        case nbnxnk8x8x8_PlainC:
            nbnxn_kernel_gpu_ref(nbvg->nbl_lists.nbl[0],
                                 nbvg->nbat, ic,
                                 fr->shift_vec,
                                 flags,
                                 clearF,
                                 nbvg->nbat->out[0].f,
                                 fr->fshift[0],
                                 enerd->grpp.ener[egCOULSR],
                                 fr->bBHAM ?
                                 enerd->grpp.ener[egBHAMSR] :
                                 enerd->grpp.ener[egLJSR]);
            break;

        default:
            gmx_incons("Invalid nonbonded kernel type passed!");

    }
    if (!bCUDA)
    {
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    if (EEL_RF(ic->eeltype) || ic->eeltype == eelCUT)
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_RF;
    }
    else if ((!bCUDA && nbvg->ewald_excl == ewaldexclAnalytical) ||
             (bCUDA && nbnxn_cuda_is_kernel_ewald_analytical(fr->nbv->cu_nbv)))
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_EWALD;
    }
    else
    {
        enr_nbnxn_kernel_ljc = eNR_NBNXN_LJ_TAB;
    }
    enr_nbnxn_kernel_lj = eNR_NBNXN_LJ;
    if (flags & GMX_FORCE_ENERGY)
    {
        /* In eNR_??? the nbnxn F+E kernels are always the F kernel + 1 */
        enr_nbnxn_kernel_ljc += 1;
        enr_nbnxn_kernel_lj  += 1;
    }

    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc,
             nbvg->nbl_lists.natpair_ljq);
    inc_nrnb(nrnb, enr_nbnxn_kernel_lj,
             nbvg->nbl_lists.natpair_lj);
    /* The Coulomb-only kernels are offset -eNR_NBNXN_LJ_RF+eNR_NBNXN_RF */
    inc_nrnb(nrnb, enr_nbnxn_kernel_ljc-eNR_NBNXN_LJ_RF+eNR_NBNXN_RF,
             nbvg->nbl_lists.natpair_q);

    if (ic->vdw_modifier == eintmodFORCESWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_FSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdw_modifier == eintmodPOTSWITCH)
    {
        /* We add up the switch cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_PSW+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
    if (ic->vdwtype == evdwPME)
    {
        /* We add up the LJ Ewald cost separately */
        inc_nrnb(nrnb, eNR_NBNXN_ADD_LJ_EWALD+((flags & GMX_FORCE_ENERGY) ? 1 : 0),
                 nbvg->nbl_lists.natpair_ljq + nbvg->nbl_lists.natpair_lj);
    }
}

static void do_nb_verlet_fep(nbnxn_pairlist_set_t *nbl_lists,
                             t_forcerec           *fr,
                             rvec                  x[],
                             rvec                  f[],
                             t_mdatoms            *mdatoms,
                             t_lambda             *fepvals,
                             real                 *lambda,
                             gmx_enerdata_t       *enerd,
                             int                   flags,
                             t_nrnb               *nrnb,
                             gmx_wallcycle_t       wcycle)
{
    int              donb_flags;
    nb_kernel_data_t kernel_data;
    real             lam_i[efptNR];
    real             dvdl_nb[efptNR];
    int              th;
    int              i, j;

    donb_flags = 0;
    /* Add short-range interactions */
    donb_flags |= GMX_NONBONDED_DO_SR;

    /* Currently all group scheme kernels always calculate (shift-)forces */
    if (flags & GMX_FORCE_FORCES)
    {
        donb_flags |= GMX_NONBONDED_DO_FORCE;
    }
    if (flags & GMX_FORCE_VIRIAL)
    {
        donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
    }
    if (flags & GMX_FORCE_ENERGY)
    {
        donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
    }
    if (flags & GMX_FORCE_DO_LR)
    {
        donb_flags |= GMX_NONBONDED_DO_LR;
    }

    kernel_data.flags  = donb_flags;
    kernel_data.lambda = lambda;
    kernel_data.dvdl   = dvdl_nb;

    kernel_data.energygrp_elec = enerd->grpp.ener[egCOULSR];
    kernel_data.energygrp_vdw  = enerd->grpp.ener[egLJSR];

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i]  = 0;
    }

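    /* There is one free-energy pair list per nonbonded OpenMP thread,
     * so the kernel loop below is parallelized over those lists; the
     * assertion checks that the thread count matches the list count.
     */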
    assert(gmx_omp_nthreads_get(emntNonbonded) == nbl_lists->nnbl);

    wallcycle_sub_start(wcycle, ewcsNONBONDED);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
    for (th = 0; th < nbl_lists->nnbl; th++)
    {
        gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                  x, f, fr, mdatoms, &kernel_data, nrnb);
    }

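    /* With soft-core (sc_alpha != 0) the potential is a non-linear
     * function of lambda, so dV/dlambda goes into the non-linear
     * accumulator; without soft-core the dependence is linear.
     */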
    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptVDW]  += dvdl_nb[efptVDW];
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    /* If we do foreign lambda and we have soft-core interactions
     * we have to recalculate the (non-linear) energy contributions.
     */
    if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
    {
        kernel_data.flags          = (donb_flags & ~(GMX_NONBONDED_DO_FORCE | GMX_NONBONDED_DO_SHIFTFORCE)) | GMX_NONBONDED_DO_FOREIGNLAMBDA;
        kernel_data.lambda         = lam_i;
        kernel_data.energygrp_elec = enerd->foreign_grpp.ener[egCOULSR];
        kernel_data.energygrp_vdw  = enerd->foreign_grpp.ener[egLJSR];
        /* Note that we add to kernel_data.dvdl, but ignore the result */

        for (i = 0; i < enerd->n_lambda; i++)
        {
            for (j = 0; j < efptNR; j++)
            {
                lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
            }
            reset_foreign_enerdata(enerd);
#pragma omp parallel for schedule(static) num_threads(nbl_lists->nnbl)
            for (th = 0; th < nbl_lists->nnbl; th++)
            {
                gmx_nb_free_energy_kernel(nbl_lists->nbl_fep[th],
                                          x, f, fr, mdatoms, &kernel_data, nrnb);
            }

            sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
            enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
        }
    }

    wallcycle_sub_stop(wcycle, ewcsNONBONDED);
}

gmx_bool use_GPU(const nonbonded_verlet_t *nbv)
{
    return nbv != NULL && nbv->bUseGPU;
}

void do_force_cutsVERLET(FILE *fplog, t_commrec *cr,
                         t_inputrec *inputrec,
                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
                         gmx_localtop_t *top,
                         gmx_groups_t gmx_unused *groups,
                         matrix box, rvec x[], history_t *hist,
                         rvec f[],
                         tensor vir_force,
                         t_mdatoms *mdatoms,
                         gmx_enerdata_t *enerd, t_fcdata *fcd,
                         real *lambda, t_graph *graph,
                         t_forcerec *fr, interaction_const_t *ic,
                         gmx_vsite_t *vsite, rvec mu_tot,
                         double t, FILE *field, gmx_edsam_t ed,
                         gmx_bool bBornRadii,
                         int flags)
{
    int                 cg0, cg1, i, j;
    int                 start, homenr;
    int                 nb_kernel_type;
    double              mu[2*DIM];
    gmx_bool            bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
    gmx_bool            bDoLongRange, bDoForces, bSepLRF, bUseGPU, bUseOrEmulGPU;
    gmx_bool            bDiffKernels = FALSE;
    matrix              boxs;
    rvec                vzero, box_diag;
    real                e, v, dvdl;
    float               cycles_pme, cycles_force, cycles_wait_gpu;
    nonbonded_verlet_t *nbv;

    cycles_force    = 0;
    cycles_wait_gpu = 0;
    nbv             = fr->nbv;
    nb_kernel_type  = fr->nbv->grp[0].kernel_type;

    start  = 0;
    homenr = mdatoms->homenr;

    clear_mat(vir_force);

    cg0 = 0;
    if (DOMAINDECOMP(cr))
    {
        cg1 = cr->dd->ncg_tot;
    }
    else
    {
        cg1 = top->cgs.nr;
    }
    if (fr->n_tpi > 0)
    {
        cg1--;
    }

    bStateChanged = (flags & GMX_FORCE_STATECHANGED);
    bNS           = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoLongRange  = (fr->bTwinRange && bNS && (flags & GMX_FORCE_DO_LR));
    bDoForces     = (flags & GMX_FORCE_FORCES);
    bSepLRF       = (bDoLongRange && bDoForces && (flags & GMX_FORCE_SEPLRF));
    bUseGPU       = fr->nbv->bUseGPU;
    bUseOrEmulGPU = bUseGPU || (nbv->grp[0].kernel_type == nbnxnk8x8x8_PlainC);
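
    /* Note that bUseOrEmulGPU is also set when the plain-C 8x8x8 kernel
     * emulates a GPU, so the launch/wait style code paths below are
     * taken in emulation mode as well.
     */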

    if (bStateChanged)
    {
        update_forcerec(fr, box);

        if (NEED_MUTOT(*inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, homenr, x);
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, x);
        }
    }

    nbnxn_atomdata_copy_shiftvec(flags & GMX_FORCE_DYNAMICBOX,
                                 fr->shift_vec, nbv->grp[0].nbat);

#ifdef GMX_MPI
    if (!(cr->duty & DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */

        int pme_flags = 0;

        wallcycle_start(wcycle, ewcPP_PMESENDX);

        bBS = (inputrec->nwall == 2);
        if (bBS)
        {
            copy_mat(box, boxs);
            svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
        }

        if (EEL_PME(fr->eeltype))
        {
            pme_flags |= GMX_PME_DO_COULOMB;
        }

        if (EVDW_PME(fr->vdwtype))
        {
            pme_flags |= GMX_PME_DO_LJ;
        }

        gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
                                 mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
                                 pme_flags, step);

        wallcycle_stop(wcycle, ewcPP_PMESENDX);
    }
#endif /* GMX_MPI */
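
    /* The coordinates go to the separate PME ranks as early as possible,
     * so that the mesh part can run concurrently with the pair search and
     * short-range work below; the PME forces, virial and energy come back
     * in pme_receive_force_ener near the end of the step.
     */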

    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, x);
        }

        clear_rvec(vzero);
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!fr->bDomDec)
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv->nbs, fr->ePBC, box,
                              0, vzero, box_diag,
                              0, mdatoms->homenr, -1, fr->cginfo, x,
                              0, NULL,
                              nbv->grp[eintLocal].kernel_type,
                              nbv->grp[eintLocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv->nbs, domdec_zones(cr->dd),
                                       fr->cginfo, x,
                                       nbv->grp[eintNonlocal].kernel_type,
                                       nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        if (nbv->ngrp == 1 ||
            nbv->grp[eintNonlocal].nbat == nbv->grp[eintLocal].nbat)
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        else
        {
            nbnxn_atomdata_set(nbv->grp[eintLocal].nbat, eatLocal,
                               nbv->nbs, mdatoms, fr->cginfo);
            nbnxn_atomdata_set(nbv->grp[eintNonlocal].nbat, eatAll,
                               nbv->nbs, mdatoms, fr->cginfo);
        }
        wallcycle_stop(wcycle, ewcNS);
    }

    /* initialize the GPU atom data and copy shift vector */
    if (bUseGPU)
    {
        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_cuda_init_atomdata(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }

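        /* The shift vectors depend on the box, which can change every
         * step under pressure coupling or box deformation, so they are
         * re-uploaded each step, while the atom data above changes only
         * at search steps.
         */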
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        nbnxn_cuda_upload_shiftvec(nbv->cu_nbv, nbv->grp[eintLocal].nbat);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    /* do local pair search */
    if (bNS)
    {
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintLocal].nbat,
                            &top->excls,
                            ic->rlist,
                            nbv->min_ci_balanced,
                            &nbv->grp[eintLocal].nbl_lists,
                            eintLocal,
                            nbv->grp[eintLocal].kernel_type,
                            nrnb);
        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);

        if (bUseGPU)
        {
            /* initialize local pair-list on the GPU */
            nbnxn_cuda_init_pairlist(nbv->cu_nbv,
                                     nbv->grp[eintLocal].nbl_lists.nbl[0],
                                     eintLocal);
        }
        wallcycle_stop(wcycle, ewcNS);
    }
    else
    {
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
        nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, FALSE, x,
                                        nbv->grp[eintLocal].nbat);
        wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
        wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
    }

    if (bUseGPU)
    {
        wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
        /* launch local nonbonded F on GPU */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFNo,
                     nrnb, wcycle);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }
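
    /* The local kernel is launched before the non-local coordinates are
     * communicated, so GPU compute can overlap with the MPI exchange and
     * the CPU work below; the forces are collected in the wait calls
     * further down.
     */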

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (DOMAINDECOMP(cr))
    {
        bDiffKernels = (nbv->grp[eintNonlocal].kernel_type !=
                        nbv->grp[eintLocal].kernel_type);

        if (bDiffKernels)
        {
            /* With GPU+CPU non-bonded calculations we need to copy
             * the local coordinates to the non-local nbat struct
             * (in CPU format) as the non-local kernel call also
             * calculates the local - non-local interactions.
             */
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatLocal, TRUE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bNS)
        {
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (bDiffKernels)
            {
                nbnxn_grid_add_simple(nbv->nbs, nbv->grp[eintNonlocal].nbat);
            }

            nbnxn_make_pairlist(nbv->nbs, nbv->grp[eintNonlocal].nbat,
                                &top->excls,
                                ic->rlist,
                                nbv->min_ci_balanced,
                                &nbv->grp[eintNonlocal].nbl_lists,
                                eintNonlocal,
                                nbv->grp[eintNonlocal].kernel_type,
                                nrnb);

            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);

            if (nbv->grp[eintNonlocal].kernel_type == nbnxnk8x8x8_CUDA)
            {
                /* initialize non-local pair-list on the GPU */
                nbnxn_cuda_init_pairlist(nbv->cu_nbv,
                                         nbv->grp[eintNonlocal].nbl_lists.nbl[0],
                                         eintNonlocal);
            }
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            wallcycle_start(wcycle, ewcMOVEX);
            dd_move_x(cr->dd, box, x);

            /* When we don't need the total dipole we sum it in global_stat */
            if (bStateChanged && NEED_MUTOT(*inputrec))
            {
                gmx_sumd(2*DIM, mu, cr);
            }
            wallcycle_stop(wcycle, ewcMOVEX);

            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_X_BUF_OPS);
            nbnxn_atomdata_copy_x_to_nbat_x(nbv->nbs, eatNonlocal, FALSE, x,
                                            nbv->grp[eintNonlocal].nbat);
            wallcycle_sub_stop(wcycle, ewcsNB_X_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }

        if (bUseGPU && !bDiffKernels)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU_NB);
            /* launch non-local nonbonded F on GPU */
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFNo,
                         nrnb, wcycle);
            cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
        if (DOMAINDECOMP(cr) && !bDiffKernels)
        {
            nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintNonlocal].nbat,
                                      flags, eatNonlocal);
        }
        nbnxn_cuda_launch_cpyback(nbv->cu_nbv, nbv->grp[eintLocal].nbat,
                                  flags, eatLocal);
        cycles_force += wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
    }

    if (bStateChanged && NEED_MUTOT(*inputrec))
    {
        if (PAR(cr))
        {
            gmx_sumd(2*DIM, mu, cr);
        }

        for (i = 0; i < 2; i++)
        {
            for (j = 0; j < DIM; j++)
            {
                fr->mu_tot[i][j] = mu[i*DIM + j];
            }
        }
    }
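
    /* The total dipole: without free energy just the A-state dipole;
     * with free energy interpolated linearly between the A- and B-state
     * dipoles using the Coulomb lambda, consistent with the linear
     * interpolation of the charges.
     */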
    if (fr->efep == efepNO)
    {
        copy_rvec(fr->mu_tot[0], mu_tot);
    }
    else
    {
        for (j = 0; j < DIM; j++)
        {
            mu_tot[j] =
                (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
                lambda[efptCOUL]*fr->mu_tot[1][j];
        }
    }

    /* Reset energies */
    reset_enerdata(fr, bNS, enerd, MASTER(cr));
    clear_rvecs(SHIFTS, fr->fshift);

    if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
    {
        wallcycle_start(wcycle, ewcPPDURINGPME);
        dd_force_flop_start(cr->dd, nrnb);
    }

    if (inputrec->bRot)
    {
        /* Enforced rotation has its own cycle counter that starts after the collective
         * coordinates have been communicated. It is added to ddCyclF to allow
         * for proper load-balancing */
        wallcycle_start(wcycle, ewcROT);
        do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
        wallcycle_stop(wcycle, ewcROT);
    }

    /* Start the force cycle counter.
     * This counter is stopped in do_forcelow_level.
     * No parallel communication should occur while this counter is running,
     * since that will interfere with the dynamic load balancing.
     */
    wallcycle_start(wcycle, ewcFORCE);
    if (bDoForces)
    {
        /* Reset forces for which the virial is calculated separately:
         * PME/Ewald forces if necessary */
        if (fr->bF_NoVirSum)
        {
            if (flags & GMX_FORCE_VIRIAL)
            {
                fr->f_novirsum = fr->f_novirsum_alloc;
                if (fr->bDomDec)
                {
                    clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
                }
                else
                {
                    clear_rvecs(homenr, fr->f_novirsum+start);
                }
            }
            else
            {
                /* We are not calculating the pressure so we do not need
                 * a separate array for forces that do not contribute
                 * to the pressure.
                 */
                fr->f_novirsum = f;
            }
        }

        /* Clear the short- and long-range forces */
        clear_rvecs(fr->natoms_force_constr, f);
        if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
        {
            clear_rvecs(fr->natoms_force_constr, fr->f_twin);
        }

        clear_rvec(fr->vir_diag_posres);
    }

    if (inputrec->ePull == epullCONSTRAINT)
    {
        clear_pull_forces(inputrec->pull);
    }

    /* We calculate the non-bonded forces, when done on the CPU, here.
     * We do this before calling do_force_lowlevel, as there the bonded
     * forces are calculated before PME, which does communication.
     * With this order, non-bonded and bonded force calculation imbalance
     * can be balanced out by the domain decomposition load balancing.
     */

    if (!bUseOrEmulGPU)
    {
        /* Maybe we should move this into do_force_lowlevel */
        do_nb_verlet(fr, ic, enerd, flags, eintLocal, enbvClearFYes,
                     nrnb, wcycle);
    }

    if (fr->efep != efepNO)
    {
        /* Calculate the local and non-local free energy interactions here.
         * Happens here on the CPU both with and without GPU.
         */
        if (fr->nbv->grp[eintLocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintLocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }

        if (DOMAINDECOMP(cr) &&
            fr->nbv->grp[eintNonlocal].nbl_lists.nbl_fep[0]->nrj > 0)
        {
            do_nb_verlet_fep(&fr->nbv->grp[eintNonlocal].nbl_lists,
                             fr, x, f, mdatoms,
                             inputrec->fepvals, lambda,
                             enerd, flags, nrnb, wcycle);
        }
    }

    if (!bUseOrEmulGPU || bDiffKernels)
    {
        int aloc;

        if (DOMAINDECOMP(cr))
        {
            do_nb_verlet(fr, ic, enerd, flags, eintNonlocal,
                         bDiffKernels ? enbvClearFYes : enbvClearFNo,
                         nrnb, wcycle);
        }

        if (!bUseOrEmulGPU)
        {
            aloc = eintLocal;
        }
        else
        {
            aloc = eintNonlocal;
        }

        /* Add all the non-bonded forces to the normal force array.
         * This can be split into a local and a non-local part when overlapping
         * communication with calculation with domain decomposition.
         */
        cycles_force += wallcycle_stop(wcycle, ewcFORCE);
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatAll, nbv->grp[aloc].nbat, f);
        wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
        cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_start_nocount(wcycle, ewcFORCE);

        /* if there are multiple fshift output buffers reduce them */
        if ((flags & GMX_FORCE_VIRIAL) &&
            nbv->grp[aloc].nbl_lists.nnbl > 1)
        {
            nbnxn_atomdata_add_nbat_fshift_to_fshift(nbv->grp[aloc].nbat,
                                                     fr->fshift);
        }
    }

    /* update QMMMrec, if necessary */
    if (fr->bQMMM)
    {
        update_QMMMrec(cr, fr, x, mdatoms, box, top);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
    {
        posres_wrapper(flags, inputrec, nrnb, top, box, x,
                       enerd, lambda, fr);
    }

    if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
    {
        fbposres_wrapper(inputrec, nrnb, top, box, x, enerd, fr);
    }

    /* Compute the bonded and non-bonded energies and optionally forces */
    do_force_lowlevel(fr, inputrec, &(top->idef),
                      cr, nrnb, wcycle, mdatoms,
                      x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
                      bBornRadii, box,
                      inputrec->fepvals, lambda, graph, &(top->excls), fr->mu_tot,
                      flags, &cycles_pme);

    if (bSepLRF)
    {
        if (do_per_step(step, inputrec->nstcalclr))
        {
            /* Add the long range forces to the short range forces */
            for (i = 0; i < fr->natoms_force_constr; i++)
            {
                rvec_add(fr->f_twin[i], f[i], f[i]);
            }
        }
    }

    cycles_force += wallcycle_stop(wcycle, ewcFORCE);

    if (ed)
    {
        do_flood(cr, inputrec, x, f, ed, box, step, bNS);
    }

    if (bUseOrEmulGPU && !bDiffKernels)
    {
        /* wait for non-local forces (or calculate in emulation mode) */
        if (DOMAINDECOMP(cr))
        {
            if (bUseGPU)
            {
                float cycles_tmp;

                wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
                nbnxn_cuda_wait_gpu(nbv->cu_nbv,
                                    nbv->grp[eintNonlocal].nbat,
                                    flags, eatNonlocal,
                                    enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                    fr->fshift);
                cycles_tmp       = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
                cycles_wait_gpu += cycles_tmp;
                cycles_force    += cycles_tmp;
            }
            else
            {
                wallcycle_start_nocount(wcycle, ewcFORCE);
                do_nb_verlet(fr, ic, enerd, flags, eintNonlocal, enbvClearFYes,
                             nrnb, wcycle);
                cycles_force += wallcycle_stop(wcycle, ewcFORCE);
            }
            wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
            wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
            /* skip the reduction if there was no non-local work to do */
            if (nbv->grp[eintNonlocal].nbl_lists.nbl[0]->nsci > 0)
            {
                nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatNonlocal,
                                               nbv->grp[eintNonlocal].nbat, f);
            }
            wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
            cycles_force += wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
        }
    }

    if (bDoForces && DOMAINDECOMP(cr))
    {
        /* Communicate the forces */
        wallcycle_start(wcycle, ewcMOVEF);
        dd_move_f(cr->dd, f, fr->fshift);
        /* Do we need to communicate the separate force array
         * for terms that do not contribute to the single sum virial?
         * Position restraints and electric fields do not introduce
         * inter-cg forces, only full electrostatics methods do.
         * When we do not calculate the virial, fr->f_novirsum = f,
         * so we have already communicated these forces.
         */
        if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
            (flags & GMX_FORCE_VIRIAL))
        {
            dd_move_f(cr->dd, fr->f_novirsum, NULL);
        }
        if (bSepLRF)
        {
            /* We should not update the shift forces here,
             * since f_twin is already included in f.
             */
            dd_move_f(cr->dd, fr->f_twin, NULL);
        }
        wallcycle_stop(wcycle, ewcMOVEF);
    }

    if (bUseOrEmulGPU)
    {
        /* wait for local forces (or calculate in emulation mode) */
        if (bUseGPU)
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
            nbnxn_cuda_wait_gpu(nbv->cu_nbv,
                                nbv->grp[eintLocal].nbat,
                                flags, eatLocal,
                                enerd->grpp.ener[egLJSR], enerd->grpp.ener[egCOULSR],
                                fr->fshift);
            cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

            /* now clear the GPU outputs while we finish the step on the CPU */

            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU_NB);
            nbnxn_cuda_clear_outputs(nbv->cu_nbv, flags);
            wallcycle_stop(wcycle, ewcLAUNCH_GPU_NB);
        }
        else
        {
            wallcycle_start_nocount(wcycle, ewcFORCE);
            do_nb_verlet(fr, ic, enerd, flags, eintLocal,
                         DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
                         nrnb, wcycle);
            wallcycle_stop(wcycle, ewcFORCE);
        }
        wallcycle_start(wcycle, ewcNB_XF_BUF_OPS);
        wallcycle_sub_start(wcycle, ewcsNB_F_BUF_OPS);
        if (nbv->grp[eintLocal].nbl_lists.nbl[0]->nsci > 0)
        {
            /* skip the reduction if there was no local work to do */
            nbnxn_atomdata_add_nbat_f_to_f(nbv->nbs, eatLocal,
                                           nbv->grp[eintLocal].nbat, f);
        }
1460         wallcycle_sub_stop(wcycle, ewcsNB_F_BUF_OPS);
1461         wallcycle_stop(wcycle, ewcNB_XF_BUF_OPS);
1462     }
1463
1464     if (DOMAINDECOMP(cr))
1465     {
1466         dd_force_flop_stop(cr->dd, nrnb);
1467         if (wcycle)
1468         {
1469             dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1470             if (bUseGPU)
1471             {
1472                 dd_cycles_add(cr->dd, cycles_wait_gpu, ddCyclWaitGPU);
1473             }
1474         }
1475     }
1476
1477     if (bDoForces)
1478     {
1479         if (IR_ELEC_FIELD(*inputrec))
1480         {
1481             /* Compute forces due to electric field */
1482             calc_f_el(MASTER(cr) ? field : NULL,
1483                       start, homenr, mdatoms->chargeA, fr->f_novirsum,
1484                       inputrec->ex, inputrec->et, t);
1485         }
1486
1487         /* If we have NoVirSum forces, but we do not calculate the virial,
1488          * we sum fr->f_novirsum = f later.
1489          */
1490         if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1491         {
1492             wallcycle_start(wcycle, ewcVSITESPREAD);
1493             spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1494                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1495             wallcycle_stop(wcycle, ewcVSITESPREAD);
1496
1497             if (bSepLRF)
1498             {
1499                 wallcycle_start(wcycle, ewcVSITESPREAD);
1500                 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1501                                nrnb,
1502                                &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1503                 wallcycle_stop(wcycle, ewcVSITESPREAD);
1504             }
1505         }
1506
1507         if (flags & GMX_FORCE_VIRIAL)
1508         {
1509             /* Calculation of the virial must be done after vsites! */
1510             calc_virial(0, mdatoms->homenr, x, f,
1511                         vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1512         }
1513     }
1514
1515     if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1516     {
1517         /* Since the COM pulling is always done mass-weighted, no forces are
1518          * applied to vsites and this call can be done after vsite spreading.
1519          */
1520         pull_potential_wrapper(cr, inputrec, box, x,
1521                                f, vir_force, mdatoms, enerd, lambda, t,
1522                                wcycle);
1523     }
1524
1525     /* Add the forces from enforced rotation potentials (if any) */
1526     if (inputrec->bRot)
1527     {
1528         wallcycle_start(wcycle, ewcROTadd);
1529         enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
1530         wallcycle_stop(wcycle, ewcROTadd);
1531     }
1532
1533     /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
1534     IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
1535
1536     if (PAR(cr) && !(cr->duty & DUTY_PME))
1537     {
1538         /* In case of node-splitting, the PP nodes receive the long-range
1539          * forces, virial and energy from the PME nodes here.
1540          */
1541         pme_receive_force_ener(cr, wcycle, enerd, fr);
1542     }
1543
1544     if (bDoForces)
1545     {
1546         post_process_forces(cr, step, nrnb, wcycle,
1547                             top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
1548                             flags);
1549     }
1550
1551     /* Sum the potential energy terms from group contributions */
1552     sum_epot(&(enerd->grpp), enerd->term);
1553 }
1554
1555 void do_force_cutsGROUP(FILE *fplog, t_commrec *cr,
1556                         t_inputrec *inputrec,
1557                         gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
1558                         gmx_localtop_t *top,
1559                         gmx_groups_t *groups,
1560                         matrix box, rvec x[], history_t *hist,
1561                         rvec f[],
1562                         tensor vir_force,
1563                         t_mdatoms *mdatoms,
1564                         gmx_enerdata_t *enerd, t_fcdata *fcd,
1565                         real *lambda, t_graph *graph,
1566                         t_forcerec *fr, gmx_vsite_t *vsite, rvec mu_tot,
1567                         double t, FILE *field, gmx_edsam_t ed,
1568                         gmx_bool bBornRadii,
1569                         int flags)
1570 {
1571     int        cg0, cg1, i, j;
1572     int        start, homenr;
1573     double     mu[2*DIM];
1574     gmx_bool   bStateChanged, bNS, bFillGrid, bCalcCGCM, bBS;
1575     gmx_bool   bDoLongRangeNS, bDoForces, bDoPotential, bSepLRF;
1576     gmx_bool   bDoAdressWF;
1577     matrix     boxs;
1578     rvec       vzero, box_diag;
1579     real       e, v, dvdlambda[efptNR];
1580     t_pbc      pbc;
1581     float      cycles_pme, cycles_force;
1582
1583     start  = 0;
1584     homenr = mdatoms->homenr;
1585
1586     clear_mat(vir_force);
1587
1588     cg0 = 0;
1589     if (DOMAINDECOMP(cr))
1590     {
1591         cg1 = cr->dd->ncg_tot;
1592     }
1593     else
1594     {
1595         cg1 = top->cgs.nr;
1596     }
1597     if (fr->n_tpi > 0)
1598     {
1599         cg1--;
1600     }
1601
1602     bStateChanged  = (flags & GMX_FORCE_STATECHANGED);
1603     bNS            = (flags & GMX_FORCE_NS) && (fr->bAllvsAll == FALSE);
1604     /* Should we update the long-range neighborlists at this step? */
1605     bDoLongRangeNS = fr->bTwinRange && bNS;
1606     /* Should we put the charge groups on the neighborsearching grid at this step? */
1607     bFillGrid      = (bNS && bStateChanged);
1608     bCalcCGCM      = (bFillGrid && !DOMAINDECOMP(cr));
1609     bDoForces      = (flags & GMX_FORCE_FORCES);
1610     bDoPotential   = (flags & GMX_FORCE_ENERGY);
1611     bSepLRF        = ((inputrec->nstcalclr > 1) && bDoForces &&
1612                       (flags & GMX_FORCE_SEPLRF) && (flags & GMX_FORCE_DO_LR));
1613
1614     /* should probably move this to the forcerec since it doesn't change */
1615     bDoAdressWF   = (fr->adress_type != eAdressOff);
1616
1617     if (bStateChanged)
1618     {
1619         update_forcerec(fr, box);
1620
1621         if (NEED_MUTOT(*inputrec))
1622         {
1623             /* Calculate total (local) dipole moment in a temporary common array.
1624              * This makes it possible to sum them over nodes faster.
1625              */
1626             calc_mu(start, homenr,
1627                     x, mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
1628                     mu, mu+DIM);
1629         }
1630     }
1631
1632     if (fr->ePBC != epbcNONE)
1633     {
1634         /* Compute shift vectors every step,
1635          * because of pressure coupling or box deformation!
1636          */
1637         if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
1638         {
1639             calc_shifts(box, fr->shift_vec);
1640         }
1641
1642         if (bCalcCGCM)
1643         {
1644             put_charge_groups_in_box(fplog, cg0, cg1, fr->ePBC, box,
1645                                      &(top->cgs), x, fr->cg_cm);
1646             inc_nrnb(nrnb, eNR_CGCM, homenr);
1647             inc_nrnb(nrnb, eNR_RESETX, cg1-cg0);
1648         }
1649         else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
1650         {
1651             unshift_self(graph, box, x);
1652         }
1653     }
1654     else if (bCalcCGCM)
1655     {
1656         calc_cgcm(fplog, cg0, cg1, &(top->cgs), x, fr->cg_cm);
1657         inc_nrnb(nrnb, eNR_CGCM, homenr);
1658     }
1659
1660     if (bCalcCGCM && gmx_debug_at)
1661     {
1662         pr_rvecs(debug, 0, "cgcm", fr->cg_cm, top->cgs.nr);
1663     }
1664
1665 #ifdef GMX_MPI
1666     if (!(cr->duty & DUTY_PME))
1667     {
1668         /* Send particle coordinates to the pme nodes.
1669          * Since this is only implemented for domain decomposition
1670          * and domain decomposition does not use the graph,
1671          * we do not need to worry about shifting.
1672          */
1673
1674         int pme_flags = 0;
1675
1676         wallcycle_start(wcycle, ewcPP_PMESENDX);
1677
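             /* With two walls the Ewald sum is done in a box elongated in z
              * (ewald-geometry = 3dc), so we send PME a box with the third
              * vector scaled by wall_ewald_zfac.
              */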
1678         bBS = (inputrec->nwall == 2);
1679         if (bBS)
1680         {
1681             copy_mat(box, boxs);
1682             svmul(inputrec->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
1683         }
1684
1685         if (EEL_PME(fr->eeltype))
1686         {
1687             pme_flags |= GMX_PME_DO_COULOMB;
1688         }
1689
1690         if (EVDW_PME(fr->vdwtype))
1691         {
1692             pme_flags |= GMX_PME_DO_LJ;
1693         }
1694
1695         gmx_pme_send_coordinates(cr, bBS ? boxs : box, x,
1696                                  mdatoms->nChargePerturbed, mdatoms->nTypePerturbed, lambda[efptCOUL], lambda[efptVDW],
1697                                  (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)),
1698                                  pme_flags, step);
1699
1700         wallcycle_stop(wcycle, ewcPP_PMESENDX);
1701     }
1702 #endif /* GMX_MPI */
1703
1704     /* Communicate coordinates and sum dipole if necessary */
1705     if (DOMAINDECOMP(cr))
1706     {
1707         wallcycle_start(wcycle, ewcMOVEX);
1708         dd_move_x(cr->dd, box, x);
1709         wallcycle_stop(wcycle, ewcMOVEX);
1710     }
1711
1712     /* Update the AdResS weights beforehand */
1713     if (bStateChanged && bDoAdressWF)
1714     {
1715         /* need pbc for adress weight calculation with pbc_dx */
1716         set_pbc(&pbc, inputrec->ePBC, box);
1717         if (fr->adress_site == eAdressSITEcog)
1718         {
1719             update_adress_weights_cog(top->idef.iparams, top->idef.il, x, fr, mdatoms,
1720                                       inputrec->ePBC == epbcNONE ? NULL : &pbc);
1721         }
1722         else if (fr->adress_site == eAdressSITEcom)
1723         {
1724             update_adress_weights_com(fplog, cg0, cg1, &(top->cgs), x, fr, mdatoms,
1725                                       inputrec->ePBC == epbcNONE ? NULL : &pbc);
1726         }
1727         else if (fr->adress_site == eAdressSITEatomatom)
1728         {
1729             update_adress_weights_atom_per_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1730                                                 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1731         }
1732         else
1733         {
1734             update_adress_weights_atom(cg0, cg1, &(top->cgs), x, fr, mdatoms,
1735                                        inputrec->ePBC == epbcNONE ? NULL : &pbc);
1736         }
1737     }
1738
1739     if (NEED_MUTOT(*inputrec))
1740     {
1741
1742         if (bStateChanged)
1743         {
1744             if (PAR(cr))
1745             {
1746                 gmx_sumd(2*DIM, mu, cr);
1747             }
1748             for (i = 0; i < 2; i++)
1749             {
1750                 for (j = 0; j < DIM; j++)
1751                 {
1752                     fr->mu_tot[i][j] = mu[i*DIM + j];
1753                 }
1754             }
1755         }
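             /* With free energy perturbation the total dipole below is
              * interpolated linearly between the A and B states using the
              * coulomb lambda.
              */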
1756         if (fr->efep == efepNO)
1757         {
1758             copy_rvec(fr->mu_tot[0], mu_tot);
1759         }
1760         else
1761         {
1762             for (j = 0; j < DIM; j++)
1763             {
1764                 mu_tot[j] =
1765                     (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] + lambda[efptCOUL]*fr->mu_tot[1][j];
1766             }
1767         }
1768     }
1769
1770     /* Reset energies */
1771     reset_enerdata(fr, bNS, enerd, MASTER(cr));
1772     clear_rvecs(SHIFTS, fr->fshift);
1773
1774     if (bNS)
1775     {
1776         wallcycle_start(wcycle, ewcNS);
1777
1778         if (graph && bStateChanged)
1779         {
1780             /* Calculate intramolecular shift vectors to make molecules whole */
1781             mk_mshift(fplog, graph, fr->ePBC, box, x);
1782         }
1783
1784         /* Do the actual neighbour searching */
1785         ns(fplog, fr, box,
1786            groups, top, mdatoms,
1787            cr, nrnb, bFillGrid,
1788            bDoLongRangeNS);
1789
1790         wallcycle_stop(wcycle, ewcNS);
1791     }
1792
1793     if (inputrec->implicit_solvent && bNS)
1794     {
1795         make_gb_nblist(cr, inputrec->gb_algorithm,
1796                        x, box, fr, &top->idef, graph, fr->born);
1797     }
1798
1799     if (DOMAINDECOMP(cr) && !(cr->duty & DUTY_PME))
1800     {
1801         wallcycle_start(wcycle, ewcPPDURINGPME);
1802         dd_force_flop_start(cr->dd, nrnb);
1803     }
1804
1805     if (inputrec->bRot)
1806     {
1807         /* Enforced rotation has its own cycle counter that starts after the collective
1808          * coordinates have been communicated. It is added to ddCyclF to allow
1809          * for proper load-balancing */
1810         wallcycle_start(wcycle, ewcROT);
1811         do_rotation(cr, inputrec, box, x, t, step, wcycle, bNS);
1812         wallcycle_stop(wcycle, ewcROT);
1813     }
1814
1815     /* Start the force cycle counter.
1816      * This counter is stopped in do_force_lowlevel.
1817      * No parallel communication should occur while this counter is running,
1818      * since that will interfere with the dynamic load balancing.
1819      */
1820     wallcycle_start(wcycle, ewcFORCE);
1821
1822     if (bDoForces)
1823     {
1824         /* Reset forces for which the virial is calculated separately:
1825          * PME/Ewald forces if necessary */
1826         if (fr->bF_NoVirSum)
1827         {
1828             if (flags & GMX_FORCE_VIRIAL)
1829             {
1830                 fr->f_novirsum = fr->f_novirsum_alloc;
1831                 if (fr->bDomDec)
1832                 {
1833                     clear_rvecs(fr->f_novirsum_n, fr->f_novirsum);
1834                 }
1835                 else
1836                 {
1837                     clear_rvecs(homenr, fr->f_novirsum+start);
1838                 }
1839             }
1840             else
1841             {
1842                 /* We are not calculating the pressure so we do not need
1843                  * a separate array for forces that do not contribute
1844                  * to the pressure.
1845                  */
1846                 fr->f_novirsum = f;
1847             }
1848         }
1849
1850         /* Clear the short- and long-range forces */
1851         clear_rvecs(fr->natoms_force_constr, f);
1852         if (bSepLRF && do_per_step(step, inputrec->nstcalclr))
1853         {
1854             clear_rvecs(fr->natoms_force_constr, fr->f_twin);
1855         }
1856
1857         clear_rvec(fr->vir_diag_posres);
1858     }
1859     if (inputrec->ePull == epullCONSTRAINT)
1860     {
1861         clear_pull_forces(inputrec->pull);
1862     }
1863
1864     /* update QMMMrec, if necessary */
1865     if (fr->bQMMM)
1866     {
1867         update_QMMMrec(cr, fr, x, mdatoms, box, top);
1868     }
1869
1870     if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_POSRES].nr > 0)
1871     {
1872         posres_wrapper(flags, inputrec, nrnb, top, box, x,
1873                        enerd, lambda, fr);
1874     }
1875
1876     if ((flags & GMX_FORCE_BONDED) && top->idef.il[F_FBPOSRES].nr > 0)
1877     {
1878         fbposres_wrapper(inputrec, nrnb, top, box, x, enerd, fr);
1879     }
1880
1881     /* Compute the bonded and non-bonded energies and optionally forces */
1882     do_force_lowlevel(fr, inputrec, &(top->idef),
1883                       cr, nrnb, wcycle, mdatoms,
1884                       x, hist, f, bSepLRF ? fr->f_twin : f, enerd, fcd, top, fr->born,
1885                       bBornRadii, box,
1886                       inputrec->fepvals, lambda,
1887                       graph, &(top->excls), fr->mu_tot,
1888                       flags,
1889                       &cycles_pme);
1890
1891     if (bSepLRF)
1892     {
1893         if (do_per_step(step, inputrec->nstcalclr))
1894         {
1895             /* Add the long range forces to the short range forces */
1896             for (i = 0; i < fr->natoms_force_constr; i++)
1897             {
1898                 rvec_add(fr->f_twin[i], f[i], f[i]);
1899             }
1900         }
1901     }
1902
1903     cycles_force = wallcycle_stop(wcycle, ewcFORCE);
1904
1905     if (ed)
1906     {
1907         do_flood(cr, inputrec, x, f, ed, box, step, bNS);
1908     }
1909
1910     if (DOMAINDECOMP(cr))
1911     {
1912         dd_force_flop_stop(cr->dd, nrnb);
1913         if (wcycle)
1914         {
1915             dd_cycles_add(cr->dd, cycles_force-cycles_pme, ddCyclF);
1916         }
1917     }
1918
1919     if (bDoForces)
1920     {
1921         if (IR_ELEC_FIELD(*inputrec))
1922         {
1923             /* Compute forces due to electric field */
1924             calc_f_el(MASTER(cr) ? field : NULL,
1925                       start, homenr, mdatoms->chargeA, fr->f_novirsum,
1926                       inputrec->ex, inputrec->et, t);
1927         }
1928
1929         if (bDoAdressWF && fr->adress_icor == eAdressICThermoForce)
1930         {
1931             /* Compute thermodynamic force in hybrid AdResS region */
1932             adress_thermo_force(start, homenr, &(top->cgs), x, fr->f_novirsum, fr, mdatoms,
1933                                 inputrec->ePBC == epbcNONE ? NULL : &pbc);
1934         }
1935
1936         /* Communicate the forces */
1937         if (DOMAINDECOMP(cr))
1938         {
1939             wallcycle_start(wcycle, ewcMOVEF);
1940             dd_move_f(cr->dd, f, fr->fshift);
1941             /* Do we need to communicate the separate force array
1942              * for terms that do not contribute to the single sum virial?
1943              * Position restraints and electric fields do not introduce
1944              * inter-cg forces, only full electrostatics methods do.
1945              * When we do not calculate the virial, fr->f_novirsum = f,
1946              * so we have already communicated these forces.
1947              */
1948             if (EEL_FULL(fr->eeltype) && cr->dd->n_intercg_excl &&
1949                 (flags & GMX_FORCE_VIRIAL))
1950             {
1951                 dd_move_f(cr->dd, fr->f_novirsum, NULL);
1952             }
1953             if (bSepLRF)
1954             {
1955                 /* We should not update the shift forces here,
1956                  * since f_twin is already included in f.
1957                  */
1958                 dd_move_f(cr->dd, fr->f_twin, NULL);
1959             }
1960             wallcycle_stop(wcycle, ewcMOVEF);
1961         }
1962
1963         /* If we have NoVirSum forces, but we do not calculate the virial,
1964          * we sum fr->f_novirsum = f later.
1965          */
1966         if (vsite && !(fr->bF_NoVirSum && !(flags & GMX_FORCE_VIRIAL)))
1967         {
1968             wallcycle_start(wcycle, ewcVSITESPREAD);
1969             spread_vsite_f(vsite, x, f, fr->fshift, FALSE, NULL, nrnb,
1970                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1971             wallcycle_stop(wcycle, ewcVSITESPREAD);
1972
1973             if (bSepLRF)
1974             {
1975                 wallcycle_start(wcycle, ewcVSITESPREAD);
1976                 spread_vsite_f(vsite, x, fr->f_twin, NULL, FALSE, NULL,
1977                                nrnb,
1978                                &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr);
1979                 wallcycle_stop(wcycle, ewcVSITESPREAD);
1980             }
1981         }
1982
1983         if (flags & GMX_FORCE_VIRIAL)
1984         {
1985             /* Calculation of the virial must be done after vsites! */
1986             calc_virial(0, mdatoms->homenr, x, f,
1987                         vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1988         }
1989     }
1990
1991     if (inputrec->ePull == epullUMBRELLA || inputrec->ePull == epullCONST_F)
1992     {
1993         pull_potential_wrapper(cr, inputrec, box, x,
1994                                f, vir_force, mdatoms, enerd, lambda, t,
1995                                wcycle);
1996     }
1997
1998     /* Add the forces from enforced rotation potentials (if any) */
1999     if (inputrec->bRot)
2000     {
2001         wallcycle_start(wcycle, ewcROTadd);
2002         enerd->term[F_COM_PULL] += add_rot_forces(inputrec->rot, f, cr, step, t);
2003         wallcycle_stop(wcycle, ewcROTadd);
2004     }
2005
2006     /* Add forces from interactive molecular dynamics (IMD), if bIMD == TRUE. */
2007     IMD_apply_forces(inputrec->bIMD, inputrec->imd, cr, f, wcycle);
2008
2009     if (PAR(cr) && !(cr->duty & DUTY_PME))
2010     {
2011         /* In case of node-splitting, the PP nodes receive the long-range
2012          * forces, virial and energy from the PME nodes here.
2013          */
2014         pme_receive_force_ener(cr, wcycle, enerd, fr);
2015     }
2016
2017     if (bDoForces)
2018     {
2019         post_process_forces(cr, step, nrnb, wcycle,
2020                             top, box, x, f, vir_force, mdatoms, graph, fr, vsite,
2021                             flags);
2022     }
2023
2024     /* Sum the potential energy terms from group contributions */
2025     sum_epot(&(enerd->grpp), enerd->term);
2026 }
2027
2028 void do_force(FILE *fplog, t_commrec *cr,
2029               t_inputrec *inputrec,
2030               gmx_int64_t step, t_nrnb *nrnb, gmx_wallcycle_t wcycle,
2031               gmx_localtop_t *top,
2032               gmx_groups_t *groups,
2033               matrix box, rvec x[], history_t *hist,
2034               rvec f[],
2035               tensor vir_force,
2036               t_mdatoms *mdatoms,
2037               gmx_enerdata_t *enerd, t_fcdata *fcd,
2038               real *lambda, t_graph *graph,
2039               t_forcerec *fr,
2040               gmx_vsite_t *vsite, rvec mu_tot,
2041               double t, FILE *field, gmx_edsam_t ed,
2042               gmx_bool bBornRadii,
2043               int flags)
2044 {
2045     /* modify force flag if not doing nonbonded */
2046     if (!fr->bNonbonded)
2047     {
2048         flags &= ~GMX_FORCE_NONBONDED;
2049     }
2050
2051     switch (inputrec->cutoff_scheme)
2052     {
2053         case ecutsVERLET:
2054             do_force_cutsVERLET(fplog, cr, inputrec,
2055                                 step, nrnb, wcycle,
2056                                 top,
2057                                 groups,
2058                                 box, x, hist,
2059                                 f, vir_force,
2060                                 mdatoms,
2061                                 enerd, fcd,
2062                                 lambda, graph,
2063                                 fr, fr->ic,
2064                                 vsite, mu_tot,
2065                                 t, field, ed,
2066                                 bBornRadii,
2067                                 flags);
2068             break;
2069         case ecutsGROUP:
2070             do_force_cutsGROUP(fplog, cr, inputrec,
2071                                step, nrnb, wcycle,
2072                                top,
2073                                groups,
2074                                box, x, hist,
2075                                f, vir_force,
2076                                mdatoms,
2077                                enerd, fcd,
2078                                lambda, graph,
2079                                fr, vsite, mu_tot,
2080                                t, field, ed,
2081                                bBornRadii,
2082                                flags);
2083             break;
2084         default:
2085             gmx_incons("Invalid cut-off scheme passed!");
2086     }
2087 }
2088
2089
2090 void do_constrain_first(FILE *fplog, gmx_constr_t constr,
2091                         t_inputrec *ir, t_mdatoms *md,
2092                         t_state *state, t_commrec *cr, t_nrnb *nrnb,
2093                         t_forcerec *fr, gmx_localtop_t *top)
2094 {
2095     int             i, m, start, end;
2096     gmx_int64_t     step;
2097     real            dt = ir->delta_t;
2098     real            dvdl_dum;
2099     rvec           *savex;
2100
2101     snew(savex, state->natoms);
2102
2103     start = 0;
2104     end   = md->homenr;
2105
2106     if (debug)
2107     {
2108         fprintf(debug, "vcm: start=%d, homenr=%d, end=%d\n",
2109                 start, md->homenr, end);
2110     }
2111     /* Do a first constraint run to reset the particles... */
2112     step = ir->init_step;
2113     if (fplog)
2114     {
2115         char buf[STEPSTRSIZE];
2116         fprintf(fplog, "\nConstraining the starting coordinates (step %s)\n",
2117                 gmx_step_str(step, buf));
2118     }
2119     dvdl_dum = 0;
2120
2121     /* constrain the current position */
2122     constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2123               ir, NULL, cr, step, 0, 1.0, md,
2124               state->x, state->x, NULL,
2125               fr->bMolPBC, state->box,
2126               state->lambda[efptBONDED], &dvdl_dum,
2127               NULL, NULL, nrnb, econqCoord,
2128               ir->epc == epcMTTK, state->veta, state->veta);
2129     if (EI_VV(ir->eI))
2130     {
2131         /* constrain the initial velocity, and save it */
2132         /* also may be useful if we need the ekin from the halfstep for velocity verlet */
2133         /* might not yet treat veta correctly */
2134         constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2135                   ir, NULL, cr, step, 0, 1.0, md,
2136                   state->x, state->v, state->v,
2137                   fr->bMolPBC, state->box,
2138                   state->lambda[efptBONDED], &dvdl_dum,
2139                   NULL, NULL, nrnb, econqVeloc,
2140                   ir->epc == epcMTTK, state->veta, state->veta);
2141     }
2142     /* constrain the initial velocities at t-dt/2 */
2143     if (EI_STATE_VELOCITY(ir->eI) && ir->eI != eiVV)
2144     {
2145         for (i = start; (i < end); i++)
2146         {
2147             for (m = 0; (m < DIM); m++)
2148             {
2149                 /* Reverse the velocity */
2150                 state->v[i][m] = -state->v[i][m];
2151                 /* Store the position at t-dt in savex */
2152                 savex[i][m] = state->x[i][m] + dt*state->v[i][m];
2153             }
2154         }
2155         /* Shake the positions at t=-dt with the positions at t=0
2156          * as reference coordinates.
2157          */
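             /* Note that with the velocities reversed above, savex holds the
              * unconstrained positions at t=-dt; constraining savex against
              * state->x while passing state->v lets constrain() also apply
              * the correction to the (reversed) velocities.
              */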
2158         if (fplog)
2159         {
2160             char buf[STEPSTRSIZE];
2161             fprintf(fplog, "\nConstraining the coordinates at t0-dt (step %s)\n",
2162                     gmx_step_str(step, buf));
2163         }
2164         dvdl_dum = 0;
2165         constrain(NULL, TRUE, FALSE, constr, &(top->idef),
2166                   ir, NULL, cr, step, -1, 1.0, md,
2167                   state->x, savex, NULL,
2168                   fr->bMolPBC, state->box,
2169                   state->lambda[efptBONDED], &dvdl_dum,
2170                   state->v, NULL, nrnb, econqCoord,
2171                   ir->epc == epcMTTK, state->veta, state->veta);
2172
2173         for (i = start; i < end; i++)
2174         {
2175             for (m = 0; m < DIM; m++)
2176             {
2177                 /* Re-reverse the velocities */
2178                 state->v[i][m] = -state->v[i][m];
2179             }
2180         }
2181     }
2182     sfree(savex);
2183 }
2184
2185
2186 static void
2187 integrate_table(real vdwtab[], real scale, int offstart, int rstart, int rend,
2188                 double *enerout, double *virout)
2189 {
2190     double enersum, virsum;
2191     double invscale, invscale2, invscale3;
2192     double r, ea, eb, ec, pa, pb, pc, pd;
2193     double y0, f, g, h;
2194     int    ri, offset, tabfactor;
2195
2196     invscale  = 1.0/scale;
2197     invscale2 = invscale*invscale;
2198     invscale3 = invscale*invscale2;
2199
2200     /* Following summation derived from cubic spline definition,
2201      * Numerical Recipes in C, second edition, p. 113-116.  Exact for
2202      * the cubic spline.  We first calculate the negative of the
2203      * energy from rvdw to rvdw_switch, assuming that g(r)=1, and then
2204      * add the more standard, abrupt cutoff correction to that result,
2205      * yielding the long-range correction for a switched function.  We
2206      * perform both the pressure and energy loops at the same time for
2207      * simplicity, as the computational cost is low. */
2208
2209     if (offstart == 0)
2210     {
2211         /* Since the dispersion table has been scaled down a factor
2212          * 6.0 and the repulsion a factor 12.0 to compensate for the
2213          * c6/c12 parameters inside nbfp[] being scaled up (to save
2214          * flops in kernels), we need to correct for this.
2215          */
2216         tabfactor = 6;
2217     }
2218     else
2219     {
2220         tabfactor = 12;
2221     }
2222
2223     enersum = 0.0;
2224     virsum  = 0.0;
2225     for (ri = rstart; ri < rend; ++ri)
2226     {
2227         r  = ri*invscale;
2228         ea = invscale3;
2229         eb = 2.0*invscale2*r;
2230         ec = invscale*r*r;
2231
2232         pa = invscale3;
2233         pb = 3.0*invscale2*r;
2234         pc = 3.0*invscale*r*r;
2235         pd = r*r*r;
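             /* With eps the fractional position inside table bin ri we have
              * r' = r + eps*invscale and spline value y0 + f*eps + g*eps^2 + h*eps^3.
              * Substituting dr' = invscale*deps turns the energy weight r'^2 dr'
              * into (ec + eb*eps + ea*eps^2) deps and the virial weight
              * r'^3 dV/dr' dr' into (pd + pc*eps + pb*eps^2 + pa*eps^3) times the
              * spline derivative; integrating eps^k over [0,1] then gives the
              * 1/(k+1) fractions in the sums below.
              */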
2236
2237         /* The stride "8" comes from the packing of the vdwtab array: two
2238            functions (dispersion, repulsion) x four spline coefficients (Y, F, G, H). */
2239
2240         offset = 8*ri + offstart;
2241         y0     = vdwtab[offset];
2242         f      = vdwtab[offset+1];
2243         g      = vdwtab[offset+2];
2244         h      = vdwtab[offset+3];
2245
2246         enersum += y0*(ea/3 + eb/2 + ec) + f*(ea/4 + eb/3 + ec/2) + g*(ea/5 + eb/4 + ec/3) + h*(ea/6 + eb/5 + ec/4);
2247         virsum  +=  f*(pa/4 + pb/3 + pc/2 + pd) + 2*g*(pa/5 + pb/4 + pc/3 + pd/2) + 3*h*(pa/6 + pb/5 + pc/4 + pd/3);
2248     }
2249     *enerout = 4.0*M_PI*enersum*tabfactor;
2250     *virout  = 4.0*M_PI*virsum*tabfactor;
2251 }
2252
2253 void calc_enervirdiff(FILE *fplog, int eDispCorr, t_forcerec *fr)
2254 {
2255     double   eners[2], virs[2], enersum, virsum, y0, f, g, h;
2256     double   r0, r1, r, rc3, rc9, ea, eb, ec, pa, pb, pc, pd;
2257     double   invscale, invscale2, invscale3;
2258     int      ri0, ri1, ri, i, offstart, offset;
2259     real     scale, *vdwtab, tabfactor, tmp;
2260
2261     fr->enershiftsix    = 0;
2262     fr->enershifttwelve = 0;
2263     fr->enerdiffsix     = 0;
2264     fr->enerdifftwelve  = 0;
2265     fr->virdiffsix      = 0;
2266     fr->virdifftwelve   = 0;
2267
2268     if (eDispCorr != edispcNO)
2269     {
2270         for (i = 0; i < 2; i++)
2271         {
2272             eners[i] = 0;
2273             virs[i]  = 0;
2274         }
2275         if ((fr->vdw_modifier == eintmodPOTSHIFT) ||
2276             (fr->vdw_modifier == eintmodPOTSWITCH) ||
2277             (fr->vdw_modifier == eintmodFORCESWITCH) ||
2278             (fr->vdwtype == evdwSHIFT) ||
2279             (fr->vdwtype == evdwSWITCH))
2280         {
2281             if (((fr->vdw_modifier == eintmodPOTSWITCH) ||
2282                  (fr->vdw_modifier == eintmodFORCESWITCH) ||
2283                  (fr->vdwtype == evdwSWITCH)) && fr->rvdw_switch == 0)
2284             {
2285                 gmx_fatal(FARGS,
2286                           "With dispersion correction rvdw-switch can not be zero "
2287                           "for vdw-type = %s", evdw_names[fr->vdwtype]);
2288             }
2289
2290             scale  = fr->nblists[0].table_vdw.scale;
2291             vdwtab = fr->nblists[0].table_vdw.data;
2292
2293             /* Round the cut-offs to exact table values for precision */
2294             ri0  = floor(fr->rvdw_switch*scale);
2295             ri1  = ceil(fr->rvdw*scale);
2296
2297             /* The code below has some support for handling force-switching, i.e.
2298              * when the force (instead of potential) is switched over a limited
2299              * region. This leads to a constant shift in the potential inside the
2300              * switching region, which we can handle by adding a constant energy
2301              * term in the force-switch case just like when we do potential-shift.
2302              *
2303              * For now this is not enabled, but to keep the functionality in the
2304              * code we check separately for switch and shift. When we do force-switch
2305              * the shifting point is rvdw_switch, while it is the cutoff when we
2306              * have a classical potential-shift.
2307              *
2308              * For a pure potential-shift the potential has a constant shift
2309              * all the way out to the cutoff, and that is it. For other forms
2310              * we need to calculate the constant shift up to the point where we
2311              * start modifying the potential.
2312              */
2313             ri0  = (fr->vdw_modifier == eintmodPOTSHIFT) ? ri1 : ri0;
2314
2315             r0   = ri0/scale;
2316             r1   = ri1/scale;
2317             rc3  = r0*r0*r0;
2318             rc9  = rc3*rc3*rc3;
2319
2320             if ((fr->vdw_modifier == eintmodFORCESWITCH) ||
2321                 (fr->vdwtype == evdwSHIFT))
2322             {
2323                 /* Determine the constant energy shift below rvdw_switch.
2324                  * Table has a scale factor since we have scaled it down to compensate
2325                  * for scaling-up c6/c12 with the derivative factors to save flops in analytical kernels.
2326                  */
2327                 fr->enershiftsix    = (real)(-1.0/(rc3*rc3)) - 6.0*vdwtab[8*ri0];
2328                 fr->enershifttwelve = (real)( 1.0/(rc9*rc3)) - 12.0*vdwtab[8*ri0 + 4];
2329             }
2330             else if (fr->vdw_modifier == eintmodPOTSHIFT)
2331             {
2332                 fr->enershiftsix    = (real)(-1.0/(rc3*rc3));
2333                 fr->enershifttwelve = (real)( 1.0/(rc9*rc3));
2334             }
2335
2336             /* Add the constant part from 0 to rvdw_switch.
2337              * This integration from 0 to rvdw_switch overcounts the number
2338              * of interactions by 1, as it also counts the self interaction.
2339              * We will correct for this later.
2340              */
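                 /* In essence the terms below are the volume integral of the
                  * constant shift: integral from 0 to r0 of 4 pi r^2 dr = (4 pi/3) r0^3.
                  */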
2341             eners[0] += 4.0*M_PI*fr->enershiftsix*rc3/3.0;
2342             eners[1] += 4.0*M_PI*fr->enershifttwelve*rc3/3.0;
2343
2344             /* Calculate the contribution in the range [r0,r1] where we
2345              * modify the potential. For a pure potential-shift modifier we will
2346              * have ri0==ri1, and there will not be any contribution here.
2347              */
2348             for (i = 0; i < 2; i++)
2349             {
2350                 enersum = 0;
2351                 virsum  = 0;
2352                 integrate_table(vdwtab, scale, (i == 0 ? 0 : 4), ri0, ri1, &enersum, &virsum);
2353                 eners[i] -= enersum;
2354                 virs[i]  -= virsum;
2355             }
2356
2357             /* Alright: Above we compensated by REMOVING the parts outside r0
2358              * corresponding to the ideal VdW 1/r6 and /r12 potentials.
2359              *
2360              * Regardless of whether r0 is the point where we start switching,
2361              * or the cutoff where we calculated the constant shift, we include
2362              * all the parts we are missing out to infinity from r0 by
2363              * calculating the analytical dispersion correction.
2364              */
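                 /* These are the standard analytical tail corrections, e.g. for
                  * the dispersion term V = -1/r^6:
                  *   energy: integral r0..inf of 4 pi r^2 V dr        = -4 pi/(3 r0^3)
                  *   virial: integral r0..inf of 4 pi r^2 r dV/dr dr  =  8 pi/r0^3
                  * and analogously for the 1/r^12 repulsion.
                  */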
2365             eners[0] += -4.0*M_PI/(3.0*rc3);
2366             eners[1] +=  4.0*M_PI/(9.0*rc9);
2367             virs[0]  +=  8.0*M_PI/rc3;
2368             virs[1]  += -16.0*M_PI/(3.0*rc9);
2369         }
2370         else if (fr->vdwtype == evdwCUT ||
2371                  EVDW_PME(fr->vdwtype) ||
2372                  fr->vdwtype == evdwUSER)
2373         {
2374             if (fr->vdwtype == evdwUSER && fplog)
2375             {
2376                 fprintf(fplog,
2377                         "WARNING: using dispersion correction with user tables\n");
2378             }
2379
2380             /* Note that with LJ-PME, the dispersion correction is multiplied
2381              * by the difference between the actual C6 and the value of C6
2382              * that would produce the combination rule.
2383              * This means the normal energy and virial difference formulas
2384              * can be used here.
2385              */
2386
2387             rc3  = fr->rvdw*fr->rvdw*fr->rvdw;
2388             rc9  = rc3*rc3*rc3;
2389             /* Contribution beyond the cut-off */
2390             eners[0] += -4.0*M_PI/(3.0*rc3);
2391             eners[1] +=  4.0*M_PI/(9.0*rc9);
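                 /* A potential-shift modifier adds constants -1/rc^6 and +1/rc^12
                  * inside the cut-off sphere; their volume integrals give the
                  * terms below.
                  */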
2392             if (fr->vdw_modifier == eintmodPOTSHIFT)
2393             {
2394                 /* Contribution within the cut-off */
2395                 eners[0] += -4.0*M_PI/(3.0*rc3);
2396                 eners[1] +=  4.0*M_PI/(3.0*rc9);
2397             }
2398             /* Contribution beyond the cut-off */
2399             virs[0]  +=  8.0*M_PI/rc3;
2400             virs[1]  += -16.0*M_PI/(3.0*rc9);
2401         }
2402         else
2403         {
2404             gmx_fatal(FARGS,
2405                       "Dispersion correction is not implemented for vdw-type = %s",
2406                       evdw_names[fr->vdwtype]);
2407         }
2408
2409         /* When we deprecate the group kernels the code below can go too */
2410         if (fr->vdwtype == evdwPME && fr->cutoff_scheme == ecutsGROUP)
2411         {
2412             /* Calculate self-interaction coefficient (assuming that
2413              * the reciprocal-space contribution is constant in the
2414              * region that contributes to the self-interaction).
2415              */
2416             fr->enershiftsix = pow(fr->ewaldcoeff_lj, 6) / 6.0;
2417
2418             eners[0] += -pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3)/3.0;
2419             virs[0]  +=  pow(sqrt(M_PI)*fr->ewaldcoeff_lj, 3);
2420         }
2421
2422         fr->enerdiffsix    = eners[0];
2423         fr->enerdifftwelve = eners[1];
2424         /* The 0.5 is due to the Gromacs definition of the virial */
2425         fr->virdiffsix     = 0.5*virs[0];
2426         fr->virdifftwelve  = 0.5*virs[1];
2427     }
2428 }
2429
2430 void calc_dispcorr(t_inputrec *ir, t_forcerec *fr,
2431                    int natoms,
2432                    matrix box, real lambda, tensor pres, tensor virial,
2433                    real *prescorr, real *enercorr, real *dvdlcorr)
2434 {
2435     gmx_bool bCorrAll, bCorrPres;
2436     real     dvdlambda, invvol, dens, ninter, avcsix, avctwelve, enerdiff, svir = 0, spres = 0;
2437     int      m;
2438
2439     *prescorr = 0;
2440     *enercorr = 0;
2441     *dvdlcorr = 0;
2442
2443     clear_mat(virial);
2444     clear_mat(pres);
2445
2446     if (ir->eDispCorr != edispcNO)
2447     {
2448         bCorrAll  = (ir->eDispCorr == edispcAllEner ||
2449                      ir->eDispCorr == edispcAllEnerPres);
2450         bCorrPres = (ir->eDispCorr == edispcEnerPres ||
2451                      ir->eDispCorr == edispcAllEnerPres);
2452
2453         invvol = 1/det(box);
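             /* ninter counts the interactions to correct: 0.5*natoms avoids
              * double counting pairs, while for TPI only the interactions of
              * the inserted molecule with the rest are corrected.
              */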
2454         if (fr->n_tpi)
2455         {
2456             /* Only correct for the interactions with the inserted molecule */
2457             dens   = (natoms - fr->n_tpi)*invvol;
2458             ninter = fr->n_tpi;
2459         }
2460         else
2461         {
2462             dens   = natoms*invvol;
2463             ninter = 0.5*natoms;
2464         }
2465
2466         if (ir->efep == efepNO)
2467         {
2468             avcsix    = fr->avcsix[0];
2469             avctwelve = fr->avctwelve[0];
2470         }
2471         else
2472         {
2473             avcsix    = (1 - lambda)*fr->avcsix[0]    + lambda*fr->avcsix[1];
2474             avctwelve = (1 - lambda)*fr->avctwelve[0] + lambda*fr->avctwelve[1];
2475         }
2476
2477         enerdiff   = ninter*(dens*fr->enerdiffsix - fr->enershiftsix);
2478         *enercorr += avcsix*enerdiff;
2479         dvdlambda  = 0.0;
2480         if (ir->efep != efepNO)
2481         {
2482             dvdlambda += (fr->avcsix[1] - fr->avcsix[0])*enerdiff;
2483         }
2484         if (bCorrAll)
2485         {
2486             enerdiff   = ninter*(dens*fr->enerdifftwelve - fr->enershifttwelve);
2487             *enercorr += avctwelve*enerdiff;
2488             if (fr->efep != efepNO)
2489             {
2490                 dvdlambda += (fr->avctwelve[1] - fr->avctwelve[0])*enerdiff;
2491             }
2492         }
2493
2494         if (bCorrPres)
2495         {
2496             svir = ninter*dens*avcsix*fr->virdiffsix/3.0;
2497             if (ir->eDispCorr == edispcAllEnerPres)
2498             {
2499                 svir += ninter*dens*avctwelve*fr->virdifftwelve/3.0;
2500             }
2501             /* The factor 2 is because of the Gromacs virial definition */
2502             spres = -2.0*invvol*svir*PRESFAC;
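                 /* svir already contains the 1/3 from averaging the trace,
                  * so the correction per diagonal element is -2*svir/V,
                  * with PRESFAC converting to bar.
                  */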
2503
2504             for (m = 0; m < DIM; m++)
2505             {
2506                 virial[m][m] += svir;
2507                 pres[m][m]   += spres;
2508             }
2509             *prescorr += spres;
2510         }
2511
2512         /* Can't currently control when this prints; for now, just print when debugging */
2513         if (debug)
2514         {
2515             if (bCorrAll)
2516             {
2517                 fprintf(debug, "Long Range LJ corr.: <C6> %10.4e, <C12> %10.4e\n",
2518                         avcsix, avctwelve);
2519             }
2520             if (bCorrPres)
2521             {
2522                 fprintf(debug,
2523                         "Long Range LJ corr.: Epot %10g, Pres: %10g, Vir: %10g\n",
2524                         *enercorr, spres, svir);
2525             }
2526             else
2527             {
2528                 fprintf(debug, "Long Range LJ corr.: Epot %10g\n", *enercorr);
2529             }
2530         }
2531
2532         if (fr->efep != efepNO)
2533         {
2534             *dvdlcorr += dvdlambda;
2535         }
2536     }
2537 }
2538
2539 void do_pbc_first(FILE *fplog, matrix box, t_forcerec *fr,
2540                   t_graph *graph, rvec x[])
2541 {
2542     if (fplog)
2543     {
2544         fprintf(fplog, "Removing pbc first time\n");
2545     }
2546     calc_shifts(box, fr->shift_vec);
2547     if (graph)
2548     {
2549         mk_mshift(fplog, graph, fr->ePBC, box, x);
2550         if (gmx_debug_at)
2551         {
2552             p_graph(debug, "do_pbc_first 1", graph);
2553         }
2554         shift_self(graph, box, x);
2555         /* By doing an extra mk_mshift the molecules that are broken
2556          * because they were e.g. imported from another software
2557          * will be made whole again. Such are the healing powers
2558          * of GROMACS.
2559          */
2560         mk_mshift(fplog, graph, fr->ePBC, box, x);
2561         if (gmx_debug_at)
2562         {
2563             p_graph(debug, "do_pbc_first 2", graph);
2564         }
2565     }
2566     if (fplog)
2567     {
2568         fprintf(fplog, "Done rmpbc\n");
2569     }
2570 }
2571
2572 static void low_do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2573                             gmx_mtop_t *mtop, rvec x[],
2574                             gmx_bool bFirst)
2575 {
2576     t_graph        *graph;
2577     int             mb, as, mol;
2578     gmx_molblock_t *molb;
2579
2580     if (bFirst && fplog)
2581     {
2582         fprintf(fplog, "Removing pbc first time\n");
2583     }
2584
2585     snew(graph, 1);
2586     as = 0;
2587     for (mb = 0; mb < mtop->nmolblock; mb++)
2588     {
2589         molb = &mtop->molblock[mb];
2590         if (molb->natoms_mol == 1 ||
2591             (!bFirst && mtop->moltype[molb->type].cgs.nr == 1))
2592         {
2593             /* Just one atom or charge group in the molecule, no PBC required */
2594             as += molb->nmol*molb->natoms_mol;
2595         }
2596         else
2597         {
2598             /* Pass NULL instead of fplog to avoid graph prints for each molecule type */
2599             mk_graph_ilist(NULL, mtop->moltype[molb->type].ilist,
2600                            0, molb->natoms_mol, FALSE, FALSE, graph);
2601
2602             for (mol = 0; mol < molb->nmol; mol++)
2603             {
2604                 mk_mshift(fplog, graph, ePBC, box, x+as);
2605
2606                 shift_self(graph, box, x+as);
2607                 /* The molecule is whole now.
2608                  * We don't need the second mk_mshift call as in do_pbc_first,
2609                  * since we no longer need this graph.
2610                  */
2611
2612                 as += molb->natoms_mol;
2613             }
2614             done_graph(graph);
2615         }
2616     }
2617     sfree(graph);
2618 }
2619
2620 void do_pbc_first_mtop(FILE *fplog, int ePBC, matrix box,
2621                        gmx_mtop_t *mtop, rvec x[])
2622 {
2623     low_do_pbc_mtop(fplog, ePBC, box, mtop, x, TRUE);
2624 }
2625
2626 void do_pbc_mtop(FILE *fplog, int ePBC, matrix box,
2627                  gmx_mtop_t *mtop, rvec x[])
2628 {
2629     low_do_pbc_mtop(fplog, ePBC, box, mtop, x, FALSE);
2630 }
2631
2632 void finish_run(FILE *fplog, t_commrec *cr,
2633                 t_inputrec *inputrec,
2634                 t_nrnb nrnb[], gmx_wallcycle_t wcycle,
2635                 gmx_walltime_accounting_t walltime_accounting,
2636                 nonbonded_verlet_t *nbv,
2637                 gmx_bool bWriteStat)
2638 {
2639     int     i, j;
2640     t_nrnb *nrnb_tot = NULL;
2641     real    delta_t;
2642     double  nbfs, mflop;
2643     double  elapsed_time,
2644             elapsed_time_over_all_ranks,
2645             elapsed_time_over_all_threads,
2646             elapsed_time_over_all_threads_over_all_ranks;
2647     wallcycle_sum(cr, wcycle);
2648
2649     if (cr->nnodes > 1)
2650     {
2651         snew(nrnb_tot, 1);
2652 #ifdef GMX_MPI
2653         MPI_Allreduce(nrnb->n, nrnb_tot->n, eNRNB, MPI_DOUBLE, MPI_SUM,
2654                       cr->mpi_comm_mysim);
2655 #endif
2656     }
2657     else
2658     {
2659         nrnb_tot = nrnb;
2660     }
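         /* Note that for a single rank nrnb_tot just aliases nrnb, which is
          * why it is only freed below when cr->nnodes > 1.
          */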
2661
2662     elapsed_time                                 = walltime_accounting_get_elapsed_time(walltime_accounting);
2663     elapsed_time_over_all_ranks                  = elapsed_time;
2664     elapsed_time_over_all_threads                = walltime_accounting_get_elapsed_time_over_all_threads(walltime_accounting);
2665     elapsed_time_over_all_threads_over_all_ranks = elapsed_time_over_all_threads;
2666 #ifdef GMX_MPI
2667     if (cr->nnodes > 1)
2668     {
2669         /* reduce elapsed_time over all MPI ranks in the current simulation */
2670         MPI_Allreduce(&elapsed_time,
2671                       &elapsed_time_over_all_ranks,
2672                       1, MPI_DOUBLE, MPI_SUM,
2673                       cr->mpi_comm_mysim);
2674         elapsed_time_over_all_ranks /= cr->nnodes;
2675         /* Reduce elapsed_time_over_all_threads over all MPI ranks in the
2676          * current simulation. */
2677         MPI_Allreduce(&elapsed_time_over_all_threads,
2678                       &elapsed_time_over_all_threads_over_all_ranks,
2679                       1, MPI_DOUBLE, MPI_SUM,
2680                       cr->mpi_comm_mysim);
2681     }
2682 #endif
2683
2684     if (SIMMASTER(cr))
2685     {
2686         print_flop(fplog, nrnb_tot, &nbfs, &mflop);
2687     }
2688     if (cr->nnodes > 1)
2689     {
2690         sfree(nrnb_tot);
2691     }
2692
2693     if ((cr->duty & DUTY_PP) && DOMAINDECOMP(cr))
2694     {
2695         print_dd_statistics(cr, inputrec, fplog);
2696     }
2697
2698     if (SIMMASTER(cr))
2699     {
2700         wallclock_gpu_t *gputimes = use_GPU(nbv) ?
2701             nbnxn_cuda_get_timings(nbv->cu_nbv) : NULL;
2702         wallcycle_print(fplog, cr->nnodes, cr->npmenodes,
2703                         elapsed_time_over_all_ranks,
2704                         wcycle, gputimes);
2705
2706         if (EI_DYNAMICS(inputrec->eI))
2707         {
2708             delta_t = inputrec->delta_t;
2709         }
2710         else
2711         {
2712             delta_t = 0;
2713         }
2714
2715         if (fplog)
2716         {
2717             print_perf(fplog, elapsed_time_over_all_threads_over_all_ranks,
2718                        elapsed_time_over_all_ranks,
2719                        walltime_accounting_get_nsteps_done(walltime_accounting),
2720                        delta_t, nbfs, mflop);
2721         }
2722         if (bWriteStat)
2723         {
2724             print_perf(stderr, elapsed_time_over_all_threads_over_all_ranks,
2725                        elapsed_time_over_all_ranks,
2726                        walltime_accounting_get_nsteps_done(walltime_accounting),
2727                        delta_t, nbfs, mflop);
2728         }
2729     }
2730 }
2731
2732 extern void initialize_lambdas(FILE *fplog, t_inputrec *ir, int *fep_state, real *lambda, double *lam0)
2733 {
2734     /* this function works, but could probably use a logic rewrite to keep all the different
2735        types of efep straight. */
2736
2737     int       i;
2738     t_lambda *fep = ir->fepvals;
2739
2740     if ((ir->efep == efepNO) && (ir->bSimTemp == FALSE))
2741     {
2742         for (i = 0; i < efptNR; i++)
2743         {
2744             lambda[i] = 0.0;
2745             if (lam0)
2746             {
2747                 lam0[i] = 0.0;
2748             }
2749         }
2750         return;
2751     }
2752     else
2753     {
2754         *fep_state = fep->init_fep_state; /* this might overwrite the value read
2755                                              from a checkpoint -- for now a kludge
2756                                              is in place to prevent that. */
2757         for (i = 0; i < efptNR; i++)
2758         {
2759             /* overwrite lambda state with init_lambda for now for backwards compatibility */
2760             if (fep->init_lambda >= 0) /* if it's -1, it was never initialized */
2761             {
2762                 lambda[i] = fep->init_lambda;
2763                 if (lam0)
2764                 {
2765                     lam0[i] = lambda[i];
2766                 }
2767             }
2768             else
2769             {
2770                 lambda[i] = fep->all_lambda[i][*fep_state];
2771                 if (lam0)
2772                 {
2773                     lam0[i] = lambda[i];
2774                 }
2775             }
2776         }
2777         if (ir->bSimTemp)
2778         {
2779             /* need to rescale control temperatures to match current state */
2780             for (i = 0; i < ir->opts.ngtc; i++)
2781             {
2782                 if (ir->opts.ref_t[i] > 0)
2783                 {
2784                     ir->opts.ref_t[i] = ir->simtempvals->temperatures[*fep_state];
2785                 }
2786             }
2787         }
2788     }
2789
2790     /* Send to the log the information on the current lambdas */
2791     if (fplog != NULL)
2792     {
2793         fprintf(fplog, "Initial vector of lambda components:[ ");
2794         for (i = 0; i < efptNR; i++)
2795         {
2796             fprintf(fplog, "%10.4f ", lambda[i]);
2797         }
2798         fprintf(fplog, "]\n");
2799     }
2800     return;
2801 }
2802
2803
2804 void init_md(FILE *fplog,
2805              t_commrec *cr, t_inputrec *ir, const output_env_t oenv,
2806              double *t, double *t0,
2807              real *lambda, int *fep_state, double *lam0,
2808              t_nrnb *nrnb, gmx_mtop_t *mtop,
2809              gmx_update_t *upd,
2810              int nfile, const t_filenm fnm[],
2811              gmx_mdoutf_t *outf, t_mdebin **mdebin,
2812              tensor force_vir, tensor shake_vir, rvec mu_tot,
2813              gmx_bool *bSimAnn, t_vcm **vcm, unsigned long Flags,
2814              gmx_wallcycle_t wcycle)
2815 {
2816     int  i, j, n;
2817     real tmpt, mod;
2818
2819     /* Initial values */
2820     *t = *t0       = ir->init_t;
2821
2822     *bSimAnn = FALSE;
2823     for (i = 0; i < ir->opts.ngtc; i++)
2824     {
2825         /* set bSimAnn if any group is being annealed */
2826         if (ir->opts.annealing[i] != eannNO)
2827         {
2828             *bSimAnn = TRUE;
2829         }
2830     }
2831     if (*bSimAnn)
2832     {
2833         update_annealing_target_temp(&(ir->opts), ir->init_t);
2834     }
2835
2836     /* Initialize lambda variables */
2837     initialize_lambdas(fplog, ir, fep_state, lambda, lam0);
2838
2839     if (upd)
2840     {
2841         *upd = init_update(ir);
2842     }
2843
2844
2845     if (vcm != NULL)
2846     {
2847         *vcm = init_vcm(fplog, &mtop->groups, ir);
2848     }
2849
2850     if (EI_DYNAMICS(ir->eI) && !(Flags & MD_APPENDFILES))
2851     {
2852         if (ir->etc == etcBERENDSEN)
2853         {
2854             please_cite(fplog, "Berendsen84a");
2855         }
2856         if (ir->etc == etcVRESCALE)
2857         {
2858             please_cite(fplog, "Bussi2007a");
2859         }
2860         if (ir->eI == eiSD1)
2861         {
2862             please_cite(fplog, "Goga2012");
2863         }
2864     }
2865
2866     init_nrnb(nrnb);
2867
2868     if (nfile != -1)
2869     {
2870         *outf = init_mdoutf(fplog, nfile, fnm, Flags, cr, ir, mtop, oenv, wcycle);
2871
2872         *mdebin = init_mdebin((Flags & MD_APPENDFILES) ? NULL : mdoutf_get_fp_ene(*outf),
2873                               mtop, ir, mdoutf_get_fp_dhdl(*outf));
2874     }
2875
2876     if (ir->bAdress)
2877     {
2878         please_cite(fplog, "Fritsch12");
2879         please_cite(fplog, "Junghans10");
2880     }
2881     /* Initiate variables */
2882     clear_mat(force_vir);
2883     clear_mat(shake_vir);
2884     clear_rvec(mu_tot);
2885
2886     debug_gmx();
2887 }