Add missing mass term to deltaH
[alexxy/gromacs.git] / src / gromacs / mdlib / force.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 #include "gmxpre.h"
38
39 #include "force.h"
40
41 #include "config.h"
42
43 #include <assert.h>
44 #include <string.h>
45
46 #include <cmath>
47
48 #include "gromacs/domdec/domdec.h"
49 #include "gromacs/domdec/domdec_struct.h"
50 #include "gromacs/ewald/ewald.h"
51 #include "gromacs/ewald/long-range-correction.h"
52 #include "gromacs/ewald/pme.h"
53 #include "gromacs/gmxlib/network.h"
54 #include "gromacs/gmxlib/nrnb.h"
55 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
56 #include "gromacs/listed-forces/listed-forces.h"
57 #include "gromacs/math/vec.h"
58 #include "gromacs/math/vecdump.h"
59 #include "gromacs/mdlib/forcerec-threading.h"
60 #include "gromacs/mdlib/genborn.h"
61 #include "gromacs/mdlib/mdrun.h"
62 #include "gromacs/mdlib/ns.h"
63 #include "gromacs/mdlib/qmmm.h"
64 #include "gromacs/mdtypes/commrec.h"
65 #include "gromacs/mdtypes/forceoutput.h"
66 #include "gromacs/mdtypes/inputrec.h"
67 #include "gromacs/mdtypes/md_enums.h"
68 #include "gromacs/pbcutil/ishift.h"
69 #include "gromacs/pbcutil/mshift.h"
70 #include "gromacs/pbcutil/pbc.h"
71 #include "gromacs/timing/wallcycle.h"
72 #include "gromacs/utility/cstringutil.h"
73 #include "gromacs/utility/exceptions.h"
74 #include "gromacs/utility/fatalerror.h"
75 #include "gromacs/utility/smalloc.h"
76
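/*! \brief Perform neighbor searching for the group cut-off scheme.
 *
 * A brief descriptive note (not in the original source): the neighbor list
 * is initialized on first use, search_neighbours() is then called, and the
 * resulting lists are optionally dumped when fr->ns->dump_nl is set.
 */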
77 void ns(FILE              *fp,
78         t_forcerec        *fr,
79         matrix             box,
80         gmx_groups_t      *groups,
81         gmx_localtop_t    *top,
82         t_mdatoms         *md,
83         t_commrec         *cr,
84         t_nrnb            *nrnb,
85         gmx_bool           bFillGrid)
86 {
87     int     nsearch;
88
89
90     if (!fr->ns->nblist_initialized)
91     {
92         init_neighbor_list(fp, fr, md->homenr);
93     }
94
95     nsearch = search_neighbours(fp, fr, box, top, groups, cr, nrnb, md,
96                                 bFillGrid);
97     if (debug)
98     {
99         fprintf(debug, "nsearch = %d\n", nsearch);
100     }
101
102     /* Check whether we have to do dynamic load balancing */
103     /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
104        count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
105        &(top->idef),opts->ngener);
106      */
107     if (fr->ns->dump_nl > 0)
108     {
109         dump_nblist(fp, cr, fr, fr->ns->dump_nl);
110     }
111 }
112
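/*! \brief Zero the per-thread Ewald-correction accumulators
 * (correction energies, dV/dlambda components and virials). */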
113 static void clearEwaldThreadOutput(ewald_corr_thread_t *ewc_t)
114 {
115     ewc_t->Vcorr_q        = 0;
116     ewc_t->Vcorr_lj       = 0;
117     ewc_t->dvdl[efptCOUL] = 0;
118     ewc_t->dvdl[efptVDW]  = 0;
119     clear_mat(ewc_t->vir_q);
120     clear_mat(ewc_t->vir_lj);
121 }
122
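/*! \brief Reduce the per-thread Ewald-correction outputs by accumulating
 * the contributions of threads 1..nthreads-1 into element 0. */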
123 static void reduceEwaldThreadOutput(int nthreads, ewald_corr_thread_t *ewc_t)
124 {
125     ewald_corr_thread_t &dest = ewc_t[0];
126
127     for (int t = 1; t < nthreads; t++)
128     {
129         dest.Vcorr_q        += ewc_t[t].Vcorr_q;
130         dest.Vcorr_lj       += ewc_t[t].Vcorr_lj;
131         dest.dvdl[efptCOUL] += ewc_t[t].dvdl[efptCOUL];
132         dest.dvdl[efptVDW]  += ewc_t[t].dvdl[efptVDW];
133         m_add(dest.vir_q,  ewc_t[t].vir_q,  dest.vir_q);
134         m_add(dest.vir_lj, ewc_t[t].vir_lj, dest.vir_lj);
135     }
136 }
137
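/*! \brief Compute the lower-level force and energy contributions.
 *
 * A descriptive note (not in the original source): this routine handles, in
 * order, QM/MM, walls, generalized-Born terms, group-scheme short-range
 * non-bonded interactions (including foreign-lambda re-evaluation for
 * soft-core), listed interactions, and the long-range PME/Ewald
 * electrostatics and LJ-PME terms together with their corrections.
 */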
138 void do_force_lowlevel(t_forcerec *fr,      t_inputrec *ir,
139                        t_idef     *idef,    t_commrec  *cr,
140                        t_nrnb     *nrnb,    gmx_wallcycle_t wcycle,
141                        t_mdatoms  *md,
142                        rvec       x[],      history_t  *hist,
143                        rvec      *forceForUseWithShiftForces,
144                        gmx::ForceWithVirial *forceWithVirial,
145                        gmx_enerdata_t *enerd,
146                        t_fcdata   *fcd,
147                        gmx_localtop_t *top,
148                        gmx_genborn_t *born,
149                        gmx_bool       bBornRadii,
150                        matrix     box,
151                        t_lambda   *fepvals,
152                        real       *lambda,
153                        t_graph    *graph,
154                        t_blocka   *excl,
155                        rvec       mu_tot[],
156                        int        flags,
157                        float     *cycles_pme)
158 {
159     int         i, j;
160     int         donb_flags;
161     int         pme_flags;
162     t_pbc       pbc;
163     real        dvdl_dum[efptNR], dvdl_nb[efptNR];
164
165 #if GMX_MPI
166     double  t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
167 #endif
168
169     set_pbc(&pbc, fr->ePBC, box);
170
171     /* reset free energy components */
172     for (i = 0; i < efptNR; i++)
173     {
174         dvdl_nb[i]  = 0;
175         dvdl_dum[i] = 0;
176     }
177
178     /* do QMMM first if requested */
179     if (fr->bQMMM)
180     {
181         enerd->term[F_EQM] = calculate_QMMM(cr, forceForUseWithShiftForces, fr);
182     }
183
184     /* Call the short range functions all in one go. */
185
186 #if GMX_MPI
187     /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
188 #define TAKETIME FALSE
189     if (TAKETIME)
190     {
191         MPI_Barrier(cr->mpi_comm_mygroup);
192         t0 = MPI_Wtime();
193     }
194 #endif
195
196     if (ir->nwall)
197     {
198         /* foreign lambda component for walls */
199         real dvdl_walls = do_walls(ir, fr, box, md, x, forceForUseWithShiftForces, lambda[efptVDW],
200                                    enerd->grpp.ener[egLJSR], nrnb);
201         enerd->dvdl_lin[efptVDW] += dvdl_walls;
202     }
203
204     /* If doing GB, reset dvda and calculate the Born radii */
205     if (ir->implicit_solvent)
206     {
207         wallcycle_sub_start(wcycle, ewcsNONBONDED);
208
209         for (i = 0; i < born->nr; i++)
210         {
211             fr->dvda[i] = 0;
212         }
213
214         if (bBornRadii)
215         {
216             calc_gb_rad(cr, fr, ir, top, x, fr->gblist, born, md, nrnb);
217         }
218
219         wallcycle_sub_stop(wcycle, ewcsNONBONDED);
220     }
221
222     where();
223     /* We only do non-bonded calculation with group scheme here, the verlet
224      * calls are done from do_force_cutsVERLET(). */
225     if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
226     {
227         donb_flags = 0;
228         /* Add short-range interactions */
229         donb_flags |= GMX_NONBONDED_DO_SR;
230
231         /* Currently all group scheme kernels always calculate (shift-)forces */
232         if (flags & GMX_FORCE_FORCES)
233         {
234             donb_flags |= GMX_NONBONDED_DO_FORCE;
235         }
236         if (flags & GMX_FORCE_VIRIAL)
237         {
238             donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
239         }
240         if (flags & GMX_FORCE_ENERGY)
241         {
242             donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
243         }
244
245         wallcycle_sub_start(wcycle, ewcsNONBONDED);
246         do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
247                      &enerd->grpp, nrnb,
248                      lambda, dvdl_nb, -1, -1, donb_flags);
249
250         /* If we do foreign lambda and we have soft-core interactions
251          * we have to recalculate the (non-linear) energy contributions.
252          */
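        /* Note (added for clarity): the foreign-lambda potential energies
         * computed here are accumulated into enerd->enerpart_lambda and are
         * later combined with the linear dH/dlambda terms in sum_dhdl().
         */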
253         if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
254         {
255             for (i = 0; i < enerd->n_lambda; i++)
256             {
257                 real lam_i[efptNR];
258
259                 for (j = 0; j < efptNR; j++)
260                 {
261                     lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
262                 }
263                 reset_foreign_enerdata(enerd);
264                 do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
265                              &(enerd->foreign_grpp), nrnb,
266                              lam_i, dvdl_dum, -1, -1,
267                              (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
268                 sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
269                 enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
270             }
271         }
272         wallcycle_sub_stop(wcycle, ewcsNONBONDED);
273         where();
274     }
275
276     /* If we are doing GB, calculate bonded forces and apply corrections
277      * to the solvation forces */
278     /* MRS: Eventually, may need to include the free energy contribution here! */
279     if (ir->implicit_solvent)
280     {
281         wallcycle_sub_start(wcycle, ewcsLISTED);
282         calc_gb_forces(cr, md, born, top, x, forceForUseWithShiftForces, fr, idef,
283                        ir->gb_algorithm, ir->sa_algorithm, nrnb, &pbc, graph, enerd);
284         wallcycle_sub_stop(wcycle, ewcsLISTED);
285     }
286
287 #if GMX_MPI
288     if (TAKETIME)
289     {
290         t1          = MPI_Wtime();
291         fr->t_fnbf += t1-t0;
292     }
293 #endif
294
295     if (fepvals->sc_alpha != 0)
296     {
297         enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
298     }
299     else
300     {
301         enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
302     }
303
304     if (fepvals->sc_alpha != 0)
305
306     /* even though the coulomb part is linear, we already added it, because we
307        need to go through the vdw calculation anyway */
308     {
309         enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
310     }
311     else
312     {
313         enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
314     }
315
316     if (debug)
317     {
318         pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
319     }
320
321     /* Shift the coordinates. This must be done before listed forces and PPPM,
322      * but is also necessary for SHAKE and update; therefore it can NOT be
323      * skipped even when no listed forces have to be evaluated.
324      *
325      * The shifting and PBC code is deliberately not timed, since with
326      * the Verlet scheme it only takes non-zero time with triclinic
327      * boxes, and even then the time is around a factor of 100 less
328      * than the next smallest counter.
329      */
330
331
332     /* With NBFonly we sometimes would not need to shift here,
333      * but we do so anyway for consistency of the returned coordinates.
334      */
335     if (graph)
336     {
337         shift_self(graph, box, x);
338         if (TRICLINIC(box))
339         {
340             inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
341         }
342         else
343         {
344             inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
345         }
346     }
347     /* Check whether we need to do listed interactions or correct for exclusions */
348     if (fr->bMolPBC &&
349         ((flags & GMX_FORCE_LISTED)
350          || EEL_RF(fr->ic->eeltype) || EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)))
351     {
352         /* TODO There are no electrostatics methods that require this
353            transformation, when using the Verlet scheme, so update the
354            above conditional. */
355         /* Since all atoms are in the rectangular or triclinic unit-cell,
356          * only single box vector shifts (2 in x) are required.
357          */
358         set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : nullptr,
359                    TRUE, box);
360     }
361
362     do_force_listed(wcycle, box, ir->fepvals, cr,
363                     idef, (const rvec *) x, hist,
364                     forceForUseWithShiftForces, forceWithVirial,
365                     fr, &pbc, graph, enerd, nrnb, lambda, md, fcd,
366                     DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr,
367                     flags);
368
369     where();
370
371     *cycles_pme = 0;
372
373     /* Do long-range electrostatics and/or LJ-PME, including related short-range
374      * corrections.
375      */
376     if (EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
377     {
378         int  status            = 0;
379         real Vlr_q             = 0, Vlr_lj = 0;
380
381         /* We reduce all virial, dV/dlambda and energy contributions, except
382          * for the reciprocal energies (Vlr_q, Vlr_lj) into the same struct.
383          */
384         ewald_corr_thread_t &ewaldOutput = fr->ewc_t[0];
385         clearEwaldThreadOutput(&ewaldOutput);
386
387         if (EEL_PME_EWALD(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
388         {
389             /* With the Verlet scheme exclusion forces are calculated
390              * in the non-bonded kernel.
391              */
392             /* The TPI molecule does not have exclusions with the rest
393              * of the system and no intra-molecular PME grid
394              * contributions will be calculated in
395              * gmx_pme_calc_energy.
396              */
397             if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
398                 ir->ewald_geometry != eewg3D ||
399                 ir->epsilon_surface != 0)
400             {
401                 int nthreads, t;
402
403                 wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);
404
405                 if (fr->n_tpi > 0)
406                 {
407                     gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
408                 }
409
410                 nthreads = fr->nthread_ewc;
411 #pragma omp parallel for num_threads(nthreads) schedule(static)
412                 for (t = 0; t < nthreads; t++)
413                 {
414                     try
415                     {
416                         ewald_corr_thread_t &ewc_t = fr->ewc_t[t];
417                         if (t > 0)
418                         {
419                             clearEwaldThreadOutput(&ewc_t);
420                         }
421
422                         /* Threading is only supported with the Verlet cut-off
423                          * scheme and then only single particle forces (no
424                          * exclusion forces) are calculated, so we can store
425                          * the forces in the normal, single forceWithVirial->force_ array.
426                          */
427                         ewald_LRcorrection(md->homenr, cr, nthreads, t, fr, ir,
428                                            md->chargeA, md->chargeB,
429                                            md->sqrt_c6A, md->sqrt_c6B,
430                                            md->sigmaA, md->sigmaB,
431                                            md->sigma3A, md->sigma3B,
432                                            md->nChargePerturbed || md->nTypePerturbed,
433                                            ir->cutoff_scheme != ecutsVERLET,
434                                            excl, x, box, mu_tot,
435                                            ir->ewald_geometry,
436                                            ir->epsilon_surface,
437                                            as_rvec_array(forceWithVirial->force_.data()),
438                                            ewc_t.vir_q, ewc_t.vir_lj,
439                                            &ewc_t.Vcorr_q, &ewc_t.Vcorr_lj,
440                                            lambda[efptCOUL], lambda[efptVDW],
441                                            &ewc_t.dvdl[efptCOUL], &ewc_t.dvdl[efptVDW]);
442                     }
443                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
444                 }
445                 if (nthreads > 1)
446                 {
447                     reduceEwaldThreadOutput(nthreads, fr->ewc_t);
448                 }
449                 wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
450             }
451
452             if (EEL_PME_EWALD(fr->ic->eeltype) && fr->n_tpi == 0)
453             {
454                 /* This is not in a subcounter because it takes a
455                    negligible and constant-sized amount of time */
456                 ewaldOutput.Vcorr_q +=
457                     ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
458                                             &ewaldOutput.dvdl[efptCOUL],
459                                             ewaldOutput.vir_q);
460             }
461
462             if ((EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)) &&
463                 thisRankHasDuty(cr, DUTY_PME) && (pme_run_mode(fr->pmedata) == PmeRunMode::CPU))
464             {
465                 /* Do reciprocal PME for Coulomb and/or LJ. */
466                 assert(fr->n_tpi >= 0);
467                 if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
468                 {
469                     pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;
470
471                     if (flags & GMX_FORCE_FORCES)
472                     {
473                         pme_flags |= GMX_PME_CALC_F;
474                     }
475                     if (flags & GMX_FORCE_VIRIAL)
476                     {
477                         pme_flags |= GMX_PME_CALC_ENER_VIR;
478                     }
479                     if (fr->n_tpi > 0)
480                     {
481                         /* We don't calculate f, but we do want the potential */
482                         pme_flags |= GMX_PME_CALC_POT;
483                     }
484
485                     /* With domain decomposition we close the CPU side load
486                      * balancing region here, because PME does global
487                      * communication that acts as a global barrier.
488                      */
489                     if (DOMAINDECOMP(cr))
490                     {
491                         ddCloseBalanceRegionCpu(cr->dd);
492                     }
493
494                     wallcycle_start(wcycle, ewcPMEMESH);
495                     status = gmx_pme_do(fr->pmedata,
496                                         0, md->homenr - fr->n_tpi,
497                                         x,
498                                         as_rvec_array(forceWithVirial->force_.data()),
499                                         md->chargeA, md->chargeB,
500                                         md->sqrt_c6A, md->sqrt_c6B,
501                                         md->sigmaA, md->sigmaB,
502                                         box, cr,
503                                         DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
504                                         DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
505                                         nrnb, wcycle,
506                                         ewaldOutput.vir_q, ewaldOutput.vir_lj,
507                                         &Vlr_q, &Vlr_lj,
508                                         lambda[efptCOUL], lambda[efptVDW],
509                                         &ewaldOutput.dvdl[efptCOUL],
510                                         &ewaldOutput.dvdl[efptVDW],
511                                         pme_flags);
512                     *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
513                     if (status != 0)
514                     {
515                         gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
516                     }
517
518                     /* We should try to do as little computation after
519                      * this as possible, because parallel PME synchronizes
520                      * the nodes, so we want all load imbalance of the
521                      * rest of the force calculation to be before the PME
522                      * call.  DD load balancing is done on the whole time
523                      * of the force call (without PME).
524                      */
525                 }
526                 if (fr->n_tpi > 0)
527                 {
528                     if (EVDW_PME(ir->vdwtype))
529                     {
530
531                         gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");
532                     }
533                     /* Determine the PME grid energy of the test molecule
534                      * with the PME grid potential of the other charges.
535                      */
536                     gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
537                                         x + md->homenr - fr->n_tpi,
538                                         md->chargeA + md->homenr - fr->n_tpi,
539                                         &Vlr_q);
540                 }
541             }
542         }
543
544         if (!EEL_PME(fr->ic->eeltype) && EEL_PME_EWALD(fr->ic->eeltype))
545         {
546             Vlr_q = do_ewald(ir, x, as_rvec_array(forceWithVirial->force_.data()),
547                              md->chargeA, md->chargeB,
548                              box, cr, md->homenr,
549                              ewaldOutput.vir_q, fr->ic->ewaldcoeff_q,
550                              lambda[efptCOUL], &ewaldOutput.dvdl[efptCOUL],
551                              fr->ewald_table);
552         }
553
554         /* Note that with separate PME nodes we get the real energies later */
555         forceWithVirial->addVirialContribution(ewaldOutput.vir_q);
556         forceWithVirial->addVirialContribution(ewaldOutput.vir_lj);
557         enerd->dvdl_lin[efptCOUL] += ewaldOutput.dvdl[efptCOUL];
558         enerd->dvdl_lin[efptVDW]  += ewaldOutput.dvdl[efptVDW];
559         enerd->term[F_COUL_RECIP]  = Vlr_q + ewaldOutput.Vcorr_q;
560         enerd->term[F_LJ_RECIP]    = Vlr_lj + ewaldOutput.Vcorr_lj;
561
562         if (debug)
563         {
564             fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
565                     Vlr_q, ewaldOutput.Vcorr_q, enerd->term[F_COUL_RECIP]);
566             pr_rvecs(debug, 0, "vir_el_recip after corr", ewaldOutput.vir_q, DIM);
567             pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
568             fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
569                     Vlr_lj, ewaldOutput.Vcorr_lj, enerd->term[F_LJ_RECIP]);
570             pr_rvecs(debug, 0, "vir_lj_recip after corr", ewaldOutput.vir_lj, DIM);
571         }
572     }
573     else
574     {
575         /* Is there a reaction-field exclusion correction needed?
576          * With the Verlet scheme, exclusion forces are calculated
577          * in the non-bonded kernel.
578          */
579         if (ir->cutoff_scheme != ecutsVERLET && EEL_RF(fr->ic->eeltype))
580         {
581             real dvdl_rf_excl      = 0;
582             enerd->term[F_RF_EXCL] =
583                 RF_excl_correction(fr, graph, md, excl, x, forceForUseWithShiftForces,
584                                    fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);
585
586             enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
587         }
588     }
589     where();
590
591     if (debug)
592     {
593         print_nrnb(debug, nrnb);
594     }
595
596 #if GMX_MPI
597     if (TAKETIME)
598     {
599         t2 = MPI_Wtime();
600         MPI_Barrier(cr->mpi_comm_mygroup);
601         t3          = MPI_Wtime();
602         fr->t_wait += t3-t2;
603         if (fr->timesteps == 11)
604         {
605             char buf[22];
606             fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
607                     cr->nodeid, gmx_step_str(fr->timesteps, buf),
608                     100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
609                     (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
610         }
611         fr->timesteps++;
612     }
613 #endif
614
615     if (debug)
616     {
617         pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
618     }
619
620 }
621
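/*! \brief Allocate and zero the energy data: per-term energies, dV/dlambda
 * components, the ngener x ngener group-pair energy matrices and, when
 * n_lambda > 0, the foreign-lambda energy array (current lambda plus
 * n_lambda foreign points). */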
622 void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)
623 {
624     int i, n2;
625
626     for (i = 0; i < F_NRE; i++)
627     {
628         enerd->term[i]         = 0;
629         enerd->foreign_term[i] = 0;
630     }
631
632
633     for (i = 0; i < efptNR; i++)
634     {
635         enerd->dvdl_lin[i]     = 0;
636         enerd->dvdl_nonlin[i]  = 0;
637     }
638
639     n2 = ngener*ngener;
640     if (debug)
641     {
642         fprintf(debug, "Creating %d sized group matrix for energies\n", n2);
643     }
644     enerd->grpp.nener         = n2;
645     enerd->foreign_grpp.nener = n2;
646     for (i = 0; (i < egNR); i++)
647     {
648         snew(enerd->grpp.ener[i], n2);
649         snew(enerd->foreign_grpp.ener[i], n2);
650     }
651
652     if (n_lambda)
653     {
654         enerd->n_lambda = 1 + n_lambda;
655         snew(enerd->enerpart_lambda, enerd->n_lambda);
656     }
657     else
658     {
659         enerd->n_lambda = 0;
660     }
661 }
662
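/*! \brief Free the group-pair and foreign-lambda energy arrays allocated by
 * init_enerdata(). */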
663 void destroy_enerdata(gmx_enerdata_t *enerd)
664 {
665     int i;
666
667     for (i = 0; (i < egNR); i++)
668     {
669         sfree(enerd->grpp.ener[i]);
670     }
671
672     for (i = 0; (i < egNR); i++)
673     {
674         sfree(enerd->foreign_grpp.ener[i]);
675     }
676
677     if (enerd->n_lambda)
678     {
679         sfree(enerd->enerpart_lambda);
680     }
681 }
682
683 static real sum_v(int n, real v[])
684 {
685     real t;
686     int  i;
687
688     t = 0.0;
689     for (i = 0; (i < n); i++)
690     {
691         t = t + v[i];
692     }
693
694     return t;
695 }
696
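/*! \brief Sum the group-pair energies into the corresponding per-term
 * potential energies and accumulate the total in epot[F_EPOT]. */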
697 void sum_epot(gmx_grppairener_t *grpp, real *epot)
698 {
699     int i;
700
701     /* Accumulate energies */
702     epot[F_COUL_SR]  = sum_v(grpp->nener, grpp->ener[egCOULSR]);
703     epot[F_LJ]       = sum_v(grpp->nener, grpp->ener[egLJSR]);
704     epot[F_LJ14]     = sum_v(grpp->nener, grpp->ener[egLJ14]);
705     epot[F_COUL14]   = sum_v(grpp->nener, grpp->ener[egCOUL14]);
706     /* We have already added the 1-2, 1-3, and 1-4 terms to F_GBPOL */
707     epot[F_GBPOL]   += sum_v(grpp->nener, grpp->ener[egGB]);
708
709 /* The lattice part of LR doesn't belong to any group
710  * and has been added earlier.
711  */
712     epot[F_BHAM]     = sum_v(grpp->nener, grpp->ener[egBHAMSR]);
713
714     epot[F_EPOT] = 0;
715     for (i = 0; (i < F_EPOT); i++)
716     {
717         if (i != F_DISRESVIOL && i != F_ORIRESDEV)
718         {
719             epot[F_EPOT] += epot[i];
720         }
721     }
722 }
723
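/*! \brief Combine the linear and non-linear dV/dlambda contributions into the
 * corresponding energy terms and add the linearly extrapolated constraint and
 * mass (kinetic) terms to the foreign-lambda energy differences (deltaH). */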
724 void sum_dhdl(gmx_enerdata_t *enerd, gmx::ArrayRef<const real> lambda, t_lambda *fepvals)
725 {
726     int    index;
727
728     enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW];  /* include dispersion correction */
729     enerd->term[F_DVDL]       = 0.0;
730     for (int i = 0; i < efptNR; i++)
731     {
732         if (fepvals->separate_dvdl[i])
733         {
734             /* could this be done more readably/compactly? */
735             switch (i)
736             {
737                 case (efptMASS):
738                     index = F_DKDL;
739                     break;
740                 case (efptCOUL):
741                     index = F_DVDL_COUL;
742                     break;
743                 case (efptVDW):
744                     index = F_DVDL_VDW;
745                     break;
746                 case (efptBONDED):
747                     index = F_DVDL_BONDED;
748                     break;
749                 case (efptRESTRAINT):
750                     index = F_DVDL_RESTRAINT;
751                     break;
752                 default:
753                     index = F_DVDL;
754                     break;
755             }
756             enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
757             if (debug)
758             {
759                 fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
760                         efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
761             }
762         }
763         else
764         {
765             enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
766             if (debug)
767             {
768                 fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
769                         efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
770             }
771         }
772     }
773
774     if (fepvals->separate_dvdl[efptBONDED])
775     {
776         enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
777     }
778     else
779     {
780         enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];
781     }
782
783     for (int i = 0; i < fepvals->n_lambda; i++)
784     {
785         /* Note we are iterating over fepvals here!
786            For the current lambda, dlam = 0 automatically,
787            so we don't need to add anything to
788            enerd->enerpart_lambda[0]. */
789
790         /* we don't need to worry about dvdl_lin contributions to dE at
791            current lambda, because the contributions to the current
792            lambda are automatically zeroed */
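        /* Schematically (an added sketch, not exact bookkeeping): the stored
         * difference enerpart_lambda[i+1] - enerpart_lambda[0] approximates
         * E(lambda'_i) - E(lambda). Non-linear (soft-core) terms were
         * re-evaluated at lambda'_i earlier; the linear terms are added here
         * by extrapolation as dlam_j * dH/dlambda_j, including the constraint
         * and mass (dK/dlambda) contributions below.
         */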
793
794         double &enerpart_lambda = enerd->enerpart_lambda[i + 1];
795
796         for (size_t j = 0; j < lambda.size(); j++)
797         {
798             /* Note that this loop is over all dhdl components, not just the separated ones */
799             const double dlam  = fepvals->all_lambda[j][i] - lambda[j];
800
801             enerpart_lambda   += dlam*enerd->dvdl_lin[j];
802
803             /* Constraints can not be evaluated at foreign lambdas, so we add
804              * a linear extrapolation. This is an approximation, but usually
805              * quite accurate since constraints change little between lambdas.
806              */
807             if ((j == efptBONDED && fepvals->separate_dvdl[efptBONDED]) ||
808                 (j == efptFEP && !fepvals->separate_dvdl[efptBONDED]))
809             {
810                 enerpart_lambda += dlam*enerd->term[F_DVDL_CONSTR];
811             }
812
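            /* Note (added for clarity): perturbed masses contribute a kinetic
             * term dK/dlambda (F_DKDL) to deltaH; like the constraint term it
             * is not re-evaluated at foreign lambdas, so it is extrapolated
             * linearly here.
             */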
813             if (j == efptMASS)
814             {
815                 enerpart_lambda += dlam*enerd->term[F_DKDL];
816             }
817
818             if (debug)
819             {
820                 fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
821                         fepvals->all_lambda[j][i], efpt_names[j],
822                         enerpart_lambda - enerd->enerpart_lambda[0],
823                         dlam, enerd->dvdl_lin[j]);
824             }
825         }
826     }
827
828     /* The constraint contribution is now included in other terms, so clear it */
829     enerd->term[F_DVDL_CONSTR] = 0;
830 }
831
832
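/*! \brief Zero the foreign-lambda group-pair energies and potential energy terms. */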
833 void reset_foreign_enerdata(gmx_enerdata_t *enerd)
834 {
835     int  i, j;
836
837     /* First reset all foreign energy components; the foreign-energy routines are
838        always called on neighbor search steps */
839     for (i = 0; (i < egNR); i++)
840     {
841         for (j = 0; (j < enerd->grpp.nener); j++)
842         {
843             enerd->foreign_grpp.ener[i][j] = 0.0;
844         }
845     }
846
847     /* potential energy components */
848     for (i = 0; (i <= F_EPOT); i++)
849     {
850         enerd->foreign_term[i] = 0.0;
851     }
852 }
853
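/*! \brief Reset all energy accumulators (group-pair, per-term, dV/dlambda and
 * foreign-lambda) to zero before a new force/energy evaluation. */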
854 void reset_enerdata(gmx_enerdata_t *enerd)
855 {
856     int      i, j;
857
858     /* First reset all energy components. */
859     for (i = 0; (i < egNR); i++)
860     {
861         for (j = 0; (j < enerd->grpp.nener); j++)
862         {
863             enerd->grpp.ener[i][j] = 0.0;
864         }
865     }
866     for (i = 0; i < efptNR; i++)
867     {
868         enerd->dvdl_lin[i]    = 0.0;
869         enerd->dvdl_nonlin[i] = 0.0;
870     }
871
872     /* Normal potential energy components */
873     for (i = 0; (i <= F_EPOT); i++)
874     {
875         enerd->term[i] = 0.0;
876     }
877     enerd->term[F_DVDL]            = 0.0;
878     enerd->term[F_DVDL_COUL]       = 0.0;
879     enerd->term[F_DVDL_VDW]        = 0.0;
880     enerd->term[F_DVDL_BONDED]     = 0.0;
881     enerd->term[F_DVDL_RESTRAINT]  = 0.0;
882     enerd->term[F_DKDL]            = 0.0;
883     if (enerd->n_lambda > 0)
884     {
885         for (i = 0; i < enerd->n_lambda; i++)
886         {
887             enerd->enerpart_lambda[i] = 0.0;
888         }
889     }
890     /* reset foreign energy data - separate function since we also call it elsewhere */
891     reset_foreign_enerdata(enerd);
892 }