[alexxy/gromacs.git] / src / gromacs / mdlib / force.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015,2016, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 #include "gmxpre.h"
38
39 #include "force.h"
40
41 #include "config.h"
42
43 #include <assert.h>
44 #include <math.h>
45 #include <string.h>
46
47 #include "gromacs/domdec/domdec.h"
48 #include "gromacs/domdec/domdec_struct.h"
49 #include "gromacs/ewald/ewald.h"
50 #include "gromacs/ewald/long-range-correction.h"
51 #include "gromacs/ewald/pme.h"
52 #include "gromacs/gmxlib/network.h"
53 #include "gromacs/gmxlib/nrnb.h"
54 #include "gromacs/gmxlib/nonbonded/nonbonded.h"
55 #include "gromacs/listed-forces/listed-forces.h"
56 #include "gromacs/math/vec.h"
57 #include "gromacs/math/vecdump.h"
58 #include "gromacs/mdlib/forcerec-threading.h"
59 #include "gromacs/mdlib/genborn.h"
60 #include "gromacs/mdlib/mdrun.h"
61 #include "gromacs/mdlib/ns.h"
62 #include "gromacs/mdlib/qmmm.h"
63 #include "gromacs/mdtypes/commrec.h"
64 #include "gromacs/mdtypes/inputrec.h"
65 #include "gromacs/mdtypes/md_enums.h"
66 #include "gromacs/pbcutil/ishift.h"
67 #include "gromacs/pbcutil/mshift.h"
68 #include "gromacs/pbcutil/pbc.h"
69 #include "gromacs/timing/wallcycle.h"
70 #include "gromacs/utility/cstringutil.h"
71 #include "gromacs/utility/exceptions.h"
72 #include "gromacs/utility/fatalerror.h"
73 #include "gromacs/utility/smalloc.h"
74
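/* Group-scheme neighbor searching: initializes the neighbor lists on first
 * use, performs the search via search_neighbours(), and optionally dumps the
 * resulting neighbor list for debugging when fr->ns->dump_nl > 0.
 */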
75 void ns(FILE              *fp,
76         t_forcerec        *fr,
77         matrix             box,
78         gmx_groups_t      *groups,
79         gmx_localtop_t    *top,
80         t_mdatoms         *md,
81         t_commrec         *cr,
82         t_nrnb            *nrnb,
83         gmx_bool           bFillGrid)
84 {
85     int     nsearch;
86
87
88     if (!fr->ns->nblist_initialized)
89     {
90         init_neighbor_list(fp, fr, md->homenr);
91     }
92
93     nsearch = search_neighbours(fp, fr, box, top, groups, cr, nrnb, md,
94                                 bFillGrid);
95     if (debug)
96     {
97         fprintf(debug, "nsearch = %d\n", nsearch);
98     }
99
100     /* Check whether we have to do dynamic load balancing */
101     /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
102        count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
103        &(top->idef),opts->ngener);
104      */
105     if (fr->ns->dump_nl > 0)
106     {
107         dump_nblist(fp, cr, fr, fr->ns->dump_nl);
108     }
109 }
110
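/* Reduces the per-thread Ewald-correction accumulators of threads
 * 1..nthreads-1 into the thread-0 results passed in by pointer.
 */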
111 static void reduce_thread_energies(tensor vir_q, tensor vir_lj,
112                                    real *Vcorr_q, real *Vcorr_lj,
113                                    real *dvdl_q, real *dvdl_lj,
114                                    int nthreads,
115                                    ewald_corr_thread_t *ewc_t)
116 {
117     int t;
118
119     for (t = 1; t < nthreads; t++)
120     {
121         *Vcorr_q  += ewc_t[t].Vcorr_q;
122         *Vcorr_lj += ewc_t[t].Vcorr_lj;
123         *dvdl_q   += ewc_t[t].dvdl[efptCOUL];
124         *dvdl_lj  += ewc_t[t].dvdl[efptVDW];
125         m_add(vir_q, ewc_t[t].vir_q, vir_q);
126         m_add(vir_lj, ewc_t[t].vir_lj, vir_lj);
127     }
128 }
129
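/* Computes the lower-level force and energy contributions for one step:
 * QM/MM, walls, group-scheme non-bonded kernels, generalized-Born terms,
 * listed (bonded) interactions, and the long-range Ewald/PME electrostatics
 * and LJ-PME parts (or the reaction-field exclusion correction), accumulating
 * energies and dV/dlambda contributions into enerd.
 */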
130 void do_force_lowlevel(t_forcerec *fr,      t_inputrec *ir,
131                        t_idef     *idef,    t_commrec  *cr,
132                        t_nrnb     *nrnb,    gmx_wallcycle_t wcycle,
133                        t_mdatoms  *md,
134                        rvec       x[],      history_t  *hist,
135                        rvec       f[],
136                        gmx_enerdata_t *enerd,
137                        t_fcdata   *fcd,
138                        gmx_localtop_t *top,
139                        gmx_genborn_t *born,
140                        gmx_bool       bBornRadii,
141                        matrix     box,
142                        t_lambda   *fepvals,
143                        real       *lambda,
144                        t_graph    *graph,
145                        t_blocka   *excl,
146                        rvec       mu_tot[],
147                        int        flags,
148                        float      *cycles_pme)
149 {
150     int         i, j;
151     int         donb_flags;
152     gmx_bool    bSB;
153     int         pme_flags;
154     matrix      boxs;
155     rvec        box_size;
156     t_pbc       pbc;
157     real        dvdl_dum[efptNR], dvdl_nb[efptNR];
158
159 #if GMX_MPI
160     double  t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
161 #endif
162
163     set_pbc(&pbc, fr->ePBC, box);
164
165     /* reset free energy components */
166     for (i = 0; i < efptNR; i++)
167     {
168         dvdl_nb[i]  = 0;
169         dvdl_dum[i] = 0;
170     }
171
172     /* Reset box */
173     for (i = 0; (i < DIM); i++)
174     {
175         box_size[i] = box[i][i];
176     }
177
178     /* do QMMM first if requested */
179     if (fr->bQMMM)
180     {
181         enerd->term[F_EQM] = calculate_QMMM(cr, x, f, fr);
182     }
183
184     /* Call the short range functions all in one go. */
185
186 #if GMX_MPI
187     /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
188 #define TAKETIME FALSE
189     if (TAKETIME)
190     {
191         MPI_Barrier(cr->mpi_comm_mygroup);
192         t0 = MPI_Wtime();
193     }
194 #endif
195
196     if (ir->nwall)
197     {
198         /* dV/dlambda contribution from the walls */
199         real dvdl_walls = do_walls(ir, fr, box, md, x, f, lambda[efptVDW],
200                                    enerd->grpp.ener[egLJSR], nrnb);
201         enerd->dvdl_lin[efptVDW] += dvdl_walls;
202     }
203
204     /* If doing GB, reset dvda and calculate the Born radii */
205     if (ir->implicit_solvent)
206     {
207         wallcycle_sub_start(wcycle, ewcsNONBONDED);
208
209         for (i = 0; i < born->nr; i++)
210         {
211             fr->dvda[i] = 0;
212         }
213
214         if (bBornRadii)
215         {
216             calc_gb_rad(cr, fr, ir, top, x, fr->gblist, born, md, nrnb);
217         }
218
219         wallcycle_sub_stop(wcycle, ewcsNONBONDED);
220     }
221
222     where();
223     /* We only do the non-bonded calculation with the group scheme here; the
224      * Verlet-scheme calls are done from do_force_cutsVERLET(). */
225     if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
226     {
227         donb_flags = 0;
228         /* Add short-range interactions */
229         donb_flags |= GMX_NONBONDED_DO_SR;
230
231         /* Currently all group scheme kernels always calculate (shift-)forces */
232         if (flags & GMX_FORCE_FORCES)
233         {
234             donb_flags |= GMX_NONBONDED_DO_FORCE;
235         }
236         if (flags & GMX_FORCE_VIRIAL)
237         {
238             donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
239         }
240         if (flags & GMX_FORCE_ENERGY)
241         {
242             donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
243         }
244
245         wallcycle_sub_start(wcycle, ewcsNONBONDED);
246         do_nonbonded(fr, x, f, md, excl,
247                      &enerd->grpp, nrnb,
248                      lambda, dvdl_nb, -1, -1, donb_flags);
249
250         /* If we are doing foreign lambdas and have soft-core interactions,
251          * we have to recalculate the (non-linear) energy contributions.
252          */
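        /* With soft-core, lambda enters the pair potential non-linearly, so
         * the foreign-lambda energies cannot be obtained by linear
         * extrapolation as E(lambda) + dlam*dE/dlambda; each foreign lambda
         * point requires a full re-evaluation of the kernels below.
         */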
253         if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
254         {
255             for (i = 0; i < enerd->n_lambda; i++)
256             {
257                 real lam_i[efptNR];
258
259                 for (j = 0; j < efptNR; j++)
260                 {
261                     lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
262                 }
263                 reset_foreign_enerdata(enerd);
264                 do_nonbonded(fr, x, f, md, excl,
265                              &(enerd->foreign_grpp), nrnb,
266                              lam_i, dvdl_dum, -1, -1,
267                              (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
268                 sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
269                 enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
270             }
271         }
272         wallcycle_sub_stop(wcycle, ewcsNONBONDED);
273         where();
274     }
275
276     /* If we are doing GB, calculate bonded forces and apply corrections
277      * to the solvation forces */
278     /* MRS: Eventually, we may need to include the free energy contribution here! */
279     if (ir->implicit_solvent)
280     {
281         wallcycle_sub_start(wcycle, ewcsLISTED);
282         calc_gb_forces(cr, md, born, top, x, f, fr, idef,
283                        ir->gb_algorithm, ir->sa_algorithm, nrnb, &pbc, graph, enerd);
284         wallcycle_sub_stop(wcycle, ewcsLISTED);
285     }
286
287 #if GMX_MPI
288     if (TAKETIME)
289     {
290         t1          = MPI_Wtime();
291         fr->t_fnbf += t1-t0;
292     }
293 #endif
294
295     if (fepvals->sc_alpha != 0)
296     {
297         enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
298     }
299     else
300     {
301         enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
302     }
303
304     if (fepvals->sc_alpha != 0)
305     {
306         /* Even though the Coulomb part is linear, we have already added it,
307            because we need to go through the vdW calculation anyway. */
308
309         enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
310     }
311     else
312     {
313         enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
314     }
315
316     if (debug)
317     {
318         pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
319     }
320
321     /* Shift the coordinates. This must be done before listed forces and PPPM,
322      * but is also necessary for SHAKE and update; therefore it can NOT be
323      * skipped even when no listed forces have to be evaluated.
324      *
325      * The shifting and PBC code is deliberately not timed, since with
326      * the Verlet scheme it only takes non-zero time with triclinic
327      * boxes, and even then the time is around a factor of 100 less
328      * than the next smallest counter.
329      */
330
331
332     /* Sometimes (with NBFonly) we would not need to shift here,
333      * but we do so anyhow for consistency of the returned coordinates.
334      */
335     if (graph)
336     {
337         shift_self(graph, box, x);
338         if (TRICLINIC(box))
339         {
340             inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
341         }
342         else
343         {
344             inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
345         }
346     }
347     /* Check whether we need to do listed interactions or correct for exclusions */
348     if (fr->bMolPBC &&
349         ((flags & GMX_FORCE_LISTED)
350          || EEL_RF(fr->eeltype) || EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype)))
351     {
352         /* TODO There are no electrostatics methods that require this
353            transformation when using the Verlet scheme, so update the
354            above conditional. */
355         /* Since all atoms are in the rectangular or triclinic unit-cell,
356          * only single box vector shifts (2 in x) are required.
357          */
358         set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : nullptr,
359                    TRUE, box);
360     }
361
362     do_force_listed(wcycle, box, ir->fepvals, cr->ms,
363                     idef, (const rvec *) x, hist, f, fr,
364                     &pbc, graph, enerd, nrnb, lambda, md, fcd,
365                     DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr,
366                     flags);
367
368     where();
369
370     *cycles_pme = 0;
371     clear_mat(fr->vir_el_recip);
372     clear_mat(fr->vir_lj_recip);
373
374     /* Do long-range electrostatics and/or LJ-PME, including related short-range
375      * corrections.
376      */
377     if (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype))
378     {
379         int  status            = 0;
380         real Vlr_q             = 0, Vlr_lj = 0, Vcorr_q = 0, Vcorr_lj = 0;
381         real dvdl_long_range_q = 0, dvdl_long_range_lj = 0;
382
383         bSB = (ir->nwall == 2);
384         if (bSB)
385         {
386             copy_mat(box, boxs);
387             svmul(ir->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
388             box_size[ZZ] *= ir->wall_ewald_zfac;
389         }
390
391         if (EEL_PME_EWALD(fr->eeltype) || EVDW_PME(fr->vdwtype))
392         {
393             real dvdl_long_range_correction_q   = 0;
394             real dvdl_long_range_correction_lj  = 0;
395             /* With the Verlet scheme exclusion forces are calculated
396              * in the non-bonded kernel.
397              */
398             /* The TPI molecule does not have exclusions with the rest
399              * of the system and no intra-molecular PME grid
400              * contributions will be calculated in
401              * gmx_pme_calc_energy.
402              */
403             if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
404                 ir->ewald_geometry != eewg3D ||
405                 ir->epsilon_surface != 0)
406             {
407                 int nthreads, t;
408
409                 wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);
410
411                 if (fr->n_tpi > 0)
412                 {
413                     gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
414                 }
415
416                 nthreads = fr->nthread_ewc;
417 #pragma omp parallel for num_threads(nthreads) schedule(static)
418                 for (t = 0; t < nthreads; t++)
419                 {
420                     try
421                     {
422                         tensor *vir_q, *vir_lj;
423                         real   *Vcorrt_q, *Vcorrt_lj, *dvdlt_q, *dvdlt_lj;
424                         if (t == 0)
425                         {
426                             vir_q     = &fr->vir_el_recip;
427                             vir_lj    = &fr->vir_lj_recip;
428                             Vcorrt_q  = &Vcorr_q;
429                             Vcorrt_lj = &Vcorr_lj;
430                             dvdlt_q   = &dvdl_long_range_correction_q;
431                             dvdlt_lj  = &dvdl_long_range_correction_lj;
432                         }
433                         else
434                         {
435                             vir_q     = &fr->ewc_t[t].vir_q;
436                             vir_lj    = &fr->ewc_t[t].vir_lj;
437                             Vcorrt_q  = &fr->ewc_t[t].Vcorr_q;
438                             Vcorrt_lj = &fr->ewc_t[t].Vcorr_lj;
439                             dvdlt_q   = &fr->ewc_t[t].dvdl[efptCOUL];
440                             dvdlt_lj  = &fr->ewc_t[t].dvdl[efptVDW];
441                             clear_mat(*vir_q);
442                             clear_mat(*vir_lj);
443                         }
444                         *dvdlt_q  = 0;
445                         *dvdlt_lj = 0;
446
447                         /* Threading is only supported with the Verlet cut-off
448                          * scheme and then only single particle forces (no
449                          * exclusion forces) are calculated, so we can store
450                          * the forces in the normal, single fr->f_novirsum array.
451                          */
452                         ewald_LRcorrection(md->homenr, cr, nthreads, t, fr,
453                                            md->chargeA, md->chargeB,
454                                            md->sqrt_c6A, md->sqrt_c6B,
455                                            md->sigmaA, md->sigmaB,
456                                            md->sigma3A, md->sigma3B,
457                                            md->nChargePerturbed || md->nTypePerturbed,
458                                            ir->cutoff_scheme != ecutsVERLET,
459                                            excl, x, bSB ? boxs : box, mu_tot,
460                                            ir->ewald_geometry,
461                                            ir->epsilon_surface,
462                                            fr->f_novirsum, *vir_q, *vir_lj,
463                                            Vcorrt_q, Vcorrt_lj,
464                                            lambda[efptCOUL], lambda[efptVDW],
465                                            dvdlt_q, dvdlt_lj);
466                     }
467                     GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
468                 }
469                 if (nthreads > 1)
470                 {
471                     reduce_thread_energies(fr->vir_el_recip, fr->vir_lj_recip,
472                                            &Vcorr_q, &Vcorr_lj,
473                                            &dvdl_long_range_correction_q,
474                                            &dvdl_long_range_correction_lj,
475                                            nthreads, fr->ewc_t);
476                 }
477                 wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
478             }
479
480             if (EEL_PME_EWALD(fr->eeltype) && fr->n_tpi == 0)
481             {
482                 /* This is not in a subcounter because it takes a
483                    negligible and constant-sized amount of time */
484                 Vcorr_q += ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
485                                                    &dvdl_long_range_correction_q,
486                                                    fr->vir_el_recip);
487             }
488
489             enerd->dvdl_lin[efptCOUL] += dvdl_long_range_correction_q;
490             enerd->dvdl_lin[efptVDW]  += dvdl_long_range_correction_lj;
491
492             if ((EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype)) && (cr->duty & DUTY_PME))
493             {
494                 /* Do reciprocal PME for Coulomb and/or LJ. */
495                 assert(fr->n_tpi >= 0);
496                 if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
497                 {
498                     pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;
499                     if (EEL_PME(fr->eeltype))
500                     {
501                         pme_flags     |= GMX_PME_DO_COULOMB;
502                     }
503                     if (EVDW_PME(fr->vdwtype))
504                     {
505                         pme_flags |= GMX_PME_DO_LJ;
506                     }
507                     if (flags & GMX_FORCE_FORCES)
508                     {
509                         pme_flags |= GMX_PME_CALC_F;
510                     }
511                     if (flags & GMX_FORCE_VIRIAL)
512                     {
513                         pme_flags |= GMX_PME_CALC_ENER_VIR;
514                     }
515                     if (fr->n_tpi > 0)
516                     {
517                         /* We don't calculate f, but we do want the potential */
518                         pme_flags |= GMX_PME_CALC_POT;
519                     }
520                     wallcycle_start(wcycle, ewcPMEMESH);
521                     status = gmx_pme_do(fr->pmedata,
522                                         0, md->homenr - fr->n_tpi,
523                                         x, fr->f_novirsum,
524                                         md->chargeA, md->chargeB,
525                                         md->sqrt_c6A, md->sqrt_c6B,
526                                         md->sigmaA, md->sigmaB,
527                                         bSB ? boxs : box, cr,
528                                         DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
529                                         DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
530                                         nrnb, wcycle,
531                                         fr->vir_el_recip, fr->ewaldcoeff_q,
532                                         fr->vir_lj_recip, fr->ewaldcoeff_lj,
533                                         &Vlr_q, &Vlr_lj,
534                                         lambda[efptCOUL], lambda[efptVDW],
535                                         &dvdl_long_range_q, &dvdl_long_range_lj, pme_flags);
536                     *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
537                     if (status != 0)
538                     {
539                         gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
540                     }
541                     /* We should try to do as little computation after
542                      * this as possible, because parallel PME synchronizes
543                      * the nodes, so we want all load imbalance of the
544                      * rest of the force calculation to be before the PME
545                      * call.  DD load balancing is done on the whole time
546                      * of the force call (without PME).
547                      */
548                 }
549                 if (fr->n_tpi > 0)
550                 {
551                     if (EVDW_PME(ir->vdwtype))
552                     {
554                         gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");
555                     }
556                     /* Determine the PME grid energy of the test molecule
557                      * with the PME grid potential of the other charges.
558                      */
559                     gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
560                                         x + md->homenr - fr->n_tpi,
561                                         md->chargeA + md->homenr - fr->n_tpi,
562                                         &Vlr_q);
563                 }
564             }
565         }
566
567         if (!EEL_PME(fr->eeltype) && EEL_PME_EWALD(fr->eeltype))
568         {
569             Vlr_q = do_ewald(ir, x, fr->f_novirsum,
570                              md->chargeA, md->chargeB,
571                              box_size, cr, md->homenr,
572                              fr->vir_el_recip, fr->ewaldcoeff_q,
573                              lambda[efptCOUL], &dvdl_long_range_q, fr->ewald_table);
574         }
575
576         /* Note that with separate PME nodes we get the real energies later */
577         enerd->dvdl_lin[efptCOUL] += dvdl_long_range_q;
578         enerd->dvdl_lin[efptVDW]  += dvdl_long_range_lj;
579         enerd->term[F_COUL_RECIP]  = Vlr_q + Vcorr_q;
580         enerd->term[F_LJ_RECIP]    = Vlr_lj + Vcorr_lj;
581         if (debug)
582         {
583             fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
584                     Vlr_q, Vcorr_q, enerd->term[F_COUL_RECIP]);
585             pr_rvecs(debug, 0, "vir_el_recip after corr", fr->vir_el_recip, DIM);
586             pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
587             fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
588                     Vlr_lj, Vcorr_lj, enerd->term[F_LJ_RECIP]);
589             pr_rvecs(debug, 0, "vir_lj_recip after corr", fr->vir_lj_recip, DIM);
590         }
591     }
592     else
593     {
594         /* Is there a reaction-field exclusion correction needed?
595          * With the Verlet scheme, exclusion forces are calculated
596          * in the non-bonded kernel.
597          */
598         if (ir->cutoff_scheme != ecutsVERLET && EEL_RF(fr->eeltype))
599         {
600             real dvdl_rf_excl      = 0;
601             enerd->term[F_RF_EXCL] =
602                 RF_excl_correction(fr, graph, md, excl, x, f,
603                                    fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);
604
605             enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
606         }
607     }
608     where();
609
610     if (debug)
611     {
612         print_nrnb(debug, nrnb);
613     }
614
615 #if GMX_MPI
616     if (TAKETIME)
617     {
618         t2 = MPI_Wtime();
619         MPI_Barrier(cr->mpi_comm_mygroup);
620         t3          = MPI_Wtime();
621         fr->t_wait += t3-t2;
622         if (fr->timesteps == 11)
623         {
624             char buf[22];
625             fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
626                     cr->nodeid, gmx_step_str(fr->timesteps, buf),
627                     100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
628                     (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
629         }
630         fr->timesteps++;
631     }
632 #endif
633
634     if (debug)
635     {
636         pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
637     }
638
639 }
640
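/* Allocates and zeroes the ngener*ngener group-pair energy matrices and,
 * when n_lambda > 0, the foreign-lambda energy array (n_lambda + 1 entries:
 * the current lambda plus all foreign lambdas).
 *
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 *
 *     gmx_enerdata_t *enerd;
 *     snew(enerd, 1);
 *     init_enerdata(ngener, n_lambda, enerd);
 *     ...
 *     reset_enerdata(enerd);    // before each force/energy evaluation
 *     ...
 *     destroy_enerdata(enerd);
 *     sfree(enerd);
 */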
641 void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)
642 {
643     int i, n2;
644
645     for (i = 0; i < F_NRE; i++)
646     {
647         enerd->term[i]         = 0;
648         enerd->foreign_term[i] = 0;
649     }
650
651
652     for (i = 0; i < efptNR; i++)
653     {
654         enerd->dvdl_lin[i]     = 0;
655         enerd->dvdl_nonlin[i]  = 0;
656     }
657
658     n2 = ngener*ngener;
659     if (debug)
660     {
661         fprintf(debug, "Creating %d sized group matrix for energies\n", n2);
662     }
663     enerd->grpp.nener         = n2;
664     enerd->foreign_grpp.nener = n2;
665     for (i = 0; (i < egNR); i++)
666     {
667         snew(enerd->grpp.ener[i], n2);
668         snew(enerd->foreign_grpp.ener[i], n2);
669     }
670
671     if (n_lambda)
672     {
673         enerd->n_lambda = 1 + n_lambda;
674         snew(enerd->enerpart_lambda, enerd->n_lambda);
675     }
676     else
677     {
678         enerd->n_lambda = 0;
679     }
680 }
681
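/* Frees the group-pair energy arrays and, if present, the foreign-lambda
 * array allocated by init_enerdata().
 */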
682 void destroy_enerdata(gmx_enerdata_t *enerd)
683 {
684     int i;
685
686     for (i = 0; (i < egNR); i++)
687     {
688         sfree(enerd->grpp.ener[i]);
689     }
690
691     for (i = 0; (i < egNR); i++)
692     {
693         sfree(enerd->foreign_grpp.ener[i]);
694     }
695
696     if (enerd->n_lambda)
697     {
698         sfree(enerd->enerpart_lambda);
699     }
700 }
701
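/* Returns the sum of the n values in v. */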
702 static real sum_v(int n, real v[])
703 {
704     real t;
705     int  i;
706
707     t = 0.0;
708     for (i = 0; (i < n); i++)
709     {
710         t = t + v[i];
711     }
712
713     return t;
714 }
715
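/* Sums the group-pair energies into the corresponding potential-energy terms
 * and accumulates the total potential in epot[F_EPOT], excluding the purely
 * diagnostic terms F_DISRESVIOL and F_ORIRESDEV.
 */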
716 void sum_epot(gmx_grppairener_t *grpp, real *epot)
717 {
718     int i;
719
720     /* Accumulate energies */
721     epot[F_COUL_SR]  = sum_v(grpp->nener, grpp->ener[egCOULSR]);
722     epot[F_LJ]       = sum_v(grpp->nener, grpp->ener[egLJSR]);
723     epot[F_LJ14]     = sum_v(grpp->nener, grpp->ener[egLJ14]);
724     epot[F_COUL14]   = sum_v(grpp->nener, grpp->ener[egCOUL14]);
725     /* We have already added 1-2, 1-3, and 1-4 terms to F_GBPOL */
726     epot[F_GBPOL]   += sum_v(grpp->nener, grpp->ener[egGB]);
727
728     /* The lattice part of LR doesn't belong to any group
729      * and has been added earlier.
730      */
731     epot[F_BHAM]     = sum_v(grpp->nener, grpp->ener[egBHAMSR]);
732
733     epot[F_EPOT] = 0;
734     for (i = 0; (i < F_EPOT); i++)
735     {
736         if (i != F_DISRESVIOL && i != F_ORIRESDEV)
737         {
738             epot[F_EPOT] += epot[i];
739         }
740     }
741 }
742
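/* Collects the linear and non-linear dV/dlambda contributions into the
 * (optionally separated) dH/dlambda energy terms, folds in the constraint
 * contribution, and adds the linear-in-lambda part of the foreign-lambda
 * energy differences:
 *
 *     enerpart_lambda[i+1] += (all_lambda[j][i] - lambda[j]) * dvdl_lin[j]
 *
 * summed over all lambda components j, as implemented in the loop below.
 */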
743 void sum_dhdl(gmx_enerdata_t *enerd, real *lambda, t_lambda *fepvals)
744 {
745     int    i, j, index;
746     double dlam;
747
748     enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW];  /* include dispersion correction */
749     enerd->term[F_DVDL]       = 0.0;
750     for (i = 0; i < efptNR; i++)
751     {
752         if (fepvals->separate_dvdl[i])
753         {
754             /* could this be done more readably/compactly? */
755             switch (i)
756             {
757                 case (efptMASS):
758                     index = F_DKDL;
759                     break;
760                 case (efptCOUL):
761                     index = F_DVDL_COUL;
762                     break;
763                 case (efptVDW):
764                     index = F_DVDL_VDW;
765                     break;
766                 case (efptBONDED):
767                     index = F_DVDL_BONDED;
768                     break;
769                 case (efptRESTRAINT):
770                     index = F_DVDL_RESTRAINT;
771                     break;
772                 default:
773                     index = F_DVDL;
774                     break;
775             }
776             enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
777             if (debug)
778             {
779                 fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
780                         efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
781             }
782         }
783         else
784         {
785             enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
786             if (debug)
787             {
788                 fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
789                         efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
790             }
791         }
792     }
793
794     /* Notes on the foreign lambda free energy difference evaluation:
795      * Adding the potential and ekin terms that depend linearly on lambda
796      * as delta lam * dvdl to the energy differences is exact.
797      * For the constraints this is not exact, but we have no other option
798      * without literally changing the lengths and reevaluating the energies at each step.
799      * (try to remedy this post 4.6 - MRS)
800      */
801     if (fepvals->separate_dvdl[efptBONDED])
802     {
803         enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
804     }
805     else
806     {
807         enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];
808     }
809     enerd->term[F_DVDL_CONSTR] = 0;
810
811     for (i = 0; i < fepvals->n_lambda; i++)
812     {
813         /* Note that we are iterating over fepvals here!
814            For the current lambda, dlam = 0 automatically,
815            so we don't need to add anything to
816            enerd->enerpart_lambda[0]. */
817
818         /* we don't need to worry about dvdl_lin contributions to dE at
819            current lambda, because the contributions to the current
820            lambda are automatically zeroed */
821
822         for (j = 0; j < efptNR; j++)
823         {
824             /* Note that this loop is over all dhdl components, not just the separated ones */
825             dlam = (fepvals->all_lambda[j][i]-lambda[j]);
826             enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j];
827             if (debug)
828             {
829                 fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
830                         fepvals->all_lambda[j][i], efpt_names[j],
831                         (enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]),
832                         dlam, enerd->dvdl_lin[j]);
833             }
834         }
835     }
836 }
837
838
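/* Zeroes the foreign-lambda group-pair energies and the foreign potential
 * energy terms up to and including F_EPOT.
 */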
839 void reset_foreign_enerdata(gmx_enerdata_t *enerd)
840 {
841     int  i, j;
842
843     /* First reset all foreign energy components; foreign energies are always
844        evaluated on neighbor-search steps. */
845     for (i = 0; (i < egNR); i++)
846     {
847         for (j = 0; (j < enerd->grpp.nener); j++)
848         {
849             enerd->foreign_grpp.ener[i][j] = 0.0;
850         }
851     }
852
853     /* potential energy components */
854     for (i = 0; (i <= F_EPOT); i++)
855     {
856         enerd->foreign_term[i] = 0.0;
857     }
858 }
859
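/* Zeroes all group-pair energies, dV/dlambda accumulators, potential energy
 * terms, dH/dlambda terms and foreign-lambda energies before a new
 * evaluation; also resets the foreign energy data via reset_foreign_enerdata().
 */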
860 void reset_enerdata(gmx_enerdata_t *enerd)
861 {
862     int      i, j;
863
864     /* First reset all energy components. */
865     for (i = 0; (i < egNR); i++)
866     {
867         for (j = 0; (j < enerd->grpp.nener); j++)
868         {
869             enerd->grpp.ener[i][j] = 0.0;
870         }
871     }
872     for (i = 0; i < efptNR; i++)
873     {
874         enerd->dvdl_lin[i]    = 0.0;
875         enerd->dvdl_nonlin[i] = 0.0;
876     }
877
878     /* Normal potential energy components */
879     for (i = 0; (i <= F_EPOT); i++)
880     {
881         enerd->term[i] = 0.0;
882     }
883     enerd->term[F_DVDL]            = 0.0;
884     enerd->term[F_DVDL_COUL]       = 0.0;
885     enerd->term[F_DVDL_VDW]        = 0.0;
886     enerd->term[F_DVDL_BONDED]     = 0.0;
887     enerd->term[F_DVDL_RESTRAINT]  = 0.0;
888     enerd->term[F_DKDL]            = 0.0;
889     if (enerd->n_lambda > 0)
890     {
891         for (i = 0; i < enerd->n_lambda; i++)
892         {
893             enerd->enerpart_lambda[i] = 0.0;
894         }
895     }
896     /* reset foreign energy data - separate function since we also call it elsewhere */
897     reset_foreign_enerdata(enerd);
898 }