Move endian, float format and XDR settings to cmakedefine01
[alexxy/gromacs.git] / src / gromacs / mdlib / force.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014,2015, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 #include "gmxpre.h"
38
39 #include "gromacs/legacyheaders/force.h"
40
41 #include "config.h"
42
43 #include <assert.h>
44 #include <math.h>
45 #include <string.h>
46
47 #include "gromacs/domdec/domdec.h"
48 #include "gromacs/ewald/ewald.h"
49 #include "gromacs/ewald/long-range-correction.h"
50 #include "gromacs/ewald/pme.h"
51 #include "gromacs/legacyheaders/gmx_omp_nthreads.h"
52 #include "gromacs/legacyheaders/macros.h"
53 #include "gromacs/legacyheaders/mdrun.h"
54 #include "gromacs/legacyheaders/names.h"
55 #include "gromacs/legacyheaders/network.h"
56 #include "gromacs/legacyheaders/nonbonded.h"
57 #include "gromacs/legacyheaders/nrnb.h"
58 #include "gromacs/legacyheaders/ns.h"
59 #include "gromacs/legacyheaders/qmmm.h"
60 #include "gromacs/legacyheaders/txtdump.h"
61 #include "gromacs/legacyheaders/typedefs.h"
62 #include "gromacs/legacyheaders/types/commrec.h"
63 #include "gromacs/listed-forces/listed-forces.h"
64 #include "gromacs/math/vec.h"
65 #include "gromacs/mdlib/forcerec-threading.h"
66 #include "gromacs/pbcutil/ishift.h"
67 #include "gromacs/pbcutil/mshift.h"
68 #include "gromacs/pbcutil/pbc.h"
69 #include "gromacs/timing/wallcycle.h"
70 #include "gromacs/utility/cstringutil.h"
71 #include "gromacs/utility/fatalerror.h"
72 #include "gromacs/utility/smalloc.h"
73
74 void ns(FILE              *fp,
75         t_forcerec        *fr,
76         matrix             box,
77         gmx_groups_t      *groups,
78         gmx_localtop_t    *top,
79         t_mdatoms         *md,
80         t_commrec         *cr,
81         t_nrnb            *nrnb,
82         gmx_bool           bFillGrid,
83         gmx_bool           bDoLongRangeNS)
84 {
85     int     nsearch;
86
87
88     if (!fr->ns.nblist_initialized)
89     {
90         init_neighbor_list(fp, fr, md->homenr);
91     }
92
93     if (fr->bTwinRange)
94     {
95         fr->nlr = 0;
96     }
97
98     nsearch = search_neighbours(fp, fr, box, top, groups, cr, nrnb, md,
99                                 bFillGrid, bDoLongRangeNS);
100     if (debug)
101     {
102         fprintf(debug, "nsearch = %d\n", nsearch);
103     }
104
105     /* Check whether we have to do dynamic load balancing */
106     /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
107        count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
108        &(top->idef),opts->ngener);
109      */
110     if (fr->ns.dump_nl > 0)
111     {
112         dump_nblist(fp, cr, fr, fr->ns.dump_nl);
113     }
114 }
115
116 static void reduce_thread_forces(int n, rvec *f,
117                                  tensor vir_q, tensor vir_lj,
118                                  real *Vcorr_q, real *Vcorr_lj,
119                                  real *dvdl_q, real *dvdl_lj,
120                                  int nthreads, f_thread_t *f_t)
121 {
122     int t, i;
123     int nthreads_loop gmx_unused;
124
125     // cppcheck-suppress unreadVariable
126     nthreads_loop = gmx_omp_nthreads_get(emntBonded);
127     /* This reduction can run over any number of threads */
128 #pragma omp parallel for num_threads(nthreads_loop) private(t) schedule(static)
129     for (i = 0; i < n; i++)
130     {
131         for (t = 1; t < nthreads; t++)
132         {
133             rvec_inc(f[i], f_t[t].f[i]);
134         }
135     }
136     for (t = 1; t < nthreads; t++)
137     {
138         *Vcorr_q  += f_t[t].Vcorr_q;
139         *Vcorr_lj += f_t[t].Vcorr_lj;
140         *dvdl_q   += f_t[t].dvdl[efptCOUL];
141         *dvdl_lj  += f_t[t].dvdl[efptVDW];
142         m_add(vir_q, f_t[t].vir_q, vir_q);
143         m_add(vir_lj, f_t[t].vir_lj, vir_lj);
144     }
145 }
146
/* Compute the "low-level" force and energy contributions for one step.
 *
 * In order, this evaluates: QM/MM, wall potentials, GB Born radii,
 * group-scheme short-range non-bondeds (including foreign-lambda
 * re-evaluation for soft-core free energy), GB polarization forces,
 * listed (bonded) interactions, and the long-range part of
 * electrostatics/LJ-PME with its exclusion and net-charge corrections,
 * or the reaction-field exclusion correction otherwise.
 * Verlet-scheme non-bondeds are NOT handled here; they are called from
 * do_force_cutsVERLET() (see comment below).
 *
 * Outputs are accumulated into f, f_longrange, fr->f_novirsum, enerd
 * (energy terms and dvdl components) and the virial tensors in fr.
 * *cycles_pme receives the cycle count of the PME mesh part (0 if no
 * PME mesh work was done on this rank).
 */
void do_force_lowlevel(t_forcerec *fr,      t_inputrec *ir,
                       t_idef     *idef,    t_commrec  *cr,
                       t_nrnb     *nrnb,    gmx_wallcycle_t wcycle,
                       t_mdatoms  *md,
                       rvec       x[],      history_t  *hist,
                       rvec       f[],
                       rvec       f_longrange[],
                       gmx_enerdata_t *enerd,
                       t_fcdata   *fcd,
                       gmx_localtop_t *top,
                       gmx_genborn_t *born,
                       gmx_bool       bBornRadii,
                       matrix     box,
                       t_lambda   *fepvals,
                       real       *lambda,
                       t_graph    *graph,
                       t_blocka   *excl,
                       rvec       mu_tot[],
                       int        flags,
                       float      *cycles_pme)
{
    int         i, j;
    int         donb_flags;
    gmx_bool    bSB;
    int         pme_flags;
    matrix      boxs;
    rvec        box_size;
    t_pbc       pbc;
    real        dvdl_dum[efptNR], dvdl_nb[efptNR];

#ifdef GMX_MPI
    double  t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
#endif

    set_pbc(&pbc, fr->ePBC, box);

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i]  = 0;
        dvdl_dum[i] = 0;
    }

    /* Reset box: box_size holds the diagonal, used by do_ewald below */
    for (i = 0; (i < DIM); i++)
    {
        box_size[i] = box[i][i];
    }

    debug_gmx();

    /* do QMMM first if requested */
    if (fr->bQMMM)
    {
        enerd->term[F_EQM] = calculate_QMMM(cr, x, f, fr);
    }

    /* Call the short range functions all in one go. */

#ifdef GMX_MPI
    /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
#define TAKETIME FALSE
    if (TAKETIME)
    {
        MPI_Barrier(cr->mpi_comm_mygroup);
        t0 = MPI_Wtime();
    }
#endif

    if (ir->nwall)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(ir, fr, box, md, x, f, lambda[efptVDW],
                                   enerd->grpp.ener[egLJSR], nrnb);
        enerd->dvdl_lin[efptVDW] += dvdl_walls;
    }

    /* If doing GB, reset dvda and calculate the Born radii */
    if (ir->implicit_solvent)
    {
        wallcycle_sub_start(wcycle, ewcsNONBONDED);

        for (i = 0; i < born->nr; i++)
        {
            fr->dvda[i] = 0;
        }

        if (bBornRadii)
        {
            calc_gb_rad(cr, fr, ir, top, x, &(fr->gblist), born, md, nrnb);
        }

        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
    }

    where();
    /* We only do non-bonded calculation with group scheme here, the verlet
     * calls are done from do_force_cutsVERLET(). */
    if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
    {
        donb_flags = 0;
        /* Add short-range interactions */
        donb_flags |= GMX_NONBONDED_DO_SR;

        /* Currently all group scheme kernels always calculate (shift-)forces */
        if (flags & GMX_FORCE_FORCES)
        {
            donb_flags |= GMX_NONBONDED_DO_FORCE;
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
        }
        if (flags & GMX_FORCE_ENERGY)
        {
            donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
        }
        if (flags & GMX_FORCE_DO_LR)
        {
            donb_flags |= GMX_NONBONDED_DO_LR;
        }

        wallcycle_sub_start(wcycle, ewcsNONBONDED);
        do_nonbonded(fr, x, f, f_longrange, md, excl,
                     &enerd->grpp, nrnb,
                     lambda, dvdl_nb, -1, -1, donb_flags);

        /* If we do foreign lambda and we have soft-core interactions
         * we have to recalculate the (non-linear) energies contributions.
         */
        if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
        {
            for (i = 0; i < enerd->n_lambda; i++)
            {
                real lam_i[efptNR];

                /* i == 0 is the current lambda state; i-1 indexes the
                 * foreign lambda table otherwise */
                for (j = 0; j < efptNR; j++)
                {
                    lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
                }
                reset_foreign_enerdata(enerd);
                do_nonbonded(fr, x, f, f_longrange, md, excl,
                             &(enerd->foreign_grpp), nrnb,
                             lam_i, dvdl_dum, -1, -1,
                             (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
                sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
                enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
            }
        }
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
        where();
    }

    /* If we are doing GB, calculate bonded forces and apply corrections
     * to the solvation forces */
    /* MRS: Eventually, many need to include free energy contribution here! */
    if (ir->implicit_solvent)
    {
        wallcycle_sub_start(wcycle, ewcsLISTED);
        calc_gb_forces(cr, md, born, top, x, f, fr, idef,
                       ir->gb_algorithm, ir->sa_algorithm, nrnb, &pbc, graph, enerd);
        wallcycle_sub_stop(wcycle, ewcsLISTED);
    }

#ifdef GMX_MPI
    if (TAKETIME)
    {
        t1          = MPI_Wtime();
        fr->t_fnbf += t1-t0;
    }
#endif

    /* With soft-core the VdW dvdl is non-linear in lambda */
    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
    }
    else
    {
        enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
    }

    if (fepvals->sc_alpha != 0)

    /* even though coulomb part is linear, we already added it, because we
       need to go through the vdw calculation anyway */
    {
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    debug_gmx();


    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
    }

    /* Shift the coordinates. Must be done before listed forces and PPPM,
     * but is also necessary for SHAKE and update, therefore it can NOT
     * go when no listed forces have to be evaluated.
     *
     * The shifting and PBC code is deliberately not timed, since with
     * the Verlet scheme it only takes non-zero time with triclinic
     * boxes, and even then the time is around a factor of 100 less
     * than the next smallest counter.
     */


    /* Here sometimes we would not need to shift with NBFonly,
     * but we do so anyhow for consistency of the returned coordinates.
     */
    if (graph)
    {
        shift_self(graph, box, x);
        if (TRICLINIC(box))
        {
            inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
        }
        else
        {
            inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
        }
    }
    /* Check whether we need to do listed interactions or correct for exclusions */
    if (fr->bMolPBC &&
        ((flags & GMX_FORCE_LISTED)
         || EEL_RF(fr->eeltype) || EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype)))
    {
        /* TODO There are no electrostatics methods that require this
           transformation, when using the Verlet scheme, so update the
           above conditional. */
        /* Since all atoms are in the rectangular or triclinic unit-cell,
         * only single box vector shifts (2 in x) are required.
         */
        set_pbc_dd(&pbc, fr->ePBC, cr->dd, TRUE, box);
    }
    debug_gmx();

    do_force_listed(wcycle, box, ir->fepvals, cr->ms,
                    idef, (const rvec *) x, hist, f, fr,
                    &pbc, graph, enerd, nrnb, lambda, md, fcd,
                    DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL,
                    flags);

    where();

    *cycles_pme = 0;
    clear_mat(fr->vir_el_recip);
    clear_mat(fr->vir_lj_recip);

    /* Do long-range electrostatics and/or LJ-PME, including related short-range
     * corrections.
     */
    if (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype))
    {
        int  status            = 0;
        real Vlr_q             = 0, Vlr_lj = 0, Vcorr_q = 0, Vcorr_lj = 0;
        real dvdl_long_range_q = 0, dvdl_long_range_lj = 0;

        /* With two walls the Ewald sum uses a box stretched in z
         * (3DC-like geometry), so use a scaled copy of the box below. */
        bSB = (ir->nwall == 2);
        if (bSB)
        {
            copy_mat(box, boxs);
            svmul(ir->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
            box_size[ZZ] *= ir->wall_ewald_zfac;
        }

        if (EEL_PME_EWALD(fr->eeltype) || EVDW_PME(fr->vdwtype))
        {
            real dvdl_long_range_correction_q   = 0;
            real dvdl_long_range_correction_lj  = 0;
            /* With the Verlet scheme exclusion forces are calculated
             * in the non-bonded kernel.
             */
            /* The TPI molecule does not have exclusions with the rest
             * of the system and no intra-molecular PME grid
             * contributions will be calculated in
             * gmx_pme_calc_energy.
             */
            if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
                ir->ewald_geometry != eewg3D ||
                ir->epsilon_surface != 0)
            {
                int nthreads, t;

                wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);

                if (fr->n_tpi > 0)
                {
                    gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
                }

                /* Threads > 0 accumulate into per-thread buffers in
                 * fr->f_t that are reduced afterwards; thread 0 writes
                 * directly into the master buffers. */
                nthreads = gmx_omp_nthreads_get(emntBonded);
#pragma omp parallel for num_threads(nthreads) schedule(static)
                for (t = 0; t < nthreads; t++)
                {
                    int     i;
                    rvec   *fnv;
                    tensor *vir_q, *vir_lj;
                    real   *Vcorrt_q, *Vcorrt_lj, *dvdlt_q, *dvdlt_lj;
                    if (t == 0)
                    {
                        fnv       = fr->f_novirsum;
                        vir_q     = &fr->vir_el_recip;
                        vir_lj    = &fr->vir_lj_recip;
                        Vcorrt_q  = &Vcorr_q;
                        Vcorrt_lj = &Vcorr_lj;
                        dvdlt_q   = &dvdl_long_range_correction_q;
                        dvdlt_lj  = &dvdl_long_range_correction_lj;
                    }
                    else
                    {
                        fnv       = fr->f_t[t].f;
                        vir_q     = &fr->f_t[t].vir_q;
                        vir_lj    = &fr->f_t[t].vir_lj;
                        Vcorrt_q  = &fr->f_t[t].Vcorr_q;
                        Vcorrt_lj = &fr->f_t[t].Vcorr_lj;
                        dvdlt_q   = &fr->f_t[t].dvdl[efptCOUL];
                        dvdlt_lj  = &fr->f_t[t].dvdl[efptVDW];
                        for (i = 0; i < fr->natoms_force; i++)
                        {
                            clear_rvec(fnv[i]);
                        }
                        clear_mat(*vir_q);
                        clear_mat(*vir_lj);
                    }
                    *dvdlt_q  = 0;
                    *dvdlt_lj = 0;

                    /* fr->excl_load partitions the exclusion list over threads */
                    ewald_LRcorrection(fr->excl_load[t], fr->excl_load[t+1],
                                       cr, t, fr,
                                       md->chargeA, md->chargeB,
                                       md->sqrt_c6A, md->sqrt_c6B,
                                       md->sigmaA, md->sigmaB,
                                       md->sigma3A, md->sigma3B,
                                       md->nChargePerturbed || md->nTypePerturbed,
                                       ir->cutoff_scheme != ecutsVERLET,
                                       excl, x, bSB ? boxs : box, mu_tot,
                                       ir->ewald_geometry,
                                       ir->epsilon_surface,
                                       fnv, *vir_q, *vir_lj,
                                       Vcorrt_q, Vcorrt_lj,
                                       lambda[efptCOUL], lambda[efptVDW],
                                       dvdlt_q, dvdlt_lj);
                }
                if (nthreads > 1)
                {
                    reduce_thread_forces(fr->natoms_force, fr->f_novirsum,
                                         fr->vir_el_recip, fr->vir_lj_recip,
                                         &Vcorr_q, &Vcorr_lj,
                                         &dvdl_long_range_correction_q,
                                         &dvdl_long_range_correction_lj,
                                         nthreads, fr->f_t);
                }
                wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
            }

            if (EEL_PME_EWALD(fr->eeltype) && fr->n_tpi == 0)
            {
                /* This is not in a subcounter because it takes a
                   negligible and constant-sized amount of time */
                Vcorr_q += ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
                                                   &dvdl_long_range_correction_q,
                                                   fr->vir_el_recip);
            }

            enerd->dvdl_lin[efptCOUL] += dvdl_long_range_correction_q;
            enerd->dvdl_lin[efptVDW]  += dvdl_long_range_correction_lj;

            if ((EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype)) && (cr->duty & DUTY_PME))
            {
                /* Do reciprocal PME for Coulomb and/or LJ. */
                assert(fr->n_tpi >= 0);
                if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
                {
                    pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;
                    if (EEL_PME(fr->eeltype))
                    {
                        pme_flags     |= GMX_PME_DO_COULOMB;
                    }
                    if (EVDW_PME(fr->vdwtype))
                    {
                        pme_flags |= GMX_PME_DO_LJ;
                    }
                    if (flags & GMX_FORCE_FORCES)
                    {
                        pme_flags |= GMX_PME_CALC_F;
                    }
                    if (flags & GMX_FORCE_VIRIAL)
                    {
                        pme_flags |= GMX_PME_CALC_ENER_VIR;
                    }
                    if (fr->n_tpi > 0)
                    {
                        /* We don't calculate f, but we do want the potential */
                        pme_flags |= GMX_PME_CALC_POT;
                    }
                    wallcycle_start(wcycle, ewcPMEMESH);
                    /* The TPI molecule's atoms (the last n_tpi) are excluded
                     * from the grid spread here. */
                    status = gmx_pme_do(fr->pmedata,
                                        0, md->homenr - fr->n_tpi,
                                        x, fr->f_novirsum,
                                        md->chargeA, md->chargeB,
                                        md->sqrt_c6A, md->sqrt_c6B,
                                        md->sigmaA, md->sigmaB,
                                        bSB ? boxs : box, cr,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
                                        nrnb, wcycle,
                                        fr->vir_el_recip, fr->ewaldcoeff_q,
                                        fr->vir_lj_recip, fr->ewaldcoeff_lj,
                                        &Vlr_q, &Vlr_lj,
                                        lambda[efptCOUL], lambda[efptVDW],
                                        &dvdl_long_range_q, &dvdl_long_range_lj, pme_flags);
                    *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
                    if (status != 0)
                    {
                        gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
                    }
                    /* We should try to do as little computation after
                     * this as possible, because parallel PME synchronizes
                     * the nodes, so we want all load imbalance of the
                     * rest of the force calculation to be before the PME
                     * call.  DD load balancing is done on the whole time
                     * of the force call (without PME).
                     */
                }
                if (fr->n_tpi > 0)
                {
                    if (EVDW_PME(ir->vdwtype))
                    {

                        gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");
                    }
                    /* Determine the PME grid energy of the test molecule
                     * with the PME grid potential of the other charges.
                     */
                    gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
                                        x + md->homenr - fr->n_tpi,
                                        md->chargeA + md->homenr - fr->n_tpi,
                                        &Vlr_q);
                }
            }
        }

        /* Plain (non-PME) Ewald summation */
        if (!EEL_PME(fr->eeltype) && EEL_PME_EWALD(fr->eeltype))
        {
            Vlr_q = do_ewald(ir, x, fr->f_novirsum,
                             md->chargeA, md->chargeB,
                             box_size, cr, md->homenr,
                             fr->vir_el_recip, fr->ewaldcoeff_q,
                             lambda[efptCOUL], &dvdl_long_range_q, fr->ewald_table);
        }

        /* Note that with separate PME nodes we get the real energies later */
        enerd->dvdl_lin[efptCOUL] += dvdl_long_range_q;
        enerd->dvdl_lin[efptVDW]  += dvdl_long_range_lj;
        enerd->term[F_COUL_RECIP]  = Vlr_q + Vcorr_q;
        enerd->term[F_LJ_RECIP]    = Vlr_lj + Vcorr_lj;
        if (debug)
        {
            fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
                    Vlr_q, Vcorr_q, enerd->term[F_COUL_RECIP]);
            pr_rvecs(debug, 0, "vir_el_recip after corr", fr->vir_el_recip, DIM);
            pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
            fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
                    Vlr_lj, Vcorr_lj, enerd->term[F_LJ_RECIP]);
            pr_rvecs(debug, 0, "vir_lj_recip after corr", fr->vir_lj_recip, DIM);
        }
    }
    else
    {
        /* Is there a reaction-field exclusion correction needed? */
        if (EEL_RF(fr->eeltype) && eelRF_NEC != fr->eeltype)
        {
            /* With the Verlet scheme, exclusion forces are calculated
             * in the non-bonded kernel.
             */
            if (ir->cutoff_scheme != ecutsVERLET)
            {
                real dvdl_rf_excl      = 0;
                enerd->term[F_RF_EXCL] =
                    RF_excl_correction(fr, graph, md, excl, x, f,
                                       fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);

                enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
            }
        }
    }
    where();
    debug_gmx();

    if (debug)
    {
        print_nrnb(debug, nrnb);
    }
    debug_gmx();

#ifdef GMX_MPI
    if (TAKETIME)
    {
        t2 = MPI_Wtime();
        MPI_Barrier(cr->mpi_comm_mygroup);
        t3          = MPI_Wtime();
        fr->t_wait += t3-t2;
        if (fr->timesteps == 11)
        {
            char buf[22];
            fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
                    cr->nodeid, gmx_step_str(fr->timesteps, buf),
                    100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
                    (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
        }
        fr->timesteps++;
    }
#endif

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
    }

}
673
674 void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)
675 {
676     int i, n2;
677
678     for (i = 0; i < F_NRE; i++)
679     {
680         enerd->term[i]         = 0;
681         enerd->foreign_term[i] = 0;
682     }
683
684
685     for (i = 0; i < efptNR; i++)
686     {
687         enerd->dvdl_lin[i]     = 0;
688         enerd->dvdl_nonlin[i]  = 0;
689     }
690
691     n2 = ngener*ngener;
692     if (debug)
693     {
694         fprintf(debug, "Creating %d sized group matrix for energies\n", n2);
695     }
696     enerd->grpp.nener         = n2;
697     enerd->foreign_grpp.nener = n2;
698     for (i = 0; (i < egNR); i++)
699     {
700         snew(enerd->grpp.ener[i], n2);
701         snew(enerd->foreign_grpp.ener[i], n2);
702     }
703
704     if (n_lambda)
705     {
706         enerd->n_lambda = 1 + n_lambda;
707         snew(enerd->enerpart_lambda, enerd->n_lambda);
708     }
709     else
710     {
711         enerd->n_lambda = 0;
712     }
713 }
714
715 void destroy_enerdata(gmx_enerdata_t *enerd)
716 {
717     int i;
718
719     for (i = 0; (i < egNR); i++)
720     {
721         sfree(enerd->grpp.ener[i]);
722     }
723
724     for (i = 0; (i < egNR); i++)
725     {
726         sfree(enerd->foreign_grpp.ener[i]);
727     }
728
729     if (enerd->n_lambda)
730     {
731         sfree(enerd->enerpart_lambda);
732     }
733 }
734
735 static real sum_v(int n, real v[])
736 {
737     real t;
738     int  i;
739
740     t = 0.0;
741     for (i = 0; (i < n); i++)
742     {
743         t = t + v[i];
744     }
745
746     return t;
747 }
748
749 void sum_epot(gmx_grppairener_t *grpp, real *epot)
750 {
751     int i;
752
753     /* Accumulate energies */
754     epot[F_COUL_SR]  = sum_v(grpp->nener, grpp->ener[egCOULSR]);
755     epot[F_LJ]       = sum_v(grpp->nener, grpp->ener[egLJSR]);
756     epot[F_LJ14]     = sum_v(grpp->nener, grpp->ener[egLJ14]);
757     epot[F_COUL14]   = sum_v(grpp->nener, grpp->ener[egCOUL14]);
758     epot[F_COUL_LR]  = sum_v(grpp->nener, grpp->ener[egCOULLR]);
759     epot[F_LJ_LR]    = sum_v(grpp->nener, grpp->ener[egLJLR]);
760     /* We have already added 1-2,1-3, and 1-4 terms to F_GBPOL */
761     epot[F_GBPOL]   += sum_v(grpp->nener, grpp->ener[egGB]);
762
763 /* lattice part of LR doesnt belong to any group
764  * and has been added earlier
765  */
766     epot[F_BHAM]     = sum_v(grpp->nener, grpp->ener[egBHAMSR]);
767     epot[F_BHAM_LR]  = sum_v(grpp->nener, grpp->ener[egBHAMLR]);
768
769     epot[F_EPOT] = 0;
770     for (i = 0; (i < F_EPOT); i++)
771     {
772         if (i != F_DISRESVIOL && i != F_ORIRESDEV)
773         {
774             epot[F_EPOT] += epot[i];
775         }
776     }
777 }
778
779 void sum_dhdl(gmx_enerdata_t *enerd, real *lambda, t_lambda *fepvals)
780 {
781     int    i, j, index;
782     double dlam;
783
784     enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW];  /* include dispersion correction */
785     enerd->term[F_DVDL]       = 0.0;
786     for (i = 0; i < efptNR; i++)
787     {
788         if (fepvals->separate_dvdl[i])
789         {
790             /* could this be done more readably/compactly? */
791             switch (i)
792             {
793                 case (efptMASS):
794                     index = F_DKDL;
795                     break;
796                 case (efptCOUL):
797                     index = F_DVDL_COUL;
798                     break;
799                 case (efptVDW):
800                     index = F_DVDL_VDW;
801                     break;
802                 case (efptBONDED):
803                     index = F_DVDL_BONDED;
804                     break;
805                 case (efptRESTRAINT):
806                     index = F_DVDL_RESTRAINT;
807                     break;
808                 default:
809                     index = F_DVDL;
810                     break;
811             }
812             enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
813             if (debug)
814             {
815                 fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
816                         efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
817             }
818         }
819         else
820         {
821             enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
822             if (debug)
823             {
824                 fprintf(debug, "dvd-%sl[%2d]: %f: non-linear %f + linear %f\n",
825                         efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
826             }
827         }
828     }
829
830     /* Notes on the foreign lambda free energy difference evaluation:
831      * Adding the potential and ekin terms that depend linearly on lambda
832      * as delta lam * dvdl to the energy differences is exact.
833      * For the constraints this is not exact, but we have no other option
834      * without literally changing the lengths and reevaluating the energies at each step.
835      * (try to remedy this post 4.6 - MRS)
836      * For the non-bonded LR term we assume that the soft-core (if present)
837      * no longer affects the energy beyond the short-range cut-off,
838      * which is a very good approximation (except for exotic settings).
839      * (investigate how to overcome this post 4.6 - MRS)
840      */
841     if (fepvals->separate_dvdl[efptBONDED])
842     {
843         enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
844     }
845     else
846     {
847         enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];
848     }
849     enerd->term[F_DVDL_CONSTR] = 0;
850
851     for (i = 0; i < fepvals->n_lambda; i++)
852     {
853         /* note we are iterating over fepvals here!
854            For the current lam, dlam = 0 automatically,
855            so we don't need to add anything to the
856            enerd->enerpart_lambda[0] */
857
858         /* we don't need to worry about dvdl_lin contributions to dE at
859            current lambda, because the contributions to the current
860            lambda are automatically zeroed */
861
862         for (j = 0; j < efptNR; j++)
863         {
864             /* Note that this loop is over all dhdl components, not just the separated ones */
865             dlam = (fepvals->all_lambda[j][i]-lambda[j]);
866             enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j];
867             if (debug)
868             {
869                 fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
870                         fepvals->all_lambda[j][i], efpt_names[j],
871                         (enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]),
872                         dlam, enerd->dvdl_lin[j]);
873             }
874         }
875     }
876 }
877
878
879 void reset_foreign_enerdata(gmx_enerdata_t *enerd)
880 {
881     int  i, j;
882
883     /* First reset all foreign energy components.  Foreign energies always called on
884        neighbor search steps */
885     for (i = 0; (i < egNR); i++)
886     {
887         for (j = 0; (j < enerd->grpp.nener); j++)
888         {
889             enerd->foreign_grpp.ener[i][j] = 0.0;
890         }
891     }
892
893     /* potential energy components */
894     for (i = 0; (i <= F_EPOT); i++)
895     {
896         enerd->foreign_term[i] = 0.0;
897     }
898 }
899
900 void reset_enerdata(t_forcerec *fr, gmx_bool bNS,
901                     gmx_enerdata_t *enerd,
902                     gmx_bool bMaster)
903 {
904     gmx_bool bKeepLR;
905     int      i, j;
906
907     /* First reset all energy components, except for the long range terms
908      * on the master at non neighbor search steps, since the long range
909      * terms have already been summed at the last neighbor search step.
910      */
911     bKeepLR = (fr->bTwinRange && !bNS);
912     for (i = 0; (i < egNR); i++)
913     {
914         if (!(bKeepLR && bMaster && (i == egCOULLR || i == egLJLR)))
915         {
916             for (j = 0; (j < enerd->grpp.nener); j++)
917             {
918                 enerd->grpp.ener[i][j] = 0.0;
919             }
920         }
921     }
922     for (i = 0; i < efptNR; i++)
923     {
924         enerd->dvdl_lin[i]    = 0.0;
925         enerd->dvdl_nonlin[i] = 0.0;
926     }
927
928     /* Normal potential energy components */
929     for (i = 0; (i <= F_EPOT); i++)
930     {
931         enerd->term[i] = 0.0;
932     }
933     /* Initialize the dVdlambda term with the long range contribution */
934     /* Initialize the dvdl term with the long range contribution */
935     enerd->term[F_DVDL]            = 0.0;
936     enerd->term[F_DVDL_COUL]       = 0.0;
937     enerd->term[F_DVDL_VDW]        = 0.0;
938     enerd->term[F_DVDL_BONDED]     = 0.0;
939     enerd->term[F_DVDL_RESTRAINT]  = 0.0;
940     enerd->term[F_DKDL]            = 0.0;
941     if (enerd->n_lambda > 0)
942     {
943         for (i = 0; i < enerd->n_lambda; i++)
944         {
945             enerd->enerpart_lambda[i] = 0.0;
946         }
947     }
948     /* reset foreign energy data - separate function since we also call it elsewhere */
949     reset_foreign_enerdata(enerd);
950 }