src/gromacs/mdlib/force.cpp
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5  * Copyright (c) 2001-2004, The GROMACS development team.
6  * Copyright (c) 2013,2014, by the GROMACS development team, led by
7  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8  * and including many others, as listed in the AUTHORS file in the
9  * top-level source directory and at http://www.gromacs.org.
10  *
11  * GROMACS is free software; you can redistribute it and/or
12  * modify it under the terms of the GNU Lesser General Public License
13  * as published by the Free Software Foundation; either version 2.1
14  * of the License, or (at your option) any later version.
15  *
16  * GROMACS is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
19  * Lesser General Public License for more details.
20  *
21  * You should have received a copy of the GNU Lesser General Public
22  * License along with GROMACS; if not, see
23  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
25  *
26  * If you want to redistribute modifications to GROMACS, please
27  * consider that scientific software is very special. Version
28  * control is crucial - bugs must be traceable. We will be happy to
29  * consider code for inclusion in the official distribution, but
30  * derived work must not be called official GROMACS. Details are found
31  * in the README & COPYING files - if they are missing, get the
32  * official version at http://www.gromacs.org.
33  *
34  * To help us fund GROMACS development, we humbly ask that you cite
35  * the research papers on the package. Check out http://www.gromacs.org.
36  */
37 #include "gmxpre.h"
38
39 #include "gromacs/legacyheaders/force.h"
40
41 #include "config.h"
42
43 #include <assert.h>
44 #include <math.h>
45 #include <string.h>
46
47 #include "gromacs/domdec/domdec.h"
48 #include "gromacs/ewald/ewald.h"
49 #include "gromacs/ewald/long-range-correction.h"
50 #include "gromacs/ewald/pme.h"
51 #include "gromacs/legacyheaders/gmx_omp_nthreads.h"
52 #include "gromacs/legacyheaders/macros.h"
53 #include "gromacs/legacyheaders/mdrun.h"
54 #include "gromacs/legacyheaders/names.h"
55 #include "gromacs/legacyheaders/network.h"
56 #include "gromacs/legacyheaders/nonbonded.h"
57 #include "gromacs/legacyheaders/nrnb.h"
58 #include "gromacs/legacyheaders/ns.h"
59 #include "gromacs/legacyheaders/qmmm.h"
60 #include "gromacs/legacyheaders/txtdump.h"
61 #include "gromacs/legacyheaders/typedefs.h"
62 #include "gromacs/legacyheaders/types/commrec.h"
63 #include "gromacs/listed-forces/listed-forces.h"
64 #include "gromacs/math/vec.h"
65 #include "gromacs/pbcutil/ishift.h"
66 #include "gromacs/pbcutil/mshift.h"
67 #include "gromacs/pbcutil/pbc.h"
68 #include "gromacs/timing/wallcycle.h"
69 #include "gromacs/utility/fatalerror.h"
70 #include "gromacs/utility/smalloc.h"
71
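/* Group-scheme neighbor searching: initializes the neighbor lists on first
 * use, resets the long-range pair count for twin-range setups, and rebuilds
 * the lists through search_neighbours().
 */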
72 void ns(FILE              *fp,
73         t_forcerec        *fr,
74         matrix             box,
75         gmx_groups_t      *groups,
76         gmx_localtop_t    *top,
77         t_mdatoms         *md,
78         t_commrec         *cr,
79         t_nrnb            *nrnb,
80         gmx_bool           bFillGrid,
81         gmx_bool           bDoLongRangeNS)
82 {
83     int     nsearch;
84
85
86     if (!fr->ns.nblist_initialized)
87     {
88         init_neighbor_list(fp, fr, md->homenr);
89     }
90
91     if (fr->bTwinRange)
92     {
93         fr->nlr = 0;
94     }
95
96     nsearch = search_neighbours(fp, fr, box, top, groups, cr, nrnb, md,
97                                 bFillGrid, bDoLongRangeNS);
98     if (debug)
99     {
100         fprintf(debug, "nsearch = %d\n", nsearch);
101     }
102
103     /* Check whether we have to do dynamic load balancing */
104     /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
105        count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
106        &(top->idef),opts->ngener);
107      */
108     if (fr->ns.dump_nl > 0)
109     {
110         dump_nblist(fp, cr, fr, fr->ns.dump_nl);
111     }
112 }
113
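/* Accumulate the per-thread Ewald-correction buffers (forces, virials,
 * energies and dV/dlambda) of threads 1..nthreads-1 into the output arrays
 * that thread 0 wrote to directly.
 */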
114 static void reduce_thread_forces(int n, rvec *f,
115                                  tensor vir_q, tensor vir_lj,
116                                  real *Vcorr_q, real *Vcorr_lj,
117                                  real *dvdl_q, real *dvdl_lj,
118                                  int nthreads, f_thread_t *f_t)
119 {
120     int t, i;
121     int nthreads_loop gmx_unused;
122
123     // cppcheck-suppress unreadVariable
124     nthreads_loop = gmx_omp_nthreads_get(emntBonded);
125     /* This reduction can run over any number of threads */
126 #pragma omp parallel for num_threads(nthreads_loop) private(t) schedule(static)
127     for (i = 0; i < n; i++)
128     {
129         for (t = 1; t < nthreads; t++)
130         {
131             rvec_inc(f[i], f_t[t].f[i]);
132         }
133     }
134     for (t = 1; t < nthreads; t++)
135     {
136         *Vcorr_q  += f_t[t].Vcorr_q;
137         *Vcorr_lj += f_t[t].Vcorr_lj;
138         *dvdl_q   += f_t[t].dvdl[efptCOUL];
139         *dvdl_lj  += f_t[t].dvdl[efptVDW];
140         m_add(vir_q, f_t[t].vir_q, vir_q);
141         m_add(vir_lj, f_t[t].vir_lj, vir_lj);
142     }
143 }
144
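/* Compute the "low-level" force contributions for one step: short-range
 * non-bonded interactions (group scheme), walls, implicit solvent, listed
 * interactions, and long-range electrostatics/LJ-PME with their corrections.
 * Called from the higher-level do_force*() routines, e.g. do_force_cutsVERLET().
 */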
145 void do_force_lowlevel(t_forcerec *fr,      t_inputrec *ir,
146                        t_idef     *idef,    t_commrec  *cr,
147                        t_nrnb     *nrnb,    gmx_wallcycle_t wcycle,
148                        t_mdatoms  *md,
149                        rvec       x[],      history_t  *hist,
150                        rvec       f[],
151                        rvec       f_longrange[],
152                        gmx_enerdata_t *enerd,
153                        t_fcdata   *fcd,
154                        gmx_localtop_t *top,
155                        gmx_genborn_t *born,
156                        gmx_bool       bBornRadii,
157                        matrix     box,
158                        t_lambda   *fepvals,
159                        real       *lambda,
160                        t_graph    *graph,
161                        t_blocka   *excl,
162                        rvec       mu_tot[],
163                        int        flags,
164                        float      *cycles_pme)
165 {
166     int         i, j;
167     int         donb_flags;
168     gmx_bool    bSB;
169     int         pme_flags;
170     matrix      boxs;
171     rvec        box_size;
172     t_pbc       pbc;
173     real        dvdl_dum[efptNR], dvdl_nb[efptNR];
174
175 #ifdef GMX_MPI
176     double  t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
177 #endif
178
179     set_pbc(&pbc, fr->ePBC, box);
180
181     /* reset free energy components */
182     for (i = 0; i < efptNR; i++)
183     {
184         dvdl_nb[i]  = 0;
185         dvdl_dum[i] = 0;
186     }
187
188     /* Reset box */
189     for (i = 0; (i < DIM); i++)
190     {
191         box_size[i] = box[i][i];
192     }
193
194     debug_gmx();
195
196     /* do QMMM first if requested */
197     if (fr->bQMMM)
198     {
199         enerd->term[F_EQM] = calculate_QMMM(cr, x, f, fr);
200     }
201
202     /* Call the short range functions all in one go. */
203
204 #ifdef GMX_MPI
205     /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
206 #define TAKETIME FALSE
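    /* TAKETIME enables a crude MPI wall-clock measurement around the
     * short-range work, used for the coarse PP load-balancing report below;
     * it is disabled by default. */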
207     if (TAKETIME)
208     {
209         MPI_Barrier(cr->mpi_comm_mygroup);
210         t0 = MPI_Wtime();
211     }
212 #endif
213
214     if (ir->nwall)
215     {
216         /* dvdl contribution from the walls */
217         real dvdl_walls = do_walls(ir, fr, box, md, x, f, lambda[efptVDW],
218                                    enerd->grpp.ener[egLJSR], nrnb);
219         enerd->dvdl_lin[efptVDW] += dvdl_walls;
220     }
221
222     /* If doing GB, reset dvda and calculate the Born radii */
223     if (ir->implicit_solvent)
224     {
225         wallcycle_sub_start(wcycle, ewcsNONBONDED);
226
227         for (i = 0; i < born->nr; i++)
228         {
229             fr->dvda[i] = 0;
230         }
231
232         if (bBornRadii)
233         {
234             calc_gb_rad(cr, fr, ir, top, x, &(fr->gblist), born, md, nrnb);
235         }
236
237         wallcycle_sub_stop(wcycle, ewcsNONBONDED);
238     }
239
240     where();
241     /* We only do the non-bonded calculation with the group scheme here;
242      * the Verlet-scheme calls are made from do_force_cutsVERLET(). */
243     if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
244     {
245         donb_flags = 0;
246         /* Add short-range interactions */
247         donb_flags |= GMX_NONBONDED_DO_SR;
248
249         /* Currently all group scheme kernels always calculate (shift-)forces */
250         if (flags & GMX_FORCE_FORCES)
251         {
252             donb_flags |= GMX_NONBONDED_DO_FORCE;
253         }
254         if (flags & GMX_FORCE_VIRIAL)
255         {
256             donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
257         }
258         if (flags & GMX_FORCE_ENERGY)
259         {
260             donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
261         }
262         if (flags & GMX_FORCE_DO_LR)
263         {
264             donb_flags |= GMX_NONBONDED_DO_LR;
265         }
266
267         wallcycle_sub_start(wcycle, ewcsNONBONDED);
268         do_nonbonded(fr, x, f, f_longrange, md, excl,
269                      &enerd->grpp, nrnb,
270                      lambda, dvdl_nb, -1, -1, donb_flags);
271
272         /* If we compute foreign-lambda energies and have soft-core interactions,
273          * we have to recalculate the (non-linear) energy contributions.
274          */
275         if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
276         {
277             for (i = 0; i < enerd->n_lambda; i++)
278             {
279                 real lam_i[efptNR];
280
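                /* Entry i == 0 is the current lambda vector; entries i >= 1
                 * take foreign lambda i-1 from fepvals->all_lambda. */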
281                 for (j = 0; j < efptNR; j++)
282                 {
283                     lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
284                 }
285                 reset_foreign_enerdata(enerd);
286                 do_nonbonded(fr, x, f, f_longrange, md, excl,
287                              &(enerd->foreign_grpp), nrnb,
288                              lam_i, dvdl_dum, -1, -1,
289                              (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
290                 sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
291                 enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
292             }
293         }
294         wallcycle_sub_stop(wcycle, ewcsNONBONDED);
295         where();
296     }
297
298     /* If we are doing GB, calculate bonded forces and apply corrections
299      * to the solvation forces */
300     /* MRS: Eventually, may need to include the free energy contribution here! */
301     if (ir->implicit_solvent)
302     {
303         wallcycle_sub_start(wcycle, ewcsLISTED);
304         calc_gb_forces(cr, md, born, top, x, f, fr, idef,
305                        ir->gb_algorithm, ir->sa_algorithm, nrnb, &pbc, graph, enerd);
306         wallcycle_sub_stop(wcycle, ewcsLISTED);
307     }
308
309 #ifdef GMX_MPI
310     if (TAKETIME)
311     {
312         t1          = MPI_Wtime();
313         fr->t_fnbf += t1-t0;
314     }
315 #endif
316
317     if (fepvals->sc_alpha != 0)
318     {
319         enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
320     }
321     else
322     {
323         enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
324     }
325
326     if (fepvals->sc_alpha != 0)
327
328     /* Even though the Coulomb part is linear in lambda, we already added it,
329        because we need to go through the VdW calculation anyway. */
330     {
331         enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
332     }
333     else
334     {
335         enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
336     }
337
338     debug_gmx();
339
340
341     if (debug)
342     {
343         pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
344     }
345
346     /* Shift the coordinates. This must be done before listed forces and PPPM,
347      * but is also needed for SHAKE and the update, so it can NOT be skipped
348      * even when no listed forces have to be evaluated.
349      */
350
351     /* Sometimes the shift would not be needed when only non-bonded forces are
352      * computed, but we do it anyhow for consistency of the returned coordinates.
353      */
354     if (graph)
355     {
356         shift_self(graph, box, x);
357         if (TRICLINIC(box))
358         {
359             inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
360         }
361         else
362         {
363             inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
364         }
365     }
366     /* Check whether we need to do listed interactions or correct for exclusions */
367     if (fr->bMolPBC &&
368         ((flags & GMX_FORCE_LISTED)
369          || EEL_RF(fr->eeltype) || EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype)))
370     {
371         /* Since all atoms are in the rectangular or triclinic unit-cell,
372          * only single box vector shifts (2 in x) are required.
373          */
374         set_pbc_dd(&pbc, fr->ePBC, cr->dd, TRUE, box);
375     }
376     debug_gmx();
377
378     do_force_listed(wcycle, box, ir->fepvals, cr->ms,
379                     idef, (const rvec *) x, hist, f, fr,
380                     &pbc, graph, enerd, nrnb, lambda, md, fcd,
381                     DOMAINDECOMP(cr) ? cr->dd->gatindex : NULL,
382                     flags);
383
384     where();
385
386     *cycles_pme = 0;
387     clear_mat(fr->vir_el_recip);
388     clear_mat(fr->vir_lj_recip);
389
390     /* Do long-range electrostatics and/or LJ-PME, including related short-range
391      * corrections.
392      */
393     if (EEL_FULL(fr->eeltype) || EVDW_PME(fr->vdwtype))
394     {
395         int  status            = 0;
396         real Vlr_q             = 0, Vlr_lj = 0, Vcorr_q = 0, Vcorr_lj = 0;
397         real dvdl_long_range_q = 0, dvdl_long_range_lj = 0;
398
399         bSB = (ir->nwall == 2);
400         if (bSB)
401         {
402             copy_mat(box, boxs);
403             svmul(ir->wall_ewald_zfac, boxs[ZZ], boxs[ZZ]);
404             box_size[ZZ] *= ir->wall_ewald_zfac;
405         }
406
407         if (EEL_PME_EWALD(fr->eeltype) || EVDW_PME(fr->vdwtype))
408         {
409             real dvdl_long_range_correction_q   = 0;
410             real dvdl_long_range_correction_lj  = 0;
411             /* With the Verlet scheme exclusion forces are calculated
412              * in the non-bonded kernel.
413              */
414             /* The TPI molecule does not have exclusions with the rest
415              * of the system and no intra-molecular PME grid
416              * contributions will be calculated in
417              * gmx_pme_calc_energy.
418              */
419             if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
420                 ir->ewald_geometry != eewg3D ||
421                 ir->epsilon_surface != 0)
422             {
423                 int nthreads, t;
424
425                 wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);
426
427                 if (fr->n_tpi > 0)
428                 {
429                     gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
430                 }
431
432                 nthreads = gmx_omp_nthreads_get(emntBonded);
433 #pragma omp parallel for num_threads(nthreads) schedule(static)
434                 for (t = 0; t < nthreads; t++)
435                 {
436                     int     i;
437                     rvec   *fnv;
438                     tensor *vir_q, *vir_lj;
439                     real   *Vcorrt_q, *Vcorrt_lj, *dvdlt_q, *dvdlt_lj;
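                    /* Thread 0 writes directly into the global output buffers;
                     * the other threads use their private f_t[t] buffers, which
                     * are summed afterwards by reduce_thread_forces(). */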
440                     if (t == 0)
441                     {
442                         fnv       = fr->f_novirsum;
443                         vir_q     = &fr->vir_el_recip;
444                         vir_lj    = &fr->vir_lj_recip;
445                         Vcorrt_q  = &Vcorr_q;
446                         Vcorrt_lj = &Vcorr_lj;
447                         dvdlt_q   = &dvdl_long_range_correction_q;
448                         dvdlt_lj  = &dvdl_long_range_correction_lj;
449                     }
450                     else
451                     {
452                         fnv       = fr->f_t[t].f;
453                         vir_q     = &fr->f_t[t].vir_q;
454                         vir_lj    = &fr->f_t[t].vir_lj;
455                         Vcorrt_q  = &fr->f_t[t].Vcorr_q;
456                         Vcorrt_lj = &fr->f_t[t].Vcorr_lj;
457                         dvdlt_q   = &fr->f_t[t].dvdl[efptCOUL];
458                         dvdlt_lj  = &fr->f_t[t].dvdl[efptVDW];
459                         for (i = 0; i < fr->natoms_force; i++)
460                         {
461                             clear_rvec(fnv[i]);
462                         }
463                         clear_mat(*vir_q);
464                         clear_mat(*vir_lj);
465                     }
466                     *dvdlt_q  = 0;
467                     *dvdlt_lj = 0;
468
469                     ewald_LRcorrection(fr->excl_load[t], fr->excl_load[t+1],
470                                        cr, t, fr,
471                                        md->chargeA,
472                                        md->nChargePerturbed ? md->chargeB : NULL,
473                                        md->sqrt_c6A,
474                                        md->nTypePerturbed ? md->sqrt_c6B : NULL,
475                                        md->sigmaA,
476                                        md->nTypePerturbed ? md->sigmaB : NULL,
477                                        md->sigma3A,
478                                        md->nTypePerturbed ? md->sigma3B : NULL,
479                                        ir->cutoff_scheme != ecutsVERLET,
480                                        excl, x, bSB ? boxs : box, mu_tot,
481                                        ir->ewald_geometry,
482                                        ir->epsilon_surface,
483                                        fnv, *vir_q, *vir_lj,
484                                        Vcorrt_q, Vcorrt_lj,
485                                        lambda[efptCOUL], lambda[efptVDW],
486                                        dvdlt_q, dvdlt_lj);
487                 }
488                 if (nthreads > 1)
489                 {
490                     reduce_thread_forces(fr->natoms_force, fr->f_novirsum,
491                                          fr->vir_el_recip, fr->vir_lj_recip,
492                                          &Vcorr_q, &Vcorr_lj,
493                                          &dvdl_long_range_correction_q,
494                                          &dvdl_long_range_correction_lj,
495                                          nthreads, fr->f_t);
496                 }
497                 wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
498             }
499
500             if (EEL_PME_EWALD(fr->eeltype) && fr->n_tpi == 0)
501             {
502                 Vcorr_q += ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
503                                                    &dvdl_long_range_correction_q,
504                                                    fr->vir_el_recip);
505             }
506
507             enerd->dvdl_lin[efptCOUL] += dvdl_long_range_correction_q;
508             enerd->dvdl_lin[efptVDW]  += dvdl_long_range_correction_lj;
509
510             if ((EEL_PME(fr->eeltype) || EVDW_PME(fr->vdwtype)) && (cr->duty & DUTY_PME))
511             {
512                 /* Do reciprocal PME for Coulomb and/or LJ. */
513                 assert(fr->n_tpi >= 0);
514                 if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
515                 {
516                     pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;
517                     if (EEL_PME(fr->eeltype))
518                     {
519                         pme_flags     |= GMX_PME_DO_COULOMB;
520                     }
521                     if (EVDW_PME(fr->vdwtype))
522                     {
523                         pme_flags |= GMX_PME_DO_LJ;
524                     }
525                     if (flags & GMX_FORCE_FORCES)
526                     {
527                         pme_flags |= GMX_PME_CALC_F;
528                     }
529                     if (flags & GMX_FORCE_VIRIAL)
530                     {
531                         pme_flags |= GMX_PME_CALC_ENER_VIR;
532                     }
533                     if (fr->n_tpi > 0)
534                     {
535                         /* We don't calculate f, but we do want the potential */
536                         pme_flags |= GMX_PME_CALC_POT;
537                     }
538                     wallcycle_start(wcycle, ewcPMEMESH);
539                     status = gmx_pme_do(fr->pmedata,
540                                         0, md->homenr - fr->n_tpi,
541                                         x, fr->f_novirsum,
542                                         md->chargeA, md->chargeB,
543                                         md->sqrt_c6A, md->sqrt_c6B,
544                                         md->sigmaA, md->sigmaB,
545                                         bSB ? boxs : box, cr,
546                                         DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
547                                         DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
548                                         nrnb, wcycle,
549                                         fr->vir_el_recip, fr->ewaldcoeff_q,
550                                         fr->vir_lj_recip, fr->ewaldcoeff_lj,
551                                         &Vlr_q, &Vlr_lj,
552                                         lambda[efptCOUL], lambda[efptVDW],
553                                         &dvdl_long_range_q, &dvdl_long_range_lj, pme_flags);
554                     *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
555                     if (status != 0)
556                     {
557                         gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
558                     }
559                     /* We should try to do as little computation after
560                      * this as possible, because parallel PME synchronizes
561                      * the nodes, so we want all load imbalance of the
562                      * rest of the force calculation to be before the PME
563                      * call.  DD load balancing is done on the whole time
564                      * of the force call (without PME).
565                      */
566                 }
567                 if (fr->n_tpi > 0)
568                 {
569                     if (EVDW_PME(ir->vdwtype))
570                     {
571
572                         gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");
573                     }
574                     /* Determine the PME grid energy of the test molecule
575                      * with the PME grid potential of the other charges.
576                      */
577                     gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
578                                         x + md->homenr - fr->n_tpi,
579                                         md->chargeA + md->homenr - fr->n_tpi,
580                                         &Vlr_q);
581                 }
582             }
583         }
584
585         if (!EEL_PME(fr->eeltype) && EEL_PME_EWALD(fr->eeltype))
586         {
587             Vlr_q = do_ewald(ir, x, fr->f_novirsum,
588                              md->chargeA, md->chargeB,
589                              box_size, cr, md->homenr,
590                              fr->vir_el_recip, fr->ewaldcoeff_q,
591                              lambda[efptCOUL], &dvdl_long_range_q, fr->ewald_table);
592         }
593
594         /* Note that with separate PME nodes we get the real energies later */
595         enerd->dvdl_lin[efptCOUL] += dvdl_long_range_q;
596         enerd->dvdl_lin[efptVDW]  += dvdl_long_range_lj;
597         enerd->term[F_COUL_RECIP]  = Vlr_q + Vcorr_q;
598         enerd->term[F_LJ_RECIP]    = Vlr_lj + Vcorr_lj;
599         if (debug)
600         {
601             fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
602                     Vlr_q, Vcorr_q, enerd->term[F_COUL_RECIP]);
603             pr_rvecs(debug, 0, "vir_el_recip after corr", fr->vir_el_recip, DIM);
604             pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
605             fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
606                     Vlr_lj, Vcorr_lj, enerd->term[F_LJ_RECIP]);
607             pr_rvecs(debug, 0, "vir_lj_recip after corr", fr->vir_lj_recip, DIM);
608         }
609     }
610     else
611     {
612         /* Is there a reaction-field exclusion correction needed? */
613         if (EEL_RF(fr->eeltype) && eelRF_NEC != fr->eeltype)
614         {
615             /* With the Verlet scheme, exclusion forces are calculated
616              * in the non-bonded kernel.
617              */
618             if (ir->cutoff_scheme != ecutsVERLET)
619             {
620                 real dvdl_rf_excl      = 0;
621                 enerd->term[F_RF_EXCL] =
622                     RF_excl_correction(fr, graph, md, excl, x, f,
623                                        fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);
624
625                 enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
626             }
627         }
628     }
629     where();
630     debug_gmx();
631
632     if (debug)
633     {
634         print_nrnb(debug, nrnb);
635     }
636     debug_gmx();
637
638 #ifdef GMX_MPI
639     if (TAKETIME)
640     {
641         t2 = MPI_Wtime();
642         MPI_Barrier(cr->mpi_comm_mygroup);
643         t3          = MPI_Wtime();
644         fr->t_wait += t3-t2;
645         if (fr->timesteps == 11)
646         {
647             char buf[22];
648             fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
649                     cr->nodeid, gmx_step_str(fr->timesteps, buf),
650                     100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
651                     (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
652         }
653         fr->timesteps++;
654     }
655 #endif
656
657     if (debug)
658     {
659         pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
660     }
661
662 }
663
664 void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)
665 {
666     int i, n2;
667
668     for (i = 0; i < F_NRE; i++)
669     {
670         enerd->term[i]         = 0;
671         enerd->foreign_term[i] = 0;
672     }
673
674
675     for (i = 0; i < efptNR; i++)
676     {
677         enerd->dvdl_lin[i]     = 0;
678         enerd->dvdl_nonlin[i]  = 0;
679     }
680
681     n2 = ngener*ngener;
682     if (debug)
683     {
684         fprintf(debug, "Creating %d sized group matrix for energies\n", n2);
685     }
686     enerd->grpp.nener         = n2;
687     enerd->foreign_grpp.nener = n2;
688     for (i = 0; (i < egNR); i++)
689     {
690         snew(enerd->grpp.ener[i], n2);
691         snew(enerd->foreign_grpp.ener[i], n2);
692     }
693
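    /* enerpart_lambda[0] holds the energy at the current lambda state; the
     * following n_lambda entries hold the foreign-lambda energies. */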
694     if (n_lambda)
695     {
696         enerd->n_lambda = 1 + n_lambda;
697         snew(enerd->enerpart_lambda, enerd->n_lambda);
698     }
699     else
700     {
701         enerd->n_lambda = 0;
702     }
703 }
704
705 void destroy_enerdata(gmx_enerdata_t *enerd)
706 {
707     int i;
708
709     for (i = 0; (i < egNR); i++)
710     {
711         sfree(enerd->grpp.ener[i]);
712     }
713
714     for (i = 0; (i < egNR); i++)
715     {
716         sfree(enerd->foreign_grpp.ener[i]);
717     }
718
719     if (enerd->n_lambda)
720     {
721         sfree(enerd->enerpart_lambda);
722     }
723 }
724
725 static real sum_v(int n, real v[])
726 {
727     real t;
728     int  i;
729
730     t = 0.0;
731     for (i = 0; (i < n); i++)
732     {
733         t = t + v[i];
734     }
735
736     return t;
737 }
738
739 void sum_epot(gmx_grppairener_t *grpp, real *epot)
740 {
741     int i;
742
743     /* Accumulate energies */
744     epot[F_COUL_SR]  = sum_v(grpp->nener, grpp->ener[egCOULSR]);
745     epot[F_LJ]       = sum_v(grpp->nener, grpp->ener[egLJSR]);
746     epot[F_LJ14]     = sum_v(grpp->nener, grpp->ener[egLJ14]);
747     epot[F_COUL14]   = sum_v(grpp->nener, grpp->ener[egCOUL14]);
748     epot[F_COUL_LR]  = sum_v(grpp->nener, grpp->ener[egCOULLR]);
749     epot[F_LJ_LR]    = sum_v(grpp->nener, grpp->ener[egLJLR]);
750     /* We have already added 1-2,1-3, and 1-4 terms to F_GBPOL */
751     epot[F_GBPOL]   += sum_v(grpp->nener, grpp->ener[egGB]);
752
753     /* The lattice part of the long-range term doesn't belong to any group
754      * and has been added earlier.
755      */
756     epot[F_BHAM]     = sum_v(grpp->nener, grpp->ener[egBHAMSR]);
757     epot[F_BHAM_LR]  = sum_v(grpp->nener, grpp->ener[egBHAMLR]);
758
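    /* Sum all potential-energy terms into F_EPOT, skipping the restraint
     * violation/deviation diagnostics, which are not energies. */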
759     epot[F_EPOT] = 0;
760     for (i = 0; (i < F_EPOT); i++)
761     {
762         if (i != F_DISRESVIOL && i != F_ORIRESDEV)
763         {
764             epot[F_EPOT] += epot[i];
765         }
766     }
767 }
768
769 void sum_dhdl(gmx_enerdata_t *enerd, real *lambda, t_lambda *fepvals)
770 {
771     int    i, j, index;
772     double dlam;
773
774     enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW];  /* include dispersion correction */
775     enerd->term[F_DVDL]       = 0.0;
776     for (i = 0; i < efptNR; i++)
777     {
778         if (fepvals->separate_dvdl[i])
779         {
780             /* could this be done more readably/compactly? */
781             switch (i)
782             {
783                 case (efptMASS):
784                     index = F_DKDL;
785                     break;
786                 case (efptCOUL):
787                     index = F_DVDL_COUL;
788                     break;
789                 case (efptVDW):
790                     index = F_DVDL_VDW;
791                     break;
792                 case (efptBONDED):
793                     index = F_DVDL_BONDED;
794                     break;
795                 case (efptRESTRAINT):
796                     index = F_DVDL_RESTRAINT;
797                     break;
798                 default:
799                     index = F_DVDL;
800                     break;
801             }
802             enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
803             if (debug)
804             {
805                 fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
806                         efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
807             }
808         }
809         else
810         {
811             enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
812             if (debug)
813             {
814                 fprintf(debug, "dvd-%sl[%2d]: %f: non-linear %f + linear %f\n",
815                         efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
816             }
817         }
818     }
819
820     /* Notes on the foreign lambda free energy difference evaluation:
821      * Adding the potential and ekin terms that depend linearly on lambda
822      * as delta lam * dvdl to the energy differences is exact.
823      * For the constraints this is not exact, but we have no other option
824      * without literally changing the lengths and reevaluating the energies at each step.
825      * (try to remedy this post 4.6 - MRS)
826      * For the non-bonded LR term we assume that the soft-core (if present)
827      * no longer affects the energy beyond the short-range cut-off,
828      * which is a very good approximation (except for exotic settings).
829      * (investigate how to overcome this post 4.6 - MRS)
830      */
831     if (fepvals->separate_dvdl[efptBONDED])
832     {
833         enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
834     }
835     else
836     {
837         enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];
838     }
839     enerd->term[F_DVDL_CONSTR] = 0;
840
841     for (i = 0; i < fepvals->n_lambda; i++)
842     {
843         /* note we are iterating over fepvals here!
844            For the current lam, dlam = 0 automatically,
845            so we don't need to add anything to the
846            enerd->enerpart_lambda[0] */
847
848         /* we don't need to worry about dvdl_lin contributions to dE at
849            current lambda, because the contributions to the current
850            lambda are automatically zeroed */
851
852         for (j = 0; j < efptNR; j++)
853         {
854             /* Note that this loop is over all dhdl components, not just the separated ones */
855             dlam = (fepvals->all_lambda[j][i]-lambda[j]);
856             enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j];
857             if (debug)
858             {
859                 fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
860                         fepvals->all_lambda[j][i], efpt_names[j],
861                         (enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]),
862                         dlam, enerd->dvdl_lin[j]);
863             }
864         }
865     }
866 }
867
868
869 void reset_foreign_enerdata(gmx_enerdata_t *enerd)
870 {
871     int  i, j;
872
873     /* First reset all foreign energy components. Foreign energies are always
874        calculated on neighbor search steps. */
875     for (i = 0; (i < egNR); i++)
876     {
877         for (j = 0; (j < enerd->grpp.nener); j++)
878         {
879             enerd->foreign_grpp.ener[i][j] = 0.0;
880         }
881     }
882
883     /* potential energy components */
884     for (i = 0; (i <= F_EPOT); i++)
885     {
886         enerd->foreign_term[i] = 0.0;
887     }
888 }
889
890 void reset_enerdata(t_forcerec *fr, gmx_bool bNS,
891                     gmx_enerdata_t *enerd,
892                     gmx_bool bMaster)
893 {
894     gmx_bool bKeepLR;
895     int      i, j;
896
897     /* First reset all energy components, except for the long range terms
898      * on the master at non neighbor search steps, since the long range
899      * terms have already been summed at the last neighbor search step.
900      */
901     bKeepLR = (fr->bTwinRange && !bNS);
902     for (i = 0; (i < egNR); i++)
903     {
904         if (!(bKeepLR && bMaster && (i == egCOULLR || i == egLJLR)))
905         {
906             for (j = 0; (j < enerd->grpp.nener); j++)
907             {
908                 enerd->grpp.ener[i][j] = 0.0;
909             }
910         }
911     }
912     for (i = 0; i < efptNR; i++)
913     {
914         enerd->dvdl_lin[i]    = 0.0;
915         enerd->dvdl_nonlin[i] = 0.0;
916     }
917
918     /* Normal potential energy components */
919     for (i = 0; (i <= F_EPOT); i++)
920     {
921         enerd->term[i] = 0.0;
922     }
923     /* Initialize the dVdlambda (dvdl) terms with the long range
924      * contribution */
925     enerd->term[F_DVDL]            = 0.0;
926     enerd->term[F_DVDL_COUL]       = 0.0;
927     enerd->term[F_DVDL_VDW]        = 0.0;
928     enerd->term[F_DVDL_BONDED]     = 0.0;
929     enerd->term[F_DVDL_RESTRAINT]  = 0.0;
930     enerd->term[F_DKDL]            = 0.0;
931     if (enerd->n_lambda > 0)
932     {
933         for (i = 0; i < enerd->n_lambda; i++)
934         {
935             enerd->enerpart_lambda[i] = 0.0;
936         }
937     }
938     /* reset foreign energy data - separate function since we also call it elsewhere */
939     reset_foreign_enerdata(enerd);
940 }