Remove support for implicit solvation
[alexxy/gromacs.git] / src / gromacs / mdlib / force.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "force.h"

#include "config.h"

#include <assert.h>
#include <string.h>

#include <cmath>

#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/ewald.h"
#include "gromacs/ewald/long-range-correction.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/listed-forces/listed-forces.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/forcerec-threading.h"
#include "gromacs/mdlib/mdrun.h"
#include "gromacs/mdlib/ns.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"

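/* Perform neighbour searching for the group cut-off scheme, initializing
 * the neighbour list on first use and dumping it when fr->ns->dump_nl > 0.
 */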
void ns(FILE              *fp,
        t_forcerec        *fr,
        matrix             box,
        gmx_groups_t      *groups,
        gmx_localtop_t    *top,
        t_mdatoms         *md,
        t_commrec         *cr,
        t_nrnb            *nrnb,
        gmx_bool           bFillGrid)
{
    int     nsearch;


    if (!fr->ns->nblist_initialized)
    {
        init_neighbor_list(fp, fr, md->homenr);
    }

    nsearch = search_neighbours(fp, fr, box, top, groups, cr, nrnb, md,
                                bFillGrid);
    if (debug)
    {
        fprintf(debug, "nsearch = %d\n", nsearch);
    }

    /* Check whether we have to do dynamic load balancing */
    /*if ((nsb->nstDlb > 0) && (mod(step,nsb->nstDlb) == 0))
       count_nb(cr,nsb,&(top->blocks[ebCGS]),nns,fr->nlr,
       &(top->idef),opts->ngener);
     */
    if (fr->ns->dump_nl > 0)
    {
        dump_nblist(fp, cr, fr, fr->ns->dump_nl);
    }
}

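/* Zero the per-thread Ewald correction accumulators: energies, dV/dlambda
 * and virials.
 */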
static void clearEwaldThreadOutput(ewald_corr_thread_t *ewc_t)
{
    ewc_t->Vcorr_q        = 0;
    ewc_t->Vcorr_lj       = 0;
    ewc_t->dvdl[efptCOUL] = 0;
    ewc_t->dvdl[efptVDW]  = 0;
    clear_mat(ewc_t->vir_q);
    clear_mat(ewc_t->vir_lj);
}

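/* Reduce the Ewald correction output of threads 1..nthreads-1 into the
 * thread-0 entry of ewc_t.
 */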
static void reduceEwaldThreadOutput(int nthreads, ewald_corr_thread_t *ewc_t)
{
    ewald_corr_thread_t &dest = ewc_t[0];

    for (int t = 1; t < nthreads; t++)
    {
        dest.Vcorr_q        += ewc_t[t].Vcorr_q;
        dest.Vcorr_lj       += ewc_t[t].Vcorr_lj;
        dest.dvdl[efptCOUL] += ewc_t[t].dvdl[efptCOUL];
        dest.dvdl[efptVDW]  += ewc_t[t].dvdl[efptVDW];
        m_add(dest.vir_q,  ewc_t[t].vir_q,  dest.vir_q);
        m_add(dest.vir_lj, ewc_t[t].vir_lj, dest.vir_lj);
    }
}

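/* Compute the lower-level force contributions: QM/MM, walls, group-scheme
 * non-bonded interactions, listed interactions, and long-range
 * electrostatics/LJ-PME with the corresponding corrections. Verlet-scheme
 * non-bonded interactions are handled in do_force_cutsVERLET() instead.
 */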
void do_force_lowlevel(t_forcerec *fr,      t_inputrec *ir,
                       t_idef     *idef,    t_commrec  *cr,
                       t_nrnb     *nrnb,    gmx_wallcycle_t wcycle,
                       t_mdatoms  *md,
                       rvec       x[],      history_t  *hist,
                       rvec      *forceForUseWithShiftForces,
                       gmx::ForceWithVirial *forceWithVirial,
                       gmx_enerdata_t *enerd,
                       t_fcdata   *fcd,
                       matrix     box,
                       t_lambda   *fepvals,
                       real       *lambda,
                       t_graph    *graph,
                       t_blocka   *excl,
                       rvec       mu_tot[],
                       int        flags,
                       float     *cycles_pme)
{
    int         i, j;
    int         donb_flags;
    int         pme_flags;
    t_pbc       pbc;
    real        dvdl_dum[efptNR], dvdl_nb[efptNR];

#if GMX_MPI
    double  t0 = 0.0, t1, t2, t3; /* time measurement for coarse load balancing */
#endif

    set_pbc(&pbc, fr->ePBC, box);

    /* reset free energy components */
    for (i = 0; i < efptNR; i++)
    {
        dvdl_nb[i]  = 0;
        dvdl_dum[i] = 0;
    }

    /* do QMMM first if requested */
    if (fr->bQMMM)
    {
        enerd->term[F_EQM] = calculate_QMMM(cr, forceForUseWithShiftForces, fr);
    }

    /* Call the short range functions all in one go. */

#if GMX_MPI
    /*#define TAKETIME ((cr->npmenodes) && (fr->timesteps < 12))*/
#define TAKETIME FALSE
    if (TAKETIME)
    {
        MPI_Barrier(cr->mpi_comm_mygroup);
        t0 = MPI_Wtime();
    }
#endif

    if (ir->nwall)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(ir, fr, box, md, x, forceForUseWithShiftForces, lambda[efptVDW],
                                   enerd->grpp.ener[egLJSR], nrnb);
        enerd->dvdl_lin[efptVDW] += dvdl_walls;
    }

    where();
    /* We only do non-bonded calculation with group scheme here, the Verlet
     * calls are done from do_force_cutsVERLET(). */
    if (fr->cutoff_scheme == ecutsGROUP && (flags & GMX_FORCE_NONBONDED))
    {
        donb_flags = 0;
        /* Add short-range interactions */
        donb_flags |= GMX_NONBONDED_DO_SR;

        /* Currently all group scheme kernels always calculate (shift-)forces */
        if (flags & GMX_FORCE_FORCES)
        {
            donb_flags |= GMX_NONBONDED_DO_FORCE;
        }
        if (flags & GMX_FORCE_VIRIAL)
        {
            donb_flags |= GMX_NONBONDED_DO_SHIFTFORCE;
        }
        if (flags & GMX_FORCE_ENERGY)
        {
            donb_flags |= GMX_NONBONDED_DO_POTENTIAL;
        }

        wallcycle_sub_start(wcycle, ewcsNONBONDED);
        do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                     &enerd->grpp, nrnb,
                     lambda, dvdl_nb, -1, -1, donb_flags);

        /* If we do foreign lambda and we have soft-core interactions
         * we have to recalculate the (non-linear) energy contributions.
         */
        if (fepvals->n_lambda > 0 && (flags & GMX_FORCE_DHDL) && fepvals->sc_alpha != 0)
        {
            for (i = 0; i < enerd->n_lambda; i++)
            {
                real lam_i[efptNR];

                for (j = 0; j < efptNR; j++)
                {
                    lam_i[j] = (i == 0 ? lambda[j] : fepvals->all_lambda[j][i-1]);
                }
                reset_foreign_enerdata(enerd);
                do_nonbonded(fr, x, forceForUseWithShiftForces, md, excl,
                             &(enerd->foreign_grpp), nrnb,
                             lam_i, dvdl_dum, -1, -1,
                             (donb_flags & ~GMX_NONBONDED_DO_FORCE) | GMX_NONBONDED_DO_FOREIGNLAMBDA);
                sum_epot(&(enerd->foreign_grpp), enerd->foreign_term);
                enerd->enerpart_lambda[i] += enerd->foreign_term[F_EPOT];
            }
        }
        wallcycle_sub_stop(wcycle, ewcsNONBONDED);
        where();
    }

#if GMX_MPI
    if (TAKETIME)
    {
        t1          = MPI_Wtime();
        fr->t_fnbf += t1-t0;
    }
#endif

    if (fepvals->sc_alpha != 0)
    {
        enerd->dvdl_nonlin[efptVDW] += dvdl_nb[efptVDW];
    }
    else
    {
        enerd->dvdl_lin[efptVDW] += dvdl_nb[efptVDW];
    }

    if (fepvals->sc_alpha != 0)

    /* even though the Coulomb part is linear, we already added it, because we
       need to go through the vdw calculation anyway */
    {
        enerd->dvdl_nonlin[efptCOUL] += dvdl_nb[efptCOUL];
    }
    else
    {
        enerd->dvdl_lin[efptCOUL] += dvdl_nb[efptCOUL];
    }

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after SR", fr->fshift, SHIFTS);
    }

    /* Shift the coordinates. Must be done before listed forces and PPPM,
     * but is also necessary for SHAKE and update, therefore it can NOT
     * be skipped even when no listed forces have to be evaluated.
     *
     * The shifting and PBC code is deliberately not timed, since with
     * the Verlet scheme it only takes non-zero time with triclinic
     * boxes, and even then the time is around a factor of 100 less
     * than the next smallest counter.
     */


    /* Here sometimes we would not need to shift with NBFonly,
     * but we do so anyhow for consistency of the returned coordinates.
     */
    if (graph)
    {
        shift_self(graph, box, x);
        if (TRICLINIC(box))
        {
            inc_nrnb(nrnb, eNR_SHIFTX, 2*graph->nnodes);
        }
        else
        {
            inc_nrnb(nrnb, eNR_SHIFTX, graph->nnodes);
        }
    }
    /* Check whether we need to do listed interactions or correct for exclusions */
    if (fr->bMolPBC &&
        ((flags & GMX_FORCE_LISTED)
         || EEL_RF(fr->ic->eeltype) || EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)))
    {
        /* TODO There are no electrostatics methods that require this
           transformation, when using the Verlet scheme, so update the
           above conditional. */
        /* Since all atoms are in the rectangular or triclinic unit-cell,
         * only single box vector shifts (2 in x) are required.
         */
        set_pbc_dd(&pbc, fr->ePBC, DOMAINDECOMP(cr) ? cr->dd->nc : nullptr,
                   TRUE, box);
    }

    do_force_listed(wcycle, box, ir->fepvals, cr,
                    idef, (const rvec *) x, hist,
                    forceForUseWithShiftForces, forceWithVirial,
                    fr, &pbc, graph, enerd, nrnb, lambda, md, fcd,
                    DOMAINDECOMP(cr) ? cr->dd->gatindex : nullptr,
                    flags);

    where();

    *cycles_pme = 0;

    /* Do long-range electrostatics and/or LJ-PME, including related short-range
     * corrections.
     */
    if (EEL_FULL(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
    {
        int  status            = 0;
        real Vlr_q             = 0, Vlr_lj = 0;

        /* We reduce all virial, dV/dlambda and energy contributions, except
         * for the reciprocal energies (Vlr_q, Vlr_lj) into the same struct.
         */
        ewald_corr_thread_t &ewaldOutput = fr->ewc_t[0];
        clearEwaldThreadOutput(&ewaldOutput);

        if (EEL_PME_EWALD(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
        {
            /* With the Verlet scheme exclusion forces are calculated
             * in the non-bonded kernel.
             */
            /* The TPI molecule does not have exclusions with the rest
             * of the system and no intra-molecular PME grid
             * contributions will be calculated in
             * gmx_pme_calc_energy.
             */
            if ((ir->cutoff_scheme == ecutsGROUP && fr->n_tpi == 0) ||
                ir->ewald_geometry != eewg3D ||
                ir->epsilon_surface != 0)
            {
                int nthreads, t;

                wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);

                if (fr->n_tpi > 0)
                {
                    gmx_fatal(FARGS, "TPI with PME currently only works in a 3D geometry with tin-foil boundary conditions");
                }

                nthreads = fr->nthread_ewc;
#pragma omp parallel for num_threads(nthreads) schedule(static)
                for (t = 0; t < nthreads; t++)
                {
                    try
                    {
                        ewald_corr_thread_t &ewc_t = fr->ewc_t[t];
                        if (t > 0)
                        {
                            clearEwaldThreadOutput(&ewc_t);
                        }

                        /* Threading is only supported with the Verlet cut-off
                         * scheme and then only single particle forces (no
                         * exclusion forces) are calculated, so we can store
                         * the forces in the normal, single forceWithVirial->force_ array.
                         */
                        ewald_LRcorrection(md->homenr, cr, nthreads, t, fr, ir,
                                           md->chargeA, md->chargeB,
                                           md->sqrt_c6A, md->sqrt_c6B,
                                           md->sigmaA, md->sigmaB,
                                           md->sigma3A, md->sigma3B,
                                           md->nChargePerturbed || md->nTypePerturbed,
                                           ir->cutoff_scheme != ecutsVERLET,
                                           excl, x, box, mu_tot,
                                           ir->ewald_geometry,
                                           ir->epsilon_surface,
                                           as_rvec_array(forceWithVirial->force_.data()),
                                           ewc_t.vir_q, ewc_t.vir_lj,
                                           &ewc_t.Vcorr_q, &ewc_t.Vcorr_lj,
                                           lambda[efptCOUL], lambda[efptVDW],
                                           &ewc_t.dvdl[efptCOUL], &ewc_t.dvdl[efptVDW]);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
                }
                if (nthreads > 1)
                {
                    reduceEwaldThreadOutput(nthreads, fr->ewc_t);
                }
                wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
            }

            if (EEL_PME_EWALD(fr->ic->eeltype) && fr->n_tpi == 0)
            {
                /* This is not in a subcounter because it takes a
                   negligible and constant-sized amount of time */
                ewaldOutput.Vcorr_q +=
                    ewald_charge_correction(cr, fr, lambda[efptCOUL], box,
                                            &ewaldOutput.dvdl[efptCOUL],
                                            ewaldOutput.vir_q);
            }

            if ((EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype)) &&
                thisRankHasDuty(cr, DUTY_PME) && (pme_run_mode(fr->pmedata) == PmeRunMode::CPU))
            {
                /* Do reciprocal PME for Coulomb and/or LJ. */
                assert(fr->n_tpi >= 0);
                if (fr->n_tpi == 0 || (flags & GMX_FORCE_STATECHANGED))
                {
                    pme_flags = GMX_PME_SPREAD | GMX_PME_SOLVE;

                    if (flags & GMX_FORCE_FORCES)
                    {
                        pme_flags |= GMX_PME_CALC_F;
                    }
                    if (flags & GMX_FORCE_VIRIAL)
                    {
                        pme_flags |= GMX_PME_CALC_ENER_VIR;
                    }
                    if (fr->n_tpi > 0)
                    {
                        /* We don't calculate f, but we do want the potential */
                        pme_flags |= GMX_PME_CALC_POT;
                    }

                    /* With domain decomposition we close the CPU side load
                     * balancing region here, because PME does global
                     * communication that acts as a global barrier.
                     */
                    if (DOMAINDECOMP(cr))
                    {
                        ddCloseBalanceRegionCpu(cr->dd);
                    }

                    wallcycle_start(wcycle, ewcPMEMESH);
                    status = gmx_pme_do(fr->pmedata,
                                        0, md->homenr - fr->n_tpi,
                                        x,
                                        as_rvec_array(forceWithVirial->force_.data()),
                                        md->chargeA, md->chargeB,
                                        md->sqrt_c6A, md->sqrt_c6B,
                                        md->sigmaA, md->sigmaB,
                                        box, cr,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
                                        DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0,
                                        nrnb, wcycle,
                                        ewaldOutput.vir_q, ewaldOutput.vir_lj,
                                        &Vlr_q, &Vlr_lj,
                                        lambda[efptCOUL], lambda[efptVDW],
                                        &ewaldOutput.dvdl[efptCOUL],
                                        &ewaldOutput.dvdl[efptVDW],
                                        pme_flags);
                    *cycles_pme = wallcycle_stop(wcycle, ewcPMEMESH);
                    if (status != 0)
                    {
                        gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
                    }

                    /* We should try to do as little computation after
                     * this as possible, because parallel PME synchronizes
                     * the nodes, so we want all load imbalance of the
                     * rest of the force calculation to be before the PME
                     * call.  DD load balancing is done on the whole time
                     * of the force call (without PME).
                     */
                }
                if (fr->n_tpi > 0)
                {
                    if (EVDW_PME(ir->vdwtype))
                    {
                        gmx_fatal(FARGS, "Test particle insertion not implemented with LJ-PME");
                    }
                    /* Determine the PME grid energy of the test molecule
                     * with the PME grid potential of the other charges.
                     */
                    gmx_pme_calc_energy(fr->pmedata, fr->n_tpi,
                                        x + md->homenr - fr->n_tpi,
                                        md->chargeA + md->homenr - fr->n_tpi,
                                        &Vlr_q);
                }
            }
        }

        if (!EEL_PME(fr->ic->eeltype) && EEL_PME_EWALD(fr->ic->eeltype))
        {
            Vlr_q = do_ewald(ir, x, as_rvec_array(forceWithVirial->force_.data()),
                             md->chargeA, md->chargeB,
                             box, cr, md->homenr,
                             ewaldOutput.vir_q, fr->ic->ewaldcoeff_q,
                             lambda[efptCOUL], &ewaldOutput.dvdl[efptCOUL],
                             fr->ewald_table);
        }

        /* Note that with separate PME nodes we get the real energies later */
        forceWithVirial->addVirialContribution(ewaldOutput.vir_q);
        forceWithVirial->addVirialContribution(ewaldOutput.vir_lj);
        enerd->dvdl_lin[efptCOUL] += ewaldOutput.dvdl[efptCOUL];
        enerd->dvdl_lin[efptVDW]  += ewaldOutput.dvdl[efptVDW];
        enerd->term[F_COUL_RECIP]  = Vlr_q + ewaldOutput.Vcorr_q;
        enerd->term[F_LJ_RECIP]    = Vlr_lj + ewaldOutput.Vcorr_lj;

        if (debug)
        {
            fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n",
                    Vlr_q, ewaldOutput.Vcorr_q, enerd->term[F_COUL_RECIP]);
            pr_rvecs(debug, 0, "vir_el_recip after corr", ewaldOutput.vir_q, DIM);
            pr_rvecs(debug, 0, "fshift after LR Corrections", fr->fshift, SHIFTS);
            fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n",
                    Vlr_lj, ewaldOutput.Vcorr_lj, enerd->term[F_LJ_RECIP]);
            pr_rvecs(debug, 0, "vir_lj_recip after corr", ewaldOutput.vir_lj, DIM);
        }
    }
    else
    {
        /* Is there a reaction-field exclusion correction needed?
         * With the Verlet scheme, exclusion forces are calculated
         * in the non-bonded kernel.
         */
        if (ir->cutoff_scheme != ecutsVERLET && EEL_RF(fr->ic->eeltype))
        {
            real dvdl_rf_excl      = 0;
            enerd->term[F_RF_EXCL] =
                RF_excl_correction(fr, graph, md, excl, x, forceForUseWithShiftForces,
                                   fr->fshift, &pbc, lambda[efptCOUL], &dvdl_rf_excl);

            enerd->dvdl_lin[efptCOUL] += dvdl_rf_excl;
        }
    }
    where();

    if (debug)
    {
        print_nrnb(debug, nrnb);
    }

#if GMX_MPI
    if (TAKETIME)
    {
        t2 = MPI_Wtime();
        MPI_Barrier(cr->mpi_comm_mygroup);
        t3          = MPI_Wtime();
        fr->t_wait += t3-t2;
        if (fr->timesteps == 11)
        {
            char buf[22];
            fprintf(stderr, "* PP load balancing info: rank %d, step %s, rel wait time=%3.0f%% , load string value: %7.2f\n",
                    cr->nodeid, gmx_step_str(fr->timesteps, buf),
                    100*fr->t_wait/(fr->t_wait+fr->t_fnbf),
                    (fr->t_fnbf+fr->t_wait)/fr->t_fnbf);
        }
        fr->timesteps++;
    }
#endif

    if (debug)
    {
        pr_rvecs(debug, 0, "fshift after bondeds", fr->fshift, SHIFTS);
    }

}

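/* Initialize the energy data: clear all energy and dV/dlambda terms, allocate
 * the ngener*ngener group-pair energy matrices and, when n_lambda > 0, the
 * foreign-lambda energy array.
 */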
void init_enerdata(int ngener, int n_lambda, gmx_enerdata_t *enerd)
{
    int i, n2;

    for (i = 0; i < F_NRE; i++)
    {
        enerd->term[i]         = 0;
        enerd->foreign_term[i] = 0;
    }


    for (i = 0; i < efptNR; i++)
    {
        enerd->dvdl_lin[i]     = 0;
        enerd->dvdl_nonlin[i]  = 0;
    }

    n2 = ngener*ngener;
    if (debug)
    {
        fprintf(debug, "Creating %d sized group matrix for energies\n", n2);
    }
    enerd->grpp.nener         = n2;
    enerd->foreign_grpp.nener = n2;
    for (i = 0; (i < egNR); i++)
    {
        snew(enerd->grpp.ener[i], n2);
        snew(enerd->foreign_grpp.ener[i], n2);
    }

    if (n_lambda)
    {
        enerd->n_lambda = 1 + n_lambda;
        snew(enerd->enerpart_lambda, enerd->n_lambda);
    }
    else
    {
        enerd->n_lambda = 0;
    }
}

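/* Free the group-pair energy matrices and the foreign-lambda energy array
 * (if allocated).
 */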
void destroy_enerdata(gmx_enerdata_t *enerd)
{
    int i;

    for (i = 0; (i < egNR); i++)
    {
        sfree(enerd->grpp.ener[i]);
    }

    for (i = 0; (i < egNR); i++)
    {
        sfree(enerd->foreign_grpp.ener[i]);
    }

    if (enerd->n_lambda)
    {
        sfree(enerd->enerpart_lambda);
    }
}

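/* Return the sum of the first n values of v. */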
static real sum_v(int n, real v[])
{
    real t;
    int  i;

    t = 0.0;
    for (i = 0; (i < n); i++)
    {
        t = t + v[i];
    }

    return t;
}

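/* Sum the group-pair energies into their global potential-energy terms and
 * accumulate the total potential energy in epot[F_EPOT].
 */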
void sum_epot(gmx_grppairener_t *grpp, real *epot)
{
    int i;

    /* Accumulate energies */
    epot[F_COUL_SR]  = sum_v(grpp->nener, grpp->ener[egCOULSR]);
    epot[F_LJ]       = sum_v(grpp->nener, grpp->ener[egLJSR]);
    epot[F_LJ14]     = sum_v(grpp->nener, grpp->ener[egLJ14]);
    epot[F_COUL14]   = sum_v(grpp->nener, grpp->ener[egCOUL14]);

    /* The lattice part of the long-range energy doesn't belong to any group
     * and has been added earlier.
     */
    epot[F_BHAM]     = sum_v(grpp->nener, grpp->ener[egBHAMSR]);

    epot[F_EPOT] = 0;
    for (i = 0; (i < F_EPOT); i++)
    {
        if (i != F_DISRESVIOL && i != F_ORIRESDEV)
        {
            epot[F_EPOT] += epot[i];
        }
    }
}

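/* Collect the linear and non-linear dV/dlambda contributions into the
 * (optionally separate) dH/dlambda energy terms and add the linear
 * contributions to the foreign-lambda energy differences.
 */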
void sum_dhdl(gmx_enerdata_t *enerd, gmx::ArrayRef<const real> lambda, t_lambda *fepvals)
{
    int    index;
    double dlam;

    enerd->dvdl_lin[efptVDW] += enerd->term[F_DVDL_VDW];  /* include dispersion correction */
    enerd->term[F_DVDL]       = 0.0;
    for (int i = 0; i < efptNR; i++)
    {
        if (fepvals->separate_dvdl[i])
        {
            /* could this be done more readably/compactly? */
            switch (i)
            {
                case (efptMASS):
                    index = F_DKDL;
                    break;
                case (efptCOUL):
                    index = F_DVDL_COUL;
                    break;
                case (efptVDW):
                    index = F_DVDL_VDW;
                    break;
                case (efptBONDED):
                    index = F_DVDL_BONDED;
                    break;
                case (efptRESTRAINT):
                    index = F_DVDL_RESTRAINT;
                    break;
                default:
                    index = F_DVDL;
                    break;
            }
            enerd->term[index] = enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
            if (debug)
            {
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[i], i, enerd->term[index], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
            }
        }
        else
        {
            enerd->term[F_DVDL] += enerd->dvdl_lin[i] + enerd->dvdl_nonlin[i];
            if (debug)
            {
                fprintf(debug, "dvdl-%s[%2d]: %f: non-linear %f + linear %f\n",
                        efpt_names[0], i, enerd->term[F_DVDL], enerd->dvdl_nonlin[i], enerd->dvdl_lin[i]);
            }
        }
    }

    /* Notes on the foreign lambda free energy difference evaluation:
     * Adding the potential and ekin terms that depend linearly on lambda
     * as delta lam * dvdl to the energy differences is exact.
     * For the constraints this is not exact, but we have no other option
     * without literally changing the lengths and reevaluating the energies at each step.
     * (try to remedy this post 4.6 - MRS)
     */
    if (fepvals->separate_dvdl[efptBONDED])
    {
        enerd->term[F_DVDL_BONDED] += enerd->term[F_DVDL_CONSTR];
    }
    else
    {
        enerd->term[F_DVDL] += enerd->term[F_DVDL_CONSTR];
    }
    enerd->term[F_DVDL_CONSTR] = 0;

    for (int i = 0; i < fepvals->n_lambda; i++)
    {
        /* note we are iterating over fepvals here!
           For the current lam, dlam = 0 automatically,
           so we don't need to add anything to the
           enerd->enerpart_lambda[0] */

        /* we don't need to worry about dvdl_lin contributions to dE at
           current lambda, because the contributions to the current
           lambda are automatically zeroed */

        for (size_t j = 0; j < lambda.size(); j++)
        {
            /* Note that this loop is over all dhdl components, not just the separated ones */
            dlam = (fepvals->all_lambda[j][i] - lambda[j]);
            enerd->enerpart_lambda[i+1] += dlam*enerd->dvdl_lin[j];
            if (debug)
            {
                fprintf(debug, "enerdiff lam %g: (%15s), non-linear %f linear %f*%f\n",
                        fepvals->all_lambda[j][i], efpt_names[j],
                        (enerd->enerpart_lambda[i+1] - enerd->enerpart_lambda[0]),
                        dlam, enerd->dvdl_lin[j]);
            }
        }
    }
}


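/* Zero all foreign-lambda group-pair energies and potential-energy terms. */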
void reset_foreign_enerdata(gmx_enerdata_t *enerd)
{
    int  i, j;

    /* First reset all foreign energy components.  Foreign energies are always
       called on neighbor search steps. */
    for (i = 0; (i < egNR); i++)
    {
        for (j = 0; (j < enerd->grpp.nener); j++)
        {
            enerd->foreign_grpp.ener[i][j] = 0.0;
        }
    }

    /* potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
    {
        enerd->foreign_term[i] = 0.0;
    }
}

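/* Zero all group-pair energies, dV/dlambda terms, potential-energy terms
 * and foreign-lambda energies.
 */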
void reset_enerdata(gmx_enerdata_t *enerd)
{
    int      i, j;

    /* First reset all energy components. */
    for (i = 0; (i < egNR); i++)
    {
        for (j = 0; (j < enerd->grpp.nener); j++)
        {
            enerd->grpp.ener[i][j] = 0.0;
        }
    }
    for (i = 0; i < efptNR; i++)
    {
        enerd->dvdl_lin[i]    = 0.0;
        enerd->dvdl_nonlin[i] = 0.0;
    }

    /* Normal potential energy components */
    for (i = 0; (i <= F_EPOT); i++)
    {
        enerd->term[i] = 0.0;
    }
    enerd->term[F_DVDL]            = 0.0;
    enerd->term[F_DVDL_COUL]       = 0.0;
    enerd->term[F_DVDL_VDW]        = 0.0;
    enerd->term[F_DVDL_BONDED]     = 0.0;
    enerd->term[F_DVDL_RESTRAINT]  = 0.0;
    enerd->term[F_DKDL]            = 0.0;
    if (enerd->n_lambda > 0)
    {
        for (i = 0; i < enerd->n_lambda; i++)
        {
            enerd->enerpart_lambda[i] = 0.0;
        }
    }
    /* reset foreign energy data - separate function since we also call it elsewhere */
    reset_foreign_enerdata(enerd);
}