Remove use of graph in do_force()
[alexxy/gromacs.git] / src/gromacs/mdlib/force.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017 by the GROMACS development team.
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "force.h"

#include <cassert>
#include <cmath>
#include <cstring>

#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/ewald/ewald.h"
#include "gromacs/ewald/long_range_correction.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/forcerec_threading.h"
#include "gromacs/mdlib/rf_util.h"
#include "gromacs/mdlib/wall.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/forcerec.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/interaction_const.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/mdatom.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/smalloc.h"

using gmx::ArrayRef;
using gmx::RVec;

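/*! \brief Zeroes the energy, dV/dlambda and virial accumulators of one Ewald correction thread buffer. */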
static void clearEwaldThreadOutput(ewald_corr_thread_t* ewc_t)
{
    ewc_t->Vcorr_q        = 0;
    ewc_t->Vcorr_lj       = 0;
    ewc_t->dvdl[efptCOUL] = 0;
    ewc_t->dvdl[efptVDW]  = 0;
    clear_mat(ewc_t->vir_q);
    clear_mat(ewc_t->vir_lj);
}

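/*! \brief Reduces the Ewald correction outputs of all threads into the buffer of thread 0. */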
static void reduceEwaldThreadOutput(int nthreads, ewald_corr_thread_t* ewc_t)
{
    ewald_corr_thread_t& dest = ewc_t[0];

    for (int t = 1; t < nthreads; t++)
    {
        dest.Vcorr_q += ewc_t[t].Vcorr_q;
        dest.Vcorr_lj += ewc_t[t].Vcorr_lj;
        dest.dvdl[efptCOUL] += ewc_t[t].dvdl[efptCOUL];
        dest.dvdl[efptVDW] += ewc_t[t].dvdl[efptVDW];
        m_add(dest.vir_q, ewc_t[t].vir_q, dest.vir_q);
        m_add(dest.vir_lj, ewc_t[t].vir_lj, dest.vir_lj);
    }
}

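/* Computes the CPU-side "lowlevel" force contributions: wall potentials, listed
 * (bonded) interactions, Ewald corrections and, when this rank does PME on the
 * CPU, reciprocal-space PME or plain Ewald summation.
 */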
void do_force_lowlevel(t_forcerec*                          fr,
                       const t_inputrec*                    ir,
                       const InteractionDefinitions&        idef,
                       const t_commrec*                     cr,
                       const gmx_multisim_t*                ms,
                       t_nrnb*                              nrnb,
                       gmx_wallcycle_t                      wcycle,
                       const t_mdatoms*                     md,
                       gmx::ArrayRefWithPadding<const RVec> coordinates,
                       ArrayRef<const RVec>                 xWholeMolecules,
                       history_t*                           hist,
                       gmx::ForceOutputs*                   forceOutputs,
                       gmx_enerdata_t*                      enerd,
                       t_fcdata*                            fcd,
                       const matrix                         box,
                       const real*                          lambda,
                       const rvec*                          mu_tot,
                       const gmx::StepWorkload&             stepWork,
                       const DDBalanceRegionHandler&        ddBalanceRegionHandler)
{
    // TODO: Replace all uses of x by const coordinates
    const rvec* x = as_rvec_array(coordinates.paddedArrayRef().data());

    auto& forceWithVirial = forceOutputs->forceWithVirial();

    /* Call the short range functions all in one go. */

    if (ir->nwall)
    {
        /* foreign lambda component for walls */
        real dvdl_walls = do_walls(*ir, *fr, box, *md, x, &forceWithVirial, lambda[efptVDW],
                                   enerd->grpp.ener[egLJSR].data(), nrnb);
        enerd->dvdl_lin[efptVDW] += dvdl_walls;

        for (auto& dhdl : enerd->dhdlLambda)
        {
            dhdl += dvdl_walls;
        }
    }

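    /* Listed (bonded) interactions are computed on the CPU; PBC information is
     * set up below only when molecules can be split over periodic boundaries.
     */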
    {
        t_pbc pbc;

        /* Check whether we need to take into account PBC in listed interactions. */
        const auto needPbcForListedForces =
                fr->bMolPBC && stepWork.computeListedForces && haveCpuListedForces(*fr, idef, *fcd);
        if (needPbcForListedForces)
        {
            /* Since all atoms are in the rectangular or triclinic unit-cell,
             * only single box vector shifts (2 in x) are required.
             */
            set_pbc_dd(&pbc, fr->pbcType, DOMAINDECOMP(cr) ? cr->dd->numCells : nullptr, TRUE, box);
        }

        do_force_listed(wcycle, box, ir->fepvals, cr, ms, idef, x, xWholeMolecules, hist,
                        forceOutputs, fr, &pbc, enerd, nrnb, lambda, md, fcd,
                        DOMAINDECOMP(cr) ? cr->dd->globalAtomIndices.data() : nullptr, stepWork);
    }

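    /* Reciprocal-space PME work is done in this function only when this rank
     * has PME duty and PME is set to run on the CPU; otherwise it is handled
     * by separate PME ranks or by the GPU code path.
     */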
    const bool computePmeOnCpu = (EEL_PME(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
                                 && thisRankHasDuty(cr, DUTY_PME)
                                 && (pme_run_mode(fr->pmedata) == PmeRunMode::CPU);

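    /* The Ewald surface term (present, e.g., with non-tin-foil boundary
     * conditions or slab geometry) is always computed on the CPU below.
     */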
    const bool haveEwaldSurfaceTerm = haveEwaldSurfaceContribution(*ir);

    /* Do long-range electrostatics and/or LJ-PME
     * and compute PME surface terms when necessary.
     */
    if (computePmeOnCpu || fr->ic->eeltype == eelEWALD || haveEwaldSurfaceTerm)
    {
        int  status = 0;
        real Vlr_q = 0, Vlr_lj = 0;

        /* We reduce all virial, dV/dlambda and energy contributions, except
         * for the reciprocal energies (Vlr_q, Vlr_lj), into the same struct.
         */
        ewald_corr_thread_t& ewaldOutput = fr->ewc_t[0];
        clearEwaldThreadOutput(&ewaldOutput);

        if (EEL_PME_EWALD(fr->ic->eeltype) || EVDW_PME(fr->ic->vdwtype))
        {
            /* Calculate the Ewald surface force and energy contributions, when necessary */
            if (haveEwaldSurfaceTerm)
            {
                wallcycle_sub_start(wcycle, ewcsEWALD_CORRECTION);

                int nthreads = fr->nthread_ewc;
#pragma omp parallel for num_threads(nthreads) schedule(static)
                for (int t = 0; t < nthreads; t++)
                {
                    try
                    {
                        ewald_corr_thread_t& ewc_t = fr->ewc_t[t];
                        if (t > 0)
                        {
                            clearEwaldThreadOutput(&ewc_t);
                        }

                        /* Threading is only supported with the Verlet cut-off
                         * scheme and then only single particle forces (no
                         * exclusion forces) are calculated, so we can store
                         * the forces in the normal, single forceWithVirial->force_ array.
                         */
                        ewald_LRcorrection(md->homenr, cr, nthreads, t, *fr, *ir, md->chargeA,
                                           md->chargeB, (md->nChargePerturbed != 0), x, box, mu_tot,
                                           as_rvec_array(forceWithVirial.force_.data()),
                                           &ewc_t.Vcorr_q, lambda[efptCOUL], &ewc_t.dvdl[efptCOUL]);
                    }
                    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
                }
                if (nthreads > 1)
                {
                    reduceEwaldThreadOutput(nthreads, fr->ewc_t);
                }
                wallcycle_sub_stop(wcycle, ewcsEWALD_CORRECTION);
            }

            if (EEL_PME_EWALD(fr->ic->eeltype) && fr->n_tpi == 0)
            {
                /* This is not in a subcounter because it takes a
                   negligible and constant-sized amount of time */
                ewaldOutput.Vcorr_q += ewald_charge_correction(
                        cr, fr, lambda[efptCOUL], box, &ewaldOutput.dvdl[efptCOUL], ewaldOutput.vir_q);
            }

            if (computePmeOnCpu)
            {
                /* Do reciprocal PME for Coulomb and/or LJ. */
                assert(fr->n_tpi >= 0);
                if (fr->n_tpi == 0 || stepWork.stateChanged)
                {
                    /* With domain decomposition we close the CPU side load
                     * balancing region here, because PME does global
                     * communication that acts as a global barrier.
                     */
                    ddBalanceRegionHandler.closeAfterForceComputationCpu();

                    wallcycle_start(wcycle, ewcPMEMESH);
                    status = gmx_pme_do(
                            fr->pmedata,
                            gmx::constArrayRefFromArray(coordinates.unpaddedConstArrayRef().data(),
                                                        md->homenr - fr->n_tpi),
                            forceWithVirial.force_, md->chargeA, md->chargeB, md->sqrt_c6A,
                            md->sqrt_c6B, md->sigmaA, md->sigmaB, box, cr,
                            DOMAINDECOMP(cr) ? dd_pme_maxshift_x(cr->dd) : 0,
                            DOMAINDECOMP(cr) ? dd_pme_maxshift_y(cr->dd) : 0, nrnb, wcycle,
                            ewaldOutput.vir_q, ewaldOutput.vir_lj, &Vlr_q, &Vlr_lj,
                            lambda[efptCOUL], lambda[efptVDW], &ewaldOutput.dvdl[efptCOUL],
                            &ewaldOutput.dvdl[efptVDW], stepWork);
                    wallcycle_stop(wcycle, ewcPMEMESH);
                    if (status != 0)
                    {
                        gmx_fatal(FARGS, "Error %d in reciprocal PME routine", status);
                    }

                    /* We should try to do as little computation after
                     * this as possible, because parallel PME synchronizes
                     * the nodes, so we want all load imbalance of the
                     * rest of the force calculation to be before the PME
                     * call.  DD load balancing is done on the whole time
                     * of the force call (without PME).
                     */
                }
                if (fr->n_tpi > 0)
                {
                    /* Determine the PME grid energy of the test molecule
                     * with the PME grid potential of the other charges.
                     */
                    gmx_pme_calc_energy(
                            fr->pmedata,
                            coordinates.unpaddedConstArrayRef().subArray(md->homenr - fr->n_tpi, fr->n_tpi),
                            gmx::arrayRefFromArray(md->chargeA + md->homenr - fr->n_tpi, fr->n_tpi),
                            &Vlr_q);
                }
            }
        }

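        /* Classical (non-PME) Ewald reciprocal-space summation, done entirely on the CPU. */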
        if (fr->ic->eeltype == eelEWALD)
        {
            Vlr_q = do_ewald(ir, x, as_rvec_array(forceWithVirial.force_.data()), md->chargeA,
                             md->chargeB, box, cr, md->homenr, ewaldOutput.vir_q, fr->ic->ewaldcoeff_q,
                             lambda[efptCOUL], &ewaldOutput.dvdl[efptCOUL], fr->ewald_table);
        }

        /* Note that with separate PME nodes we get the real energies later */
        // TODO it would be simpler if we just accumulated a single
        // long-range virial contribution.
        forceWithVirial.addVirialContribution(ewaldOutput.vir_q);
        forceWithVirial.addVirialContribution(ewaldOutput.vir_lj);
        enerd->dvdl_lin[efptCOUL] += ewaldOutput.dvdl[efptCOUL];
        enerd->dvdl_lin[efptVDW] += ewaldOutput.dvdl[efptVDW];
        enerd->term[F_COUL_RECIP] = Vlr_q + ewaldOutput.Vcorr_q;
        enerd->term[F_LJ_RECIP]   = Vlr_lj + ewaldOutput.Vcorr_lj;

        for (auto& dhdl : enerd->dhdlLambda)
        {
            dhdl += ewaldOutput.dvdl[efptVDW] + ewaldOutput.dvdl[efptCOUL];
        }

        if (debug)
        {
            fprintf(debug, "Vlr_q = %g, Vcorr_q = %g, Vlr_corr_q = %g\n", Vlr_q,
                    ewaldOutput.Vcorr_q, enerd->term[F_COUL_RECIP]);
            pr_rvecs(debug, 0, "vir_el_recip after corr", ewaldOutput.vir_q, DIM);
            rvec* fshift = as_rvec_array(forceOutputs->forceWithShiftForces().shiftForces().data());
            pr_rvecs(debug, 0, "fshift after LR Corrections", fshift, SHIFTS);
            fprintf(debug, "Vlr_lj: %g, Vcorr_lj = %g, Vlr_corr_lj = %g\n", Vlr_lj,
                    ewaldOutput.Vcorr_lj, enerd->term[F_LJ_RECIP]);
            pr_rvecs(debug, 0, "vir_lj_recip after corr", ewaldOutput.vir_lj, DIM);
        }
    }

    if (debug)
    {
        print_nrnb(debug, nrnb);
    }

    if (debug)
    {
        rvec* fshift = as_rvec_array(forceOutputs->forceWithShiftForces().shiftForces().data());
        pr_rvecs(debug, 0, "fshift after bondeds", fshift, SHIFTS);
    }
}