Apply clang-tidy-8 readability-uppercase-literal-suffix
src/gromacs/mdlib/sim_util.cpp
/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2004, The GROMACS development team.
 * Copyright (c) 2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
#include "gmxpre.h"

#include "config.h"

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

#include <array>

#include "gromacs/awh/awh.h"
#include "gromacs/domdec/dlbtiming.h"
#include "gromacs/domdec/domdec.h"
#include "gromacs/domdec/domdec_struct.h"
#include "gromacs/domdec/partition.h"
#include "gromacs/essentialdynamics/edsam.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/gmxlib/chargegroup.h"
#include "gromacs/gmxlib/network.h"
#include "gromacs/gmxlib/nrnb.h"
#include "gromacs/gmxlib/nonbonded/nb_free_energy.h"
#include "gromacs/gmxlib/nonbonded/nb_kernel.h"
#include "gromacs/gmxlib/nonbonded/nonbonded.h"
#include "gromacs/gpu_utils/gpu_utils.h"
#include "gromacs/imd/imd.h"
#include "gromacs/listed_forces/bonded.h"
#include "gromacs/listed_forces/disre.h"
#include "gromacs/listed_forces/gpubonded.h"
#include "gromacs/listed_forces/listed_forces.h"
#include "gromacs/listed_forces/manage_threading.h"
#include "gromacs/listed_forces/orires.h"
#include "gromacs/math/arrayrefwithpadding.h"
#include "gromacs/math/functions.h"
#include "gromacs/math/units.h"
#include "gromacs/math/vec.h"
#include "gromacs/math/vecdump.h"
#include "gromacs/mdlib/calcmu.h"
#include "gromacs/mdlib/calcvir.h"
#include "gromacs/mdlib/constr.h"
#include "gromacs/mdlib/enerdata_utils.h"
#include "gromacs/mdlib/force.h"
#include "gromacs/mdlib/forcerec.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/ppforceworkload.h"
#include "gromacs/mdlib/qmmm.h"
#include "gromacs/mdlib/update.h"
#include "gromacs/mdtypes/commrec.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/iforceprovider.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/md_enums.h"
#include "gromacs/mdtypes/state.h"
#include "gromacs/nbnxm/atomdata.h"
#include "gromacs/nbnxm/gpu_data_mgmt.h"
#include "gromacs/nbnxm/nbnxm.h"
#include "gromacs/pbcutil/ishift.h"
#include "gromacs/pbcutil/mshift.h"
#include "gromacs/pbcutil/pbc.h"
#include "gromacs/pulling/pull.h"
#include "gromacs/pulling/pull_rotation.h"
#include "gromacs/timing/cyclecounter.h"
#include "gromacs/timing/gpu_timing.h"
#include "gromacs/timing/wallcycle.h"
#include "gromacs/timing/wallcyclereporting.h"
#include "gromacs/timing/walltime_accounting.h"
#include "gromacs/topology/topology.h"
#include "gromacs/utility/arrayref.h"
#include "gromacs/utility/basedefinitions.h"
#include "gromacs/utility/cstringutil.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/gmxmpi.h"
#include "gromacs/utility/logger.h"
#include "gromacs/utility/smalloc.h"
#include "gromacs/utility/strconvert.h"
#include "gromacs/utility/sysinfo.h"

using gmx::ForceOutputs;

// TODO: this environment variable allows us to verify before release
// that on less common architectures the total cost of polling is not larger than
// a blocking wait (so polling does not introduce overhead when the static
// PME-first ordering would suffice).
static const bool c_disableAlternatingWait = (getenv("GMX_DISABLE_ALTERNATING_GPU_WAIT") != nullptr);

// environment variable to enable GPU buffer ops, to allow incremental and optional
// introduction of this functionality.
// TODO eventually tie this in with other existing GPU flags.
static const bool c_enableGpuBufOps = (getenv("GMX_USE_GPU_BUFFER_OPS") != nullptr);
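// An illustrative way to opt in (the mdrun options shown are examples only):
//   GMX_USE_GPU_BUFFER_OPS=1 gmx mdrun -nb gpu
// Both variables are read once during static initialization, so they need to
// be set in the environment before mdrun starts.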

static void sum_forces(rvec f[], gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int      end = forceToAdd.size();
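
    // Note: f and forceToAdd cover the same local atom range, and
    // schedule(static) below gives each thread one contiguous slice of it.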
    int gmx_unused nt = gmx_omp_nthreads_get(emntDefault);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        rvec_inc(f[i], forceToAdd[i]);
    }
}

static void calc_virial(int start, int homenr, const rvec x[],
                        const gmx::ForceWithShiftForces &forceWithShiftForces,
                        tensor vir_part, const t_graph *graph, const matrix box,
                        t_nrnb *nrnb, const t_forcerec *fr, int ePBC)
{
    /* The short-range virial from surrounding boxes */
    const rvec *fshift = as_rvec_array(forceWithShiftForces.shiftForces().data());
    calc_vir(SHIFTS, fr->shift_vec, fshift, vir_part, ePBC == epbcSCREW, box);
    inc_nrnb(nrnb, eNR_VIRIAL, SHIFTS);
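
    /* Note on the call above: forces on periodic images are accumulated
     * per shift vector, so this part of the virial can be computed from
     * just the SHIFTS shift-vector/shift-force pairs rather than from a
     * per-atom sum; see calc_vir for the exact convention.
     */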

    /* Calculate partial virial, for local atoms only, based on short range.
     * Total virial is computed in global_stat, called from do_md
     */
    const rvec *f = as_rvec_array(forceWithShiftForces.force().data());
    f_calc_vir(start, start+homenr, x, f, vir_part, graph, box);
    inc_nrnb(nrnb, eNR_VIRIAL, homenr);

    if (debug)
    {
        pr_rvecs(debug, 0, "vir_part", vir_part, DIM);
    }
}

static void pull_potential_wrapper(const t_commrec *cr,
                                   const t_inputrec *ir,
                                   const matrix box, gmx::ArrayRef<const gmx::RVec> x,
                                   gmx::ForceWithVirial *force,
                                   const t_mdatoms *mdatoms,
                                   gmx_enerdata_t *enerd,
                                   pull_t *pull_work,
                                   const real *lambda,
                                   double t,
                                   gmx_wallcycle_t wcycle)
{
    t_pbc  pbc;
    real   dvdl;

    /* Calculate the center of mass forces, this requires communication,
     * which is why pull_potential is called close to other communication.
     */
    wallcycle_start(wcycle, ewcPULLPOT);
    set_pbc(&pbc, ir->ePBC, box);
    dvdl                     = 0;
    enerd->term[F_COM_PULL] +=
        pull_potential(pull_work, mdatoms, &pbc,
                       cr, t, lambda[efptRESTRAINT], as_rvec_array(x.data()), force, &dvdl);
    enerd->dvdl_lin[efptRESTRAINT] += dvdl;
    wallcycle_stop(wcycle, ewcPULLPOT);
}

static void pme_receive_force_ener(const t_commrec      *cr,
                                   gmx::ForceWithVirial *forceWithVirial,
                                   gmx_enerdata_t       *enerd,
                                   gmx_wallcycle_t       wcycle)
{
    real   e_q, e_lj, dvdl_q, dvdl_lj;
    float  cycles_ppdpme, cycles_seppme;

    cycles_ppdpme = wallcycle_stop(wcycle, ewcPPDURINGPME);
    dd_cycles_add(cr->dd, cycles_ppdpme, ddCyclPPduringPME);

    /* In case of node-splitting, the PP nodes receive the long-range
     * forces, virial and energy from the PME nodes here.
     */
    wallcycle_start(wcycle, ewcPP_PMEWAITRECVF);
    dvdl_q  = 0;
    dvdl_lj = 0;
    gmx_pme_receive_f(cr, forceWithVirial, &e_q, &e_lj, &dvdl_q, &dvdl_lj,
                      &cycles_seppme);
    enerd->term[F_COUL_RECIP] += e_q;
    enerd->term[F_LJ_RECIP]   += e_lj;
    enerd->dvdl_lin[efptCOUL] += dvdl_q;
    enerd->dvdl_lin[efptVDW]  += dvdl_lj;

    if (wcycle)
    {
        dd_cycles_add(cr->dd, cycles_seppme, ddCyclPME);
    }
    wallcycle_stop(wcycle, ewcPP_PMEWAITRECVF);
}

static void print_large_forces(FILE            *fp,
                               const t_mdatoms *md,
                               const t_commrec *cr,
                               int64_t          step,
                               real             forceTolerance,
                               const rvec      *x,
                               const rvec      *f)
{
    real           force2Tolerance = gmx::square(forceTolerance);
    gmx::index     numNonFinite    = 0;
    for (int i = 0; i < md->homenr; i++)
    {
        real force2    = norm2(f[i]);
        bool nonFinite = !std::isfinite(force2);
        if (force2 >= force2Tolerance || nonFinite)
        {
            fprintf(fp, "step %" PRId64 " atom %6d  x %8.3f %8.3f %8.3f  force %12.5e\n",
                    step,
                    ddglatnr(cr->dd, i), x[i][XX], x[i][YY], x[i][ZZ], std::sqrt(force2));
        }
        if (nonFinite)
        {
            numNonFinite++;
        }
    }
    if (numNonFinite > 0)
    {
        /* Note that with MPI this fatal call on one rank might interrupt
         * the printing on other ranks. But we can only avoid that with
         * an expensive MPI barrier that we would need at each step.
         */
        gmx_fatal(FARGS, "At step %" PRId64 " detected non-finite forces on %td atoms", step, numNonFinite);
    }
}

static void post_process_forces(const t_commrec           *cr,
                                int64_t                    step,
                                t_nrnb                    *nrnb,
                                gmx_wallcycle_t            wcycle,
                                const gmx_localtop_t      *top,
                                const matrix               box,
                                const rvec                 x[],
                                ForceOutputs              *forceOutputs,
                                tensor                     vir_force,
                                const t_mdatoms           *mdatoms,
                                const t_graph             *graph,
                                const t_forcerec          *fr,
                                const gmx_vsite_t         *vsite,
                                int                        flags)
{
    rvec *f = as_rvec_array(forceOutputs->forceWithShiftForces().force().data());

    if (fr->haveDirectVirialContributions)
    {
        auto &forceWithVirial = forceOutputs->forceWithVirial();
        rvec *fDirectVir      = as_rvec_array(forceWithVirial.force_.data());

        if (vsite)
        {
            /* Spread the mesh force on virtual sites to the other particles...
             * This is parallelized. MPI communication is performed
             * if the constructing atoms aren't local.
             */
            matrix virial = { { 0 } };
            spread_vsite_f(vsite, x, fDirectVir, nullptr,
                           (flags & GMX_FORCE_VIRIAL) != 0, virial,
                           nrnb,
                           &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
            forceWithVirial.addVirialContribution(virial);
        }

        if (flags & GMX_FORCE_VIRIAL)
        {
            /* Now add the forces, this is local */
            sum_forces(f, forceWithVirial.force_);

            /* Add the direct virial contributions */
            GMX_ASSERT(forceWithVirial.computeVirial_, "forceWithVirial should request virial computation when we request the virial");
            m_add(vir_force, forceWithVirial.getVirial(), vir_force);

            if (debug)
            {
                pr_rvecs(debug, 0, "vir_force", vir_force, DIM);
            }
        }
    }

    if (fr->print_force >= 0)
    {
        print_large_forces(stderr, mdatoms, cr, step, fr->print_force, x, f);
    }
}

static void do_nb_verlet(t_forcerec                       *fr,
                         const interaction_const_t        *ic,
                         gmx_enerdata_t                   *enerd,
                         const int                         flags,
                         const Nbnxm::InteractionLocality  ilocality,
                         const int                         clearF,
                         const int64_t                     step,
                         t_nrnb                           *nrnb,
                         gmx_wallcycle_t                   wcycle)
{
    if (!(flags & GMX_FORCE_NONBONDED))
    {
        /* skip non-bonded calculation */
        return;
    }

    nonbonded_verlet_t *nbv  = fr->nbv.get();

    /* GPU kernel launch overhead is already timed separately */
    if (fr->cutoff_scheme != ecutsVERLET)
    {
        gmx_incons("Invalid cut-off scheme passed!");
    }

    if (!nbv->useGpu())
    {
        /* When dynamic pair-list pruning is requested, we need to prune
         * at nstlistPrune steps.
         */
        if (nbv->isDynamicPruningStepCpu(step))
        {
            /* Prune the pair-list beyond fr->ic->rlistPrune using
             * the current coordinates of the atoms.
             */
            wallcycle_sub_start(wcycle, ewcsNONBONDED_PRUNING);
            nbv->dispatchPruneKernelCpu(ilocality, fr->shift_vec);
            wallcycle_sub_stop(wcycle, ewcsNONBONDED_PRUNING);
        }
    }

    nbv->dispatchNonbondedKernel(ilocality, *ic, flags, clearF, *fr, enerd, nrnb);
}

static inline void clear_rvecs_omp(int n, rvec v[])
{
    int nth = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, n);

    /* Note that we would like to avoid this conditional by putting it
     * into the omp pragma instead, but then we still take the full
     * omp parallel for overhead (at least with gcc5).
     */
    if (nth == 1)
    {
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
    else
    {
#pragma omp parallel for num_threads(nth) schedule(static)
        for (int i = 0; i < n; i++)
        {
            clear_rvec(v[i]);
        }
    }
}

/*! \brief Return an estimate of the average kinetic energy or 0 when unreliable
 *
 * \param groupOptions  Group options, containing T-coupling options
 */
static real averageKineticEnergyEstimate(const t_grpopts &groupOptions)
{
    real nrdfCoupled   = 0;
    real nrdfUncoupled = 0;
    real kineticEnergy = 0;
    for (int g = 0; g < groupOptions.ngtc; g++)
    {
        if (groupOptions.tau_t[g] >= 0)
        {
            nrdfCoupled   += groupOptions.nrdf[g];
            kineticEnergy += groupOptions.nrdf[g]*0.5*groupOptions.ref_t[g]*BOLTZ;
        }
        else
        {
            nrdfUncoupled += groupOptions.nrdf[g];
        }
    }
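
    /* The sum above assumes equipartition: each T-coupled degree of freedom
     * contributes 0.5*BOLTZ*ref_t on average. The return value below rescales
     * that sum to also cover the uncoupled degrees of freedom, assuming they
     * are at a comparable temperature.
     */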

    /* This conditional with > also catches nrdf=0 */
    if (nrdfCoupled > nrdfUncoupled)
    {
        return kineticEnergy*(nrdfCoupled + nrdfUncoupled)/nrdfCoupled;
    }
    else
    {
        return 0;
    }
}

/*! \brief This routine checks that the potential energy is finite.
 *
 * Always checks that the potential energy is finite. If step equals
 * inputrec.init_step also checks that the magnitude of the potential energy
 * is reasonable. Terminates with a fatal error when a check fails.
 * Note that passing this check does not guarantee finite forces,
 * since those use slightly different arithmetic. But in most cases
 * there is just a narrow coordinate range where forces are not finite
 * and energies are finite.
 *
 * \param[in] step      The step number, used for checking and printing
 * \param[in] enerd     The energy data; the non-bonded group energies need to be added to enerd.term[F_EPOT] before calling this routine
 * \param[in] inputrec  The input record
 */
static void checkPotentialEnergyValidity(int64_t               step,
                                         const gmx_enerdata_t &enerd,
                                         const t_inputrec     &inputrec)
{
    /* Threshold valid for comparing absolute potential energy against
     * the kinetic energy. Normally one should not consider absolute
     * potential energy values, but with a factor of one million
     * we should never get false positives.
     */
    constexpr real c_thresholdFactor = 1e6;

    bool           energyIsNotFinite    = !std::isfinite(enerd.term[F_EPOT]);
    real           averageKineticEnergy = 0;
    /* We only check for large potential energy at the initial step,
     * because that is by far the most likely step for this to occur
     * and because computing the average kinetic energy is not free.
     * Note: nstcalcenergy >> 1 often does not allow catching large energies
     * before they become NaN.
     */
    if (step == inputrec.init_step && EI_DYNAMICS(inputrec.eI))
    {
        averageKineticEnergy = averageKineticEnergyEstimate(inputrec.opts);
    }

    if (energyIsNotFinite || (averageKineticEnergy > 0 &&
                              enerd.term[F_EPOT] > c_thresholdFactor*averageKineticEnergy))
    {
        gmx_fatal(FARGS, "Step %" PRId64 ": The total potential energy is %g, which is %s. The LJ and electrostatic contributions to the energy are %g and %g, respectively. A %s potential energy can be caused by overlapping interactions in bonded interactions or very large%s coordinate values. Usually this is caused by a badly- or non-equilibrated initial configuration, incorrect interactions or parameters in the topology.",
                  step,
                  enerd.term[F_EPOT],
                  energyIsNotFinite ? "not finite" : "extremely high",
                  enerd.term[F_LJ],
                  enerd.term[F_COUL_SR],
                  energyIsNotFinite ? "non-finite" : "very high",
                  energyIsNotFinite ? " or NaN" : "");
    }
}

/*! \brief Return true if there are special forces computed this step.
 *
 * The conditionals exactly correspond to those in computeSpecialForces().
 */
static bool
haveSpecialForces(const t_inputrec              *inputrec,
                  ForceProviders                *forceProviders,
                  const pull_t                  *pull_work,
                  int                            forceFlags,
                  const gmx_edsam               *ed)
{
    const bool computeForces = (forceFlags & GMX_FORCE_FORCES) != 0;

    return
        ((computeForces && forceProviders->hasForceProvider()) ||         // forceProviders
         (inputrec->bPull && pull_have_potential(pull_work)) ||           // pull
         inputrec->bRot ||                                                // enforced rotation
         (ed != nullptr) ||                                               // flooding
         (inputrec->bIMD && computeForces));                              // IMD
}

/*! \brief Compute forces and/or energies for special algorithms
 *
 * The intention is to collect all calls to algorithms that compute
 * forces on local atoms only and that do not contribute to the local
 * virial sum (but add their virial contribution separately).
 * Eventually these should likely all become ForceProviders.
 * Within this function the intention is to have algorithms that do
 * global communication at the end, so global barriers within the MD loop
 * are as close together as possible.
 *
 * \param[in]     fplog            The log file
 * \param[in]     cr               The communication record
 * \param[in]     inputrec         The input record
 * \param[in]     awh              The Awh module (nullptr if none in use).
 * \param[in]     enforcedRotation Enforced rotation module.
 * \param[in]     imdSession       The IMD session
 * \param[in]     pull_work        The pull work structure.
 * \param[in]     step             The current MD step
 * \param[in]     t                The current time
 * \param[in,out] wcycle           Wallcycle accounting struct
 * \param[in,out] forceProviders   Pointer to a list of force providers
 * \param[in]     box              The unit cell
 * \param[in]     x                The coordinates
 * \param[in]     mdatoms          Per atom properties
 * \param[in]     lambda           Array of free-energy lambda values
 * \param[in]     forceFlags       Flags that tell whether we should compute forces/energies/virial
 * \param[in,out] forceWithVirial  Force and virial buffers
 * \param[in,out] enerd            Energy buffer
 * \param[in,out] ed               Essential dynamics pointer
 * \param[in]     bNS              Tells if we did neighbor searching this step, used for ED sampling
 *
 * \todo Remove bNS, which is used incorrectly.
 * \todo Convert all other algorithms called here to ForceProviders.
 */
static void
computeSpecialForces(FILE                          *fplog,
                     const t_commrec               *cr,
                     const t_inputrec              *inputrec,
                     gmx::Awh                      *awh,
                     gmx_enfrot                    *enforcedRotation,
                     gmx::ImdSession               *imdSession,
                     pull_t                        *pull_work,
                     int64_t                        step,
                     double                         t,
                     gmx_wallcycle_t                wcycle,
                     ForceProviders                *forceProviders,
                     const matrix                   box,
                     gmx::ArrayRef<const gmx::RVec> x,
                     const t_mdatoms               *mdatoms,
                     real                          *lambda,
                     int                            forceFlags,
                     gmx::ForceWithVirial          *forceWithVirial,
                     gmx_enerdata_t                *enerd,
                     gmx_edsam                     *ed,
                     gmx_bool                       bNS)
{
    const bool computeForces = (forceFlags & GMX_FORCE_FORCES) != 0;

    /* NOTE: Currently all ForceProviders only provide forces.
     *       When they also provide energies, remove this conditional.
     */
    if (computeForces)
    {
        gmx::ForceProviderInput  forceProviderInput(x, *mdatoms, t, box, *cr);
        gmx::ForceProviderOutput forceProviderOutput(forceWithVirial, enerd);

        /* Collect forces from modules */
        forceProviders->calculateForces(forceProviderInput, &forceProviderOutput);
    }

    if (inputrec->bPull && pull_have_potential(pull_work))
    {
        pull_potential_wrapper(cr, inputrec, box, x,
                               forceWithVirial,
                               mdatoms, enerd, pull_work, lambda, t,
                               wcycle);

        if (awh)
        {
            enerd->term[F_COM_PULL] +=
                awh->applyBiasForcesAndUpdateBias(inputrec->ePBC, *mdatoms, box,
                                                  forceWithVirial,
                                                  t, step, wcycle, fplog);
        }
    }

    rvec *f = as_rvec_array(forceWithVirial->force_.data());

    /* Add the forces from enforced rotation potentials (if any) */
    if (inputrec->bRot)
    {
        wallcycle_start(wcycle, ewcROTadd);
        enerd->term[F_COM_PULL] += add_rot_forces(enforcedRotation, f, cr, step, t);
        wallcycle_stop(wcycle, ewcROTadd);
    }

    if (ed)
    {
        /* Note that since init_edsam() is called after the initialization
         * of forcerec, edsam doesn't request the noVirSum force buffer.
         * Thus if no other algorithm (e.g. PME) requires it, the forces
         * here will contribute to the virial.
         */
        do_flood(cr, inputrec, as_rvec_array(x.data()), f, ed, box, step, bNS);
    }

    /* Add forces from interactive molecular dynamics (IMD), if any */
    if (inputrec->bIMD && computeForces)
    {
        imdSession->applyForces(f);
    }
}

/*! \brief Launch the prepare_step and spread stages of PME GPU.
 *
 * \param[in]  pmedata       The PME structure
 * \param[in]  box           The box matrix
 * \param[in]  x             Coordinate array
 * \param[in]  flags         Force flags
 * \param[in]  pmeFlags      PME flags
 * \param[in]  wcycle        The wallcycle structure
 */
static inline void launchPmeGpuSpread(gmx_pme_t      *pmedata,
                                      const matrix    box,
                                      const rvec      x[],
                                      int             flags,
                                      int             pmeFlags,
                                      gmx_wallcycle_t wcycle)
{
    pme_gpu_prepare_computation(pmedata, (flags & GMX_FORCE_DYNAMICBOX) != 0, box, wcycle, pmeFlags);
    pme_gpu_launch_spread(pmedata, x, wcycle);
}

/*! \brief Launch the FFT and gather stages of PME GPU
 *
 * This function only implements setting the output forces (no accumulation).
 *
 * \param[in]  pmedata        The PME structure
 * \param[in]  wcycle         The wallcycle structure
 * \param[in]  useGpuFPmeReduction Whether forces will be reduced on GPU
 */
static void launchPmeGpuFftAndGather(gmx_pme_t        *pmedata,
                                     gmx_wallcycle_t   wcycle,
                                     bool              useGpuFPmeReduction)
{
    pme_gpu_launch_complex_transforms(pmedata, wcycle);
    pme_gpu_launch_gather(pmedata, wcycle, PmeForceOutputHandling::Set, useGpuFPmeReduction);
}

/*! \brief
 *  Polling wait for either of the PME or nonbonded GPU tasks.
 *
 * Instead of a static order in waiting for GPU tasks, this function
 * polls to check which of the two tasks completes first, and does the
 * associated force buffer reduction overlapped with the other task.
 * By doing that, unlike static scheduling order, it can always overlap
 * one of the reductions, regardless of the GPU task completion order.
 *
 * \param[in]     nbv              Nonbonded verlet structure
 * \param[in,out] pmedata          PME module data
 * \param[in,out] forceOutputs     Output buffer for the forces and virial
 * \param[in,out] enerd            Energy data structure results are reduced into
 * \param[in]     flags            Force flags
 * \param[in]     pmeFlags         PME flags
 * \param[in]     wcycle           The wallcycle structure
 */
static void alternatePmeNbGpuWaitReduce(nonbonded_verlet_t                  *nbv,
                                        gmx_pme_t                           *pmedata,
                                        gmx::ForceOutputs                   *forceOutputs,
                                        gmx_enerdata_t                      *enerd,
                                        int                                  flags,
                                        int                                  pmeFlags,
                                        gmx_wallcycle_t                      wcycle)
{
    bool isPmeGpuDone = false;
    bool isNbGpuDone  = false;

    gmx::ForceWithShiftForces      &forceWithShiftForces = forceOutputs->forceWithShiftForces();
    gmx::ForceWithVirial           &forceWithVirial      = forceOutputs->forceWithVirial();

    gmx::ArrayRef<const gmx::RVec>  pmeGpuForces;
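
    // Poll each task with GpuTaskCompletion::Check while both are still
    // running; once one completes, the other is waited on with
    // GpuTaskCompletion::Wait, so the reduction of whichever task finishes
    // first always overlaps with the task still in flight.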
    while (!isPmeGpuDone || !isNbGpuDone)
    {
        if (!isPmeGpuDone)
        {
            GpuTaskCompletion completionType = (isNbGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            isPmeGpuDone = pme_gpu_try_finish_task(pmedata, pmeFlags, wcycle, &forceWithVirial, enerd, completionType);
        }

        if (!isNbGpuDone)
        {
            GpuTaskCompletion completionType = (isPmeGpuDone) ? GpuTaskCompletion::Wait : GpuTaskCompletion::Check;
            wallcycle_start_nocount(wcycle, ewcWAIT_GPU_NB_L);
            isNbGpuDone = Nbnxm::gpu_try_finish_task(nbv->gpu_nbv,
                                                     flags,
                                                     Nbnxm::AtomLocality::Local,
                                                     enerd->grpp.ener[egLJSR].data(),
                                                     enerd->grpp.ener[egCOULSR].data(),
                                                     forceWithShiftForces.shiftForces(), completionType);
            wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
            // To get the call count right, when the task finished we
            // issue a start/stop.
            // TODO: move the ewcWAIT_GPU_NB_L cycle counting into nbnxn_gpu_try_finish_task()
            // and ewcNB_XF_BUF_OPS counting into nbnxn_atomdata_add_nbat_f_to_f().
            if (isNbGpuDone)
            {
                wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
                wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);

                nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
                                              forceWithShiftForces.force());
            }
        }
    }
}

/*! \brief Set up the different force buffers; also does clearing.
 *
 * \param[in] fr        force record pointer
 * \param[in] pull_work The pull work object.
 * \param[in] inputrec  input record
 * \param[in] force     force array
 * \param[in] bDoForces True if forces are computed this step
 * \param[in] doVirial  True if virial is computed this step
 * \param[out] wcycle   wallcycle recording structure
 *
 * \returns             Cleared force output structure
 */
static ForceOutputs
setupForceOutputs(t_forcerec                          *fr,
                  pull_t                              *pull_work,
                  const t_inputrec                    &inputrec,
                  gmx::ArrayRefWithPadding<gmx::RVec>  force,
                  const bool                           bDoForces,
                  const bool                           doVirial,
                  gmx_wallcycle_t                      wcycle)
{
    wallcycle_sub_start(wcycle, ewcsCLEAR_FORCE_BUFFER);

    /* NOTE: We assume fr->shiftForces is all zeros here */
    gmx::ForceWithShiftForces forceWithShiftForces(force, doVirial, fr->shiftForces);

    if (bDoForces)
    {
        /* Clear the short- and long-range forces */
        clear_rvecs_omp(fr->natoms_force_constr,
                        as_rvec_array(forceWithShiftForces.force().data()));
    }

    /* If we need to compute the virial, we might need a separate
     * force buffer for algorithms for which the virial is calculated
     * directly, such as PME. Otherwise, forceWithVirial uses the
     * same force (f in legacy calls) buffer as other algorithms.
     */
    const bool useSeparateForceWithVirialBuffer = (bDoForces && (doVirial && fr->haveDirectVirialContributions));

    /* forceWithVirial uses the local atom range only */
    gmx::ForceWithVirial forceWithVirial(useSeparateForceWithVirialBuffer ?
                                         fr->forceBufferForDirectVirialContributions : force.unpaddedArrayRef(),
                                         doVirial);

    if (useSeparateForceWithVirialBuffer)
    {
        /* TODO: update comment
         * We only compute forces on local atoms. Note that vsites can
         * spread to non-local atoms, but that part of the buffer is
         * cleared separately in the vsite spreading code.
         */
        clear_rvecs_omp(forceWithVirial.force_.size(), as_rvec_array(forceWithVirial.force_.data()));
    }

    if (inputrec.bPull && pull_have_constraint(pull_work))
    {
        clear_pull_forces(pull_work);
    }

    wallcycle_sub_stop(wcycle, ewcsCLEAR_FORCE_BUFFER);

    return ForceOutputs(forceWithShiftForces, forceWithVirial);
}


/*! \brief Set up flags that indicate what type of work is there to compute.
 *
 * Currently we only update it at search steps,
 * but some properties may change more frequently (e.g. virial/non-virial step),
 * so when including those either the frequency of update (per-step) or the scope
 * of a flag will change (i.e. a set of flags for nstlist steps).
 *
 */
static void
setupForceWorkload(gmx::PpForceWorkload *forceWork,
                   const t_inputrec     *inputrec,
                   const t_forcerec     *fr,
                   const pull_t         *pull_work,
                   const gmx_edsam      *ed,
                   const t_idef         &idef,
                   const t_fcdata       *fcd,
                   const int             forceFlags
                   )
{
    forceWork->haveSpecialForces      = haveSpecialForces(inputrec, fr->forceProviders, pull_work, forceFlags, ed);
    forceWork->haveCpuBondedWork      = haveCpuBondeds(*fr);
    forceWork->haveGpuBondedWork      = ((fr->gpuBonded != nullptr) && fr->gpuBonded->haveInteractions());
    forceWork->haveRestraintsWork     = havePositionRestraints(idef, *fcd);
    forceWork->haveCpuListedForceWork = haveCpuListedForces(*fr, idef, *fcd);
}


/*! \brief Launch end-of-step GPU tasks: buffer clearing and rolling pruning.
 *
 * TODO: eliminate the \p useGpuNonbonded and \p useGpuPme when these are
 * incorporated in PpForceWorkload.
 */
static void
launchGpuEndOfStepTasks(nonbonded_verlet_t         *nbv,
                        gmx::GpuBonded             *gpuBonded,
                        gmx_pme_t                  *pmedata,
                        gmx_enerdata_t             *enerd,
                        const gmx::PpForceWorkload &forceWorkload,
                        bool                        useGpuNonbonded,
                        bool                        useGpuPme,
                        int64_t                     step,
                        int                         flags,
                        gmx_wallcycle_t             wcycle)
{
    if (useGpuNonbonded)
    {
        /* Launch pruning before buffer clearing because the API overhead of the
         * clear kernel launches can leave the GPU idle while it could be running
         * the prune kernel.
         */
        if (nbv->isDynamicPruningStepGpu(step))
        {
            nbv->dispatchPruneKernelGpu(step);
        }

        /* now clear the GPU outputs while we finish the step on the CPU */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_clear_outputs(nbv->gpu_nbv, flags);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPme)
    {
        pme_gpu_reinit_computation(pmedata, wcycle);
    }

    if (forceWorkload.haveGpuBondedWork && (flags & GMX_FORCE_ENERGY))
    {
        // in principle this should be included in the DD balancing region,
        // but generally it is infrequent so we'll omit it for the sake of
        // simpler code
        gpuBonded->waitAccumulateEnergyTerms(enerd);

        gpuBonded->clearEnergies();
    }
}


void do_force(FILE                                     *fplog,
              const t_commrec                          *cr,
              const gmx_multisim_t                     *ms,
              const t_inputrec                         *inputrec,
              gmx::Awh                                 *awh,
              gmx_enfrot                               *enforcedRotation,
              gmx::ImdSession                          *imdSession,
              pull_t                                   *pull_work,
              int64_t                                   step,
              t_nrnb                                   *nrnb,
              gmx_wallcycle_t                           wcycle,
              const gmx_localtop_t                     *top,
              const matrix                              box,
              gmx::ArrayRefWithPadding<gmx::RVec>       x,     //NOLINT(performance-unnecessary-value-param)
              history_t                                *hist,
              gmx::ArrayRefWithPadding<gmx::RVec>       force, //NOLINT(performance-unnecessary-value-param)
              tensor                                    vir_force,
              const t_mdatoms                          *mdatoms,
              gmx_enerdata_t                           *enerd,
              t_fcdata                                 *fcd,
              gmx::ArrayRef<real>                       lambda,
              t_graph                                  *graph,
              t_forcerec                               *fr,
              gmx::PpForceWorkload                     *ppForceWorkload,
              const gmx_vsite_t                        *vsite,
              rvec                                      mu_tot,
              double                                    t,
              gmx_edsam                                *ed,
              int                                       flags,
              const DDBalanceRegionHandler             &ddBalanceRegionHandler)
{
    int                  i, j;
    double               mu[2*DIM];
    gmx_bool             bStateChanged, bNS, bFillGrid, bCalcCGCM;
    gmx_bool             bDoForces, bUseGPU, bUseOrEmulGPU;
    nonbonded_verlet_t  *nbv = fr->nbv.get();
    interaction_const_t *ic  = fr->ic;

    /* modify force flag if not doing nonbonded */
    if (!fr->bNonbonded)
    {
        flags &= ~GMX_FORCE_NONBONDED;
    }
    bStateChanged = ((flags & GMX_FORCE_STATECHANGED) != 0);
    bNS           = ((flags & GMX_FORCE_NS) != 0);
    bFillGrid     = (bNS && bStateChanged);
    bCalcCGCM     = (bFillGrid && !DOMAINDECOMP(cr));
    bDoForces     = ((flags & GMX_FORCE_FORCES) != 0);
    bUseGPU       = fr->nbv->useGpu();
    bUseOrEmulGPU = bUseGPU || fr->nbv->emulateGpu();

    const auto pmeRunMode = fr->pmedata ? pme_run_mode(fr->pmedata) : PmeRunMode::CPU;
    // TODO slim this conditional down - inputrec and duty checks should mean the same in proper code!
    const bool useGpuPme  = EEL_PME(fr->ic->eeltype) && thisRankHasDuty(cr, DUTY_PME) &&
        ((pmeRunMode == PmeRunMode::GPU) || (pmeRunMode == PmeRunMode::Mixed));
    const int  pmeFlags = GMX_PME_SPREAD | GMX_PME_SOLVE |
        ((flags & GMX_FORCE_VIRIAL) ? GMX_PME_CALC_ENER_VIR : 0) |
        ((flags & GMX_FORCE_ENERGY) ? GMX_PME_CALC_ENER_VIR : 0) |
        ((flags & GMX_FORCE_FORCES) ? GMX_PME_CALC_F : 0);
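    // Note: the same GMX_PME_CALC_ENER_VIR bit is set for both the virial
    // and the energy request above; PME computes the two together, so one
    // combined flag covers both cases.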

    // Switches on whether to use GPU for position and force buffer operations
    // TODO consider all possible combinations of triggers, and how to combine optimally in each case.
    const BufferOpsUseGpu useGpuXBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA)) ?
        BufferOpsUseGpu::True : BufferOpsUseGpu::False;
    // GPU Force buffer ops are disabled on virial steps, because the virial calc is not yet ported to GPU
    const BufferOpsUseGpu useGpuFBufOps = (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
        && !(flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)) ?
        BufferOpsUseGpu::True : BufferOpsUseGpu::False;
    // TODO: move / add this flag to the internal PME GPU data structures
    const bool useGpuFPmeReduction = (useGpuFBufOps == BufferOpsUseGpu::True) &&
        thisRankHasDuty(cr, DUTY_PME) && useGpuPme; // only supported if this rank is performing PME on the GPU

    /* At a search step we need to start the first balancing region
     * somewhere early inside the step after communication during domain
     * decomposition (and not during the previous step as usual).
     */
    if (bNS)
    {
        ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::yes);
    }

    const int start  = 0;
    const int homenr = mdatoms->homenr;

    clear_mat(vir_force);

    if (bStateChanged)
    {
        if (inputrecNeedMutot(inputrec))
        {
            /* Calculate total (local) dipole moment in a temporary common array.
             * This makes it possible to sum them over nodes faster.
             */
            calc_mu(start, homenr,
                    x.unpaddedArrayRef(), mdatoms->chargeA, mdatoms->chargeB, mdatoms->nChargePerturbed,
                    mu, mu+DIM);
        }
    }
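
    // mu holds the local state-A dipole in its first DIM entries and the
    // state-B dipole in the next DIM (see the calc_mu call above); with MPI
    // it is summed over ranks below before being unpacked into fr->mu_tot.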

    if (fr->ePBC != epbcNONE)
    {
        /* Compute shift vectors every step,
         * because of pressure coupling or box deformation!
         */
        if ((flags & GMX_FORCE_DYNAMICBOX) && bStateChanged)
        {
            calc_shifts(box, fr->shift_vec);
        }

        if (bCalcCGCM)
        {
            put_atoms_in_box_omp(fr->ePBC, box, x.unpaddedArrayRef().subArray(0, homenr), gmx_omp_nthreads_get(emntDefault));
            inc_nrnb(nrnb, eNR_SHIFTX, homenr);
        }
        else if (EI_ENERGY_MINIMIZATION(inputrec->eI) && graph)
        {
            unshift_self(graph, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }
    }

    nbnxn_atomdata_copy_shiftvec((flags & GMX_FORCE_DYNAMICBOX) != 0,
                                 fr->shift_vec, nbv->nbat.get());

#if GMX_MPI
    if (!thisRankHasDuty(cr, DUTY_PME))
    {
        /* Send particle coordinates to the pme nodes.
         * Since this is only implemented for domain decomposition
         * and domain decomposition does not use the graph,
         * we do not need to worry about shifting.
         */
        gmx_pme_send_coordinates(cr, box, as_rvec_array(x.unpaddedArrayRef().data()),
                                 lambda[efptCOUL], lambda[efptVDW],
                                 (flags & (GMX_FORCE_VIRIAL | GMX_FORCE_ENERGY)) != 0,
                                 step, wcycle);
    }
#endif /* GMX_MPI */

    if (useGpuPme)
    {
        launchPmeGpuSpread(fr->pmedata, box, as_rvec_array(x.unpaddedArrayRef().data()), flags, pmeFlags, wcycle);
    }

    /* do gridding for pair search */
    if (bNS)
    {
        if (graph && bStateChanged)
        {
            /* Calculate intramolecular shift vectors to make molecules whole */
            mk_mshift(fplog, graph, fr->ePBC, box, as_rvec_array(x.unpaddedArrayRef().data()));
        }

        // TODO
        // - vzero is constant, do we need to pass it?
        // - box_diag should be passed directly to nbnxn_put_on_grid
        //
        rvec vzero;
        clear_rvec(vzero);

        rvec box_diag;
        box_diag[XX] = box[XX][XX];
        box_diag[YY] = box[YY][YY];
        box_diag[ZZ] = box[ZZ][ZZ];

        wallcycle_start(wcycle, ewcNS);
        if (!DOMAINDECOMP(cr))
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_LOCAL);
            nbnxn_put_on_grid(nbv, box,
                              0, vzero, box_diag,
                              nullptr, 0, mdatoms->homenr, -1,
                              fr->cginfo, x.unpaddedArrayRef(),
                              0, nullptr);
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_LOCAL);
        }
        else
        {
            wallcycle_sub_start(wcycle, ewcsNBS_GRID_NONLOCAL);
            nbnxn_put_on_grid_nonlocal(nbv, domdec_zones(cr->dd),
                                       fr->cginfo, x.unpaddedArrayRef());
            wallcycle_sub_stop(wcycle, ewcsNBS_GRID_NONLOCAL);
        }

        nbv->setAtomProperties(*mdatoms, fr->cginfo);

        wallcycle_stop(wcycle, ewcNS);

        /* initialize the GPU nbnxm atom data and bonded data structures */
        if (bUseGPU)
        {
            wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);

            wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            Nbnxm::gpu_init_atomdata(nbv->gpu_nbv, nbv->nbat.get());
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            if (fr->gpuBonded)
            {
                /* Now we put all atoms on the grid, we can assign bonded
                 * interactions to the GPU, where the grid order is
                 * needed. Also the xq, f and fshift device buffers have
                 * been reallocated if needed, so the bonded code can
                 * learn about them. */
                // TODO the xq, f, and fshift buffers are now shared
                // resources, so they should be maintained by a
                // higher-level object than the nb module.
                fr->gpuBonded->updateInteractionListsAndDeviceBuffers(nbv->getGridIndices(),
                                                                      top->idef,
                                                                      Nbnxm::gpu_get_xq(nbv->gpu_nbv),
                                                                      Nbnxm::gpu_get_f(nbv->gpu_nbv),
                                                                      Nbnxm::gpu_get_fshift(nbv->gpu_nbv));
            }
            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }

        // Need to run after the GPU-offload bonded interaction lists
        // are set up to be able to determine whether there is bonded work.
        setupForceWorkload(ppForceWorkload,
                           inputrec,
                           fr,
                           pull_work,
                           ed,
                           top->idef,
                           fcd,
                           flags);
    }

    /* do local pair search */
    if (bNS)
    {
        // TODO: fuse this branch with the above bNS block
        wallcycle_start_nocount(wcycle, ewcNS);
        wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_LOCAL);
        /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
        nbv->constructPairlist(Nbnxm::InteractionLocality::Local,
                               &top->excls, step, nrnb);

        nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::Local);

        wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_LOCAL);
        wallcycle_stop(wcycle, ewcNS);

        if (useGpuXBufOps == BufferOpsUseGpu::True)
        {
            nbv->atomdata_init_copy_x_to_nbat_x_gpu();
        }
        // For force buffer ops, we use the below condition rather than
        // useGpuFBufOps to ensure that init is performed even if this
        // NS step is also a virial step (on which f buf ops are deactivated).
        if (c_enableGpuBufOps && bUseGPU && (GMX_GPU == GMX_GPU_CUDA))
        {
            nbv->atomdata_init_add_nbat_f_to_f_gpu();
        }
    }
    else
    {
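        // On non-search steps only the coordinate transform into the nbnxm
        // layout is needed; with useGpuXBufOps this transform runs on the
        // GPU, and the PME coordinate device buffer is passed in so that
        // positions already resident on the GPU can be read directly.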
        nbv->setCoordinates(Nbnxm::AtomLocality::Local, false,
                            x.unpaddedArrayRef(), useGpuXBufOps, pme_gpu_get_device_x(fr->pmedata));
    }

    if (bUseGPU)
    {
        ddBalanceRegionHandler.openBeforeForceComputationGpu();

        wallcycle_start(wcycle, ewcLAUNCH_GPU);

        wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        Nbnxm::gpu_upload_shiftvec(nbv->gpu_nbv, nbv->nbat.get());
        if (bNS || (useGpuXBufOps == BufferOpsUseGpu::False))
        {
            Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                      Nbnxm::AtomLocality::Local);
        }
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        // with X buffer ops offloaded to the GPU on all but the search steps

        // bonded work not split into separate local and non-local, so with DD
        // we can only launch the kernel after non-local coordinates have been received.
        if (ppForceWorkload->haveGpuBondedWork && !havePPDomainDecomposition(cr))
        {
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
            fr->gpuBonded->launchKernel(fr, flags, box);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
        }

        /* launch local nonbonded work on GPU */
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local, enbvClearFNo,
                     step, nrnb, wcycle);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }

    if (useGpuPme)
    {
        // In PME GPU and mixed mode we launch FFT / gather after the
        // X copy/transform to allow overlap as well as after the GPU NB
        // launch to avoid FFT launch overhead hijacking the CPU and delaying
        // the nonbonded kernel.
        launchPmeGpuFftAndGather(fr->pmedata, wcycle, useGpuFPmeReduction);
    }

    /* Communicate coordinates and sum dipole if necessary +
       do non-local pair search */
    if (havePPDomainDecomposition(cr))
    {
        if (bNS)
        {
            // TODO: fuse this branch with the above large bNS block
            wallcycle_start_nocount(wcycle, ewcNS);
            wallcycle_sub_start(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            /* Note that with a GPU the launch overhead of the list transfer is not timed separately */
            nbv->constructPairlist(Nbnxm::InteractionLocality::NonLocal,
                                   &top->excls, step, nrnb);

            nbv->setupGpuShortRangeWork(fr->gpuBonded, Nbnxm::InteractionLocality::NonLocal);
            wallcycle_sub_stop(wcycle, ewcsNBS_SEARCH_NONLOCAL);
            wallcycle_stop(wcycle, ewcNS);
        }
        else
        {
            dd_move_x(cr->dd, box, x.unpaddedArrayRef(), wcycle);

            nbv->setCoordinates(Nbnxm::AtomLocality::NonLocal, false,
                                x.unpaddedArrayRef(), useGpuXBufOps, pme_gpu_get_device_x(fr->pmedata));
        }

        if (bUseGPU)
        {
            wallcycle_start(wcycle, ewcLAUNCH_GPU);

            if (bNS || (useGpuXBufOps == BufferOpsUseGpu::False))
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
                Nbnxm::gpu_copy_xq_to_gpu(nbv->gpu_nbv, nbv->nbat.get(),
                                          Nbnxm::AtomLocality::NonLocal);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            }

            if (ppForceWorkload->haveGpuBondedWork)
            {
                wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_BONDED);
                fr->gpuBonded->launchKernel(fr, flags, box);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_BONDED);
            }

            /* launch non-local nonbonded tasks on GPU */
            wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_NONBONDED);
            do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
                         step, nrnb, wcycle);
            wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);

            wallcycle_stop(wcycle, ewcLAUNCH_GPU);
        }
    }

    if (bUseGPU)
    {
        /* launch D2H copy-back F */
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_NONBONDED);

        bool copyBackNbForce  = (useGpuFBufOps == BufferOpsUseGpu::False);
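
        // With domain decomposition, the non-local transfer is enqueued
        // first: those forces are needed earliest, for the force halo
        // exchange back to the neighboring ranks (dd_move_f).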
1219
1220         if (havePPDomainDecomposition(cr))
1221         {
1222             Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
1223                                       flags, Nbnxm::AtomLocality::NonLocal, copyBackNbForce);
1224         }
1225         Nbnxm::gpu_launch_cpyback(nbv->gpu_nbv, nbv->nbat.get(),
1226                                   flags, Nbnxm::AtomLocality::Local, copyBackNbForce);
1227
1228         wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_NONBONDED);
1229
1230         if (ppForceWorkload->haveGpuBondedWork && (flags & GMX_FORCE_ENERGY))
1231         {
1232             fr->gpuBonded->launchEnergyTransfer();
1233         }
1234         wallcycle_stop(wcycle, ewcLAUNCH_GPU);
1235     }
1236
1237     if (bStateChanged && inputrecNeedMutot(inputrec))
1238     {
1239         if (PAR(cr))
1240         {
1241             gmx_sumd(2*DIM, mu, cr);
1242
1243             ddBalanceRegionHandler.reopenRegionCpu();
1244         }
1245
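             /* mu is a flat buffer of 2*DIM doubles: components 0..DIM-1 hold
              * the total dipole of state A, DIM..2*DIM-1 that of state B. */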
1246         for (i = 0; i < 2; i++)
1247         {
1248             for (j = 0; j < DIM; j++)
1249             {
1250                 fr->mu_tot[i][j] = mu[i*DIM + j];
1251             }
1252         }
1253     }
1254     if (fr->efep == efepNO)
1255     {
1256         copy_rvec(fr->mu_tot[0], mu_tot);
1257     }
1258     else
1259     {
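             /* With free-energy perturbation the total dipole is interpolated
              * linearly in the Coulomb coupling parameter:
              *   mu_tot = (1 - lambda_coul)*mu_A + lambda_coul*mu_B
              */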
1260         for (j = 0; j < DIM; j++)
1261         {
1262             mu_tot[j] =
1263                 (1.0 - lambda[efptCOUL])*fr->mu_tot[0][j] +
1264                 lambda[efptCOUL]*fr->mu_tot[1][j];
1265         }
1266     }
1267
1268     /* Reset energies */
1269     reset_enerdata(enerd);
1270     /* Clear the shift forces */
1271     // TODO: This should be linked to the shift force buffer in use, or cleared before use instead
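         // The shift forces hold force contributions per periodic shift vector;
         // they are accumulated during the force calculation and enter the virial.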
1272     for (gmx::RVec &elem : fr->shiftForces)
1273     {
1274         elem = { 0.0_real, 0.0_real, 0.0_real };
1275     }
1276
1277     if (DOMAINDECOMP(cr) && !thisRankHasDuty(cr, DUTY_PME))
1278     {
1279         wallcycle_start(wcycle, ewcPPDURINGPME);
1280         dd_force_flop_start(cr->dd, nrnb);
1281     }
1282
1283     if (inputrec->bRot)
1284     {
1285         wallcycle_start(wcycle, ewcROT);
1286         do_rotation(cr, enforcedRotation, box, as_rvec_array(x.unpaddedArrayRef().data()), t, step, bNS);
1287         wallcycle_stop(wcycle, ewcROT);
1288     }
1289
1290     /* Start the force cycle counter.
1291      * Note that a different counter is used for dynamic load balancing.
1292      */
1293     wallcycle_start(wcycle, ewcFORCE);
1294
1295     // Set up and clear force outputs.
1296     // We use std::move to keep the compiler happy; it has no effect.
1297     ForceOutputs forceOut = setupForceOutputs(fr, pull_work, *inputrec, std::move(force), bDoForces,
1298                                               ((flags & GMX_FORCE_VIRIAL) != 0), wcycle);
1299
1300     /* When the non-bonded forces are computed on the CPU, we calculate
1301      * them here. We do this before calling do_force_lowlevel, because
1302      * that function calculates the listed forces before PME, which
1303      * does communication. With this order, imbalance between the
1304      * non-bonded and listed force calculations can be evened out by
1305      * the domain decomposition load balancing.
1306      */
1307
1308     if (!bUseOrEmulGPU)
1309     {
1310         do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local, enbvClearFYes,
1311                      step, nrnb, wcycle);
1312     }
1313
1314     if (fr->efep != efepNO)
1315     {
1316         /* Calculate the local and non-local free energy interactions here.
1317          * Happens here on the CPU both with and without GPU.
1318          */
1319         nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::Local,
1320                                       fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
1321                                       inputrec->fepvals, lambda.data(),
1322                                       enerd, flags, nrnb);
1323
1324         if (havePPDomainDecomposition(cr))
1325         {
1326             nbv->dispatchFreeEnergyKernel(Nbnxm::InteractionLocality::NonLocal,
1327                                           fr, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut.forceWithShiftForces(), *mdatoms,
1328                                           inputrec->fepvals, lambda.data(),
1329                                           enerd, flags, nrnb);
1330         }
1331     }
1332
1333     if (!bUseOrEmulGPU)
1334     {
1335         if (havePPDomainDecomposition(cr))
1336         {
1337             do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFNo,
1338                          step, nrnb, wcycle);
1339         }
1340
1341         /* Add all the non-bonded forces to the normal force array.
1342          * This can be split into a local and a non-local part when
1343          * overlapping communication with computation under domain decomposition.
1344          */
1345         wallcycle_stop(wcycle, ewcFORCE);
1346         nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::All, forceOut.forceWithShiftForces().force());
1347         wallcycle_start_nocount(wcycle, ewcFORCE);
1348
1349         /* If there are multiple fshift output buffers we need to reduce them */
1350         if (flags & GMX_FORCE_VIRIAL)
1351         {
1352             /* This is not in a subcounter because it takes a
1353                negligible and constant-sized amount of time */
1354             nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
1355                                                      forceOut.forceWithShiftForces().shiftForces());
1356         }
1357     }
1358
1359     /* update QMMMrec, if necessary */
1360     if (fr->bQMMM)
1361     {
1362         update_QMMMrec(cr, fr, as_rvec_array(x.unpaddedArrayRef().data()), mdatoms, box);
1363     }
1364
1365     /* Compute the bonded and non-bonded energies and optionally forces */
1366     do_force_lowlevel(fr, inputrec, &(top->idef),
1367                       cr, ms, nrnb, wcycle, mdatoms,
1368                       x, hist, &forceOut, enerd, fcd,
1369                       box, lambda.data(), graph, fr->mu_tot,
1370                       flags,
1371                       ddBalanceRegionHandler);
1372
1373     wallcycle_stop(wcycle, ewcFORCE);
1374
1375     computeSpecialForces(fplog, cr, inputrec, awh, enforcedRotation,
1376                          imdSession, pull_work, step, t, wcycle,
1377                          fr->forceProviders, box, x.unpaddedArrayRef(), mdatoms, lambda.data(),
1378                          flags, &forceOut.forceWithVirial(), enerd,
1379                          ed, bNS);
1380
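     // Whether any force contributions were produced on the CPU this step;
     // if so, they must be included in the (possibly GPU-based) force reduction.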
1381     bool                   useCpuFPmeReduction = thisRankHasDuty(cr, DUTY_PME) && !useGpuFPmeReduction;
1382     bool                   haveCpuForces       = (ppForceWorkload->haveSpecialForces || ppForceWorkload->haveCpuListedForceWork || useCpuFPmeReduction);
1383
1384     // Stores the number of cycles spent waiting for the GPU, which
1385     // will later be used in the DLB accounting.
1386     float cycles_wait_gpu = 0;
1387     if (bUseOrEmulGPU)
1388     {
1389         auto &forceWithShiftForces = forceOut.forceWithShiftForces();
1390         rvec *f                    = as_rvec_array(forceWithShiftForces.force().data());
1391
1392         /* wait for non-local forces (or calculate in emulation mode) */
1393         if (havePPDomainDecomposition(cr))
1394         {
1395             if (bUseGPU)
1396             {
1397                 wallcycle_start(wcycle, ewcWAIT_GPU_NB_NL);
1398                 Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
1399                                             flags, Nbnxm::AtomLocality::NonLocal,
1400                                             enerd->grpp.ener[egLJSR].data(),
1401                                             enerd->grpp.ener[egCOULSR].data(),
1402                                             forceWithShiftForces.shiftForces());
1403                 cycles_wait_gpu += wallcycle_stop(wcycle, ewcWAIT_GPU_NB_NL);
1404             }
1405             else
1406             {
1407                 wallcycle_start_nocount(wcycle, ewcFORCE);
1408                 do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::NonLocal, enbvClearFYes,
1409                              step, nrnb, wcycle);
1410                 wallcycle_stop(wcycle, ewcFORCE);
1411             }
1412
1413             if (useGpuFBufOps == BufferOpsUseGpu::True && haveCpuForces)
1414             {
1415                 nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::NonLocal);
1416             }
1417
1418             // Flag to specify whether forces should be accumulated in the force
1419             // buffer ops. For the non-local part this just depends on whether CPU forces are present.
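                 // When the flag is false, the buffer-op kernel can overwrite the
                 // output buffer instead of adding to it, avoiding a read of the
                 // previous contents.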
1420             bool accumulateForce = (useGpuFBufOps == BufferOpsUseGpu::True) && haveCpuForces;
1421             nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::NonLocal,
1422                                           forceWithShiftForces.force(), pme_gpu_get_device_f(fr->pmedata),
1423                                           pme_gpu_get_f_ready_synchronizer(fr->pmedata),
1424                                           useGpuFBufOps, useGpuFPmeReduction, accumulateForce);
1425             if (useGpuFBufOps == BufferOpsUseGpu::True)
1426             {
1427                 nbv->launch_copy_f_from_gpu(f, Nbnxm::AtomLocality::NonLocal);
1428             }
1429
1430             if (fr->nbv->emulateGpu() && (flags & GMX_FORCE_VIRIAL))
1431             {
1432                 nbnxn_atomdata_add_nbat_fshift_to_fshift(*nbv->nbat,
1433                                                          forceWithShiftForces.shiftForces());
1434             }
1435         }
1436     }
1437
1438     if (havePPDomainDecomposition(cr))
1439     {
1440         /* We are done with the CPU compute.
1441          * We will now communicate the non-local forces.
1442          * If we use a GPU this will overlap with GPU work, so in that case
1443          * we do not close the DD force balancing region here.
1444          */
1445         ddBalanceRegionHandler.closeAfterForceComputationCpu();
1446
1447         if (bDoForces)
1448         {
1449             if (useGpuFBufOps == BufferOpsUseGpu::True)
1450             {
1451                 nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::NonLocal);
1452             }
1453             dd_move_f(cr->dd, &forceOut.forceWithShiftForces(), wcycle);
1454         }
1455     }
1456
1457     // With both nonbonded and PME offloaded to a GPU on the same rank, we use
1458     // an alternating wait/reduction scheme.
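     // The alternating scheme polls both GPU tasks and reduces the output of
     // whichever completes first, hiding each reduction behind the other task.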
1459     bool alternateGpuWait = (!c_disableAlternatingWait && useGpuPme && bUseGPU && !DOMAINDECOMP(cr) &&
1460                              (useGpuFBufOps == BufferOpsUseGpu::False));
1461     if (alternateGpuWait)
1462     {
1463         alternatePmeNbGpuWaitReduce(fr->nbv.get(), fr->pmedata, &forceOut, enerd,
1464                                     flags, pmeFlags, wcycle);
1465     }
1466
1467     if (!alternateGpuWait && useGpuPme)
1468     {
1469         pme_gpu_wait_and_reduce(fr->pmedata, pmeFlags, wcycle, &forceOut.forceWithVirial(), enerd, useGpuFPmeReduction);
1470     }
1471
1472     /* Wait for local GPU NB outputs on the non-alternating wait path */
1473     if (!alternateGpuWait && bUseGPU)
1474     {
1475         /* Measured overhead on CUDA and OpenCL with(out) GPU sharing
1476          * is between 0.5 and 1.5 Mcycles. So 2 Mcycles is an overestimate,
1477          * but even with a step of 0.1 ms the difference is less than 1%
1478          * of the step time.
1479          */
1480         const float gpuWaitApiOverheadMargin = 2e6F; /* cycles */
1481
1482         wallcycle_start(wcycle, ewcWAIT_GPU_NB_L);
1483         Nbnxm::gpu_wait_finish_task(nbv->gpu_nbv,
1484                                     flags, Nbnxm::AtomLocality::Local,
1485                                     enerd->grpp.ener[egLJSR].data(),
1486                                     enerd->grpp.ener[egCOULSR].data(),
1487                                     forceOut.forceWithShiftForces().shiftForces());
1488         float cycles_tmp = wallcycle_stop(wcycle, ewcWAIT_GPU_NB_L);
1489
1490         if (ddBalanceRegionHandler.useBalancingRegion())
1491         {
1492             DdBalanceRegionWaitedForGpu waitedForGpu = DdBalanceRegionWaitedForGpu::yes;
1493             if (bDoForces && cycles_tmp <= gpuWaitApiOverheadMargin)
1494             {
1495                 /* We measured few cycles, so it could be that the kernel
1496                  * and transfer finished earlier and there was no actual
1497                  * wait time, only API call overhead.
1498                  * Then the actual time could be anywhere between 0 and
1499                  * cycles_wait_est. We will use half of cycles_wait_est.
1500                  */
1501                 waitedForGpu = DdBalanceRegionWaitedForGpu::no;
1502             }
1503             ddBalanceRegionHandler.closeAfterForceComputationGpu(cycles_wait_gpu, waitedForGpu);
1504         }
1505     }
1506
1507     if (fr->nbv->emulateGpu())
1508     {
1509         // NOTE: emulation kernel is not included in the balancing region,
1510         // but emulation mode does not target performance anyway
1511         wallcycle_start_nocount(wcycle, ewcFORCE);
1512         do_nb_verlet(fr, ic, enerd, flags, Nbnxm::InteractionLocality::Local,
1513                      DOMAINDECOMP(cr) ? enbvClearFNo : enbvClearFYes,
1514                      step, nrnb, wcycle);
1515         wallcycle_stop(wcycle, ewcFORCE);
1516     }
1517
1518     /* Do the nonbonded GPU (or emulation) force buffer reduction
1519      * on the non-alternating path. */
1520     if (bUseOrEmulGPU && !alternateGpuWait)
1521     {
1522         gmx::ArrayRef<gmx::RVec>  force = forceOut.forceWithShiftForces().force();
1523         rvec                     *f     = as_rvec_array(force.data());
1524
1525         // TODO: move these steps as early as possible:
1526         // - CPU f H2D should be as soon as all CPU-side forces are done
1527         // - wait for force reduction does not need to block host (at least not here, it's sufficient to wait
1528         //   before the next CPU task that consumes the forces: vsite spread or update)
1529         //
1530         if (useGpuFBufOps == BufferOpsUseGpu::True && (haveCpuForces || DOMAINDECOMP(cr)))
1531         {
1532             nbv->launch_copy_f_to_gpu(f, Nbnxm::AtomLocality::Local);
1533         }
1534         // Flag to specify whether forces should be accumulated in the
1535         // force buffer ops. For the local part, this depends on whether
1536         // CPU forces are present or DD is active (in which case the
1537         // halo exchange has added contributions from the
1538         // non-local part).
1539         bool accumulateForce = (useGpuFBufOps == BufferOpsUseGpu::True) &&
1540             (haveCpuForces || DOMAINDECOMP(cr));
1541         nbv->atomdata_add_nbat_f_to_f(Nbnxm::AtomLocality::Local,
1542                                       force, pme_gpu_get_device_f(fr->pmedata),
1543                                       pme_gpu_get_f_ready_synchronizer(fr->pmedata),
1544                                       useGpuFBufOps, useGpuFPmeReduction, accumulateForce);
1545         if (useGpuFBufOps == BufferOpsUseGpu::True)
1546         {
1547             nbv->launch_copy_f_from_gpu(f, Nbnxm::AtomLocality::Local);
1548             nbv->wait_for_gpu_force_reduction(Nbnxm::AtomLocality::Local);
1549         }
1550     }
1551
1552     launchGpuEndOfStepTasks(nbv, fr->gpuBonded, fr->pmedata, enerd,
1553                             *ppForceWorkload,
1554                             bUseGPU, useGpuPme,
1555                             step,
1556                             flags,
1557                             wcycle);
1558
1559     if (DOMAINDECOMP(cr))
1560     {
1561         dd_force_flop_stop(cr->dd, nrnb);
1562     }
1563
1564     if (bDoForces)
1565     {
1566         rvec *f = as_rvec_array(forceOut.forceWithShiftForces().force().data());
1567
1568         /* If we have NoVirSum forces, but we do not calculate the virial,
1569          * the NoVirSum contributions are summed into forceOut.f later.
1570          */
1571         if (vsite && !(fr->haveDirectVirialContributions && !(flags & GMX_FORCE_VIRIAL)))
1572         {
1573             rvec *fshift = as_rvec_array(forceOut.forceWithShiftForces().shiftForces().data());
1574             spread_vsite_f(vsite, as_rvec_array(x.unpaddedArrayRef().data()), f, fshift, FALSE, nullptr, nrnb,
1575                            &top->idef, fr->ePBC, fr->bMolPBC, graph, box, cr, wcycle);
1576         }
1577
1578         if (flags & GMX_FORCE_VIRIAL)
1579         {
1580             /* Calculation of the virial must be done after vsites! */
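                 /* Spreading moves each virtual-site force onto its constructing
                  * atoms; only after that do the force/position pairs entering
                  * the virial correspond to real atoms. */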
1581             calc_virial(0, mdatoms->homenr, as_rvec_array(x.unpaddedArrayRef().data()),
1582                         forceOut.forceWithShiftForces(),
1583                         vir_force, graph, box, nrnb, fr, inputrec->ePBC);
1584         }
1585     }
1586
1587     if (PAR(cr) && !thisRankHasDuty(cr, DUTY_PME))
1588     {
1589         /* In case of node-splitting, the PP nodes receive the long-range
1590          * forces, virial and energy from the PME nodes here.
1591          */
1592         pme_receive_force_ener(cr, &forceOut.forceWithVirial(), enerd, wcycle);
1593     }
1594
1595     if (bDoForces)
1596     {
1597         post_process_forces(cr, step, nrnb, wcycle,
1598                             top, box, as_rvec_array(x.unpaddedArrayRef().data()), &forceOut,
1599                             vir_force, mdatoms, graph, fr, vsite,
1600                             flags);
1601     }
1602
1603     if (flags & GMX_FORCE_ENERGY)
1604     {
1605         /* Sum the potential energy terms from group contributions */
1606         sum_epot(&(enerd->grpp), enerd->term);
1607
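         // Test-particle insertion (TPI) deliberately samples overlapping,
         // high-energy configurations, so the validity check is skipped for it.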
1608         if (!EI_TPI(inputrec->eI))
1609         {
1610             checkPotentialEnergyValidity(step, *enerd, *inputrec);
1611         }
1612     }
1613
1614     /* In case we don't have constraints and are using GPUs, the next balancing
1615      * region starts here.
1616      * Some "special" work at the end of do_force_cuts?, such as vsite spread,
1617      * virial calculation and COM pulling, is thus not included in
1618      * the balance timing, which is ok as most tasks do communication.
1619      */
1620     ddBalanceRegionHandler.openBeforeForceComputationCpu(DdAllowBalanceRegionReopen::no);
1621 }