/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020,2021, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Implements high-level PME GPU functions which do not require GPU framework-specific code.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */

#include "gmxpre.h"

#include "config.h"

#include <list>

#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/mdtypes/simulation_workload.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/stringutil.h"
#include "gromacs/ewald/pme_coordinate_receiver_gpu.h"

#include "pme_gpu_internal.h"
#include "pme_gpu_settings.h"
#include "pme_gpu_timings.h"
#include "pme_gpu_types_host.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_solve.h"

/*! \brief
 * Finds out if PME is currently running on GPU.
 *
 * \todo The GPU module should not be constructed (or at least called)
 * when it is not active, so there should be no need to check whether
 * it is active. An assertion that this is true makes sense.
 *
 * \param[in] pme  The PME structure.
 * \returns        True if PME runs on GPU currently, false otherwise.
 */
static inline bool pme_gpu_active(const gmx_pme_t* pme)
{
    return (pme != nullptr) && (pme->runMode != PmeRunMode::CPU);
}

void pme_gpu_reset_timings(const gmx_pme_t* pme)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_reset_timings(pme->gpu);
    }
}

void pme_gpu_get_timings(const gmx_pme_t* pme, gmx_wallclock_gpu_pme_t* timings)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_get_timings(pme->gpu, timings);
    }
}

int pme_gpu_get_block_size(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return 0;
    }
    else
    {
        return pme_gpu_get_atom_data_block_size();
    }
}

/*! \brief
 * A convenience wrapper for launching either the GPU or CPU FFT.
 *
 * \param[in] pme            The PME structure.
 * \param[in] gridIndex      The grid index (0, or 1 for the FEP state B Coulomb grid).
 * \param[in] dir            The FFT direction enum.
 * \param[in] wcycle         The wallclock counter.
 */
void inline parallel_3dfft_execute_gpu_wrapper(gmx_pme_t*             pme,
                                               const int              gridIndex,
                                               enum gmx_fft_direction dir,
                                               gmx_wallcycle*         wcycle)
{
    if (pme_gpu_settings(pme->gpu).performGPUFFT)
    {
        wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
        wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);
        pme_gpu_3dfft(pme->gpu, dir, gridIndex);
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
        wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
    }
    else
    {
        wallcycle_start(wcycle, WallCycleCounter::PmeFftMixedMode);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
        for (int thread = 0; thread < pme->nthread; thread++)
        {
            gmx_parallel_3dfft_execute(pme->pfft_setup[gridIndex], dir, thread, wcycle);
        }
        wallcycle_stop(wcycle, WallCycleCounter::PmeFftMixedMode);
    }
}

/* The PME computation code is split into a few separate functions. */

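/* A rough sketch of how these functions are typically sequenced for one MD step
 * on a rank where PME runs on the GPU. The real call sites live in the MD force
 * schedule, not in this file, and the local names used below (box, stepWork,
 * wcycle, lambdaQ, forceWithVirial, enerd, ...) are illustrative only:
 *
 *   pme_gpu_prepare_computation(pme, box, wcycle, stepWork);
 *   pme_gpu_launch_spread(pme, xReadyOnDevice, wcycle, lambdaQ, useGpuDirectComm, pmeCoordinateReceiverGpu);
 *   pme_gpu_launch_complex_transforms(pme, wcycle, stepWork);
 *   pme_gpu_launch_gather(pme, wcycle, lambdaQ);
 *   // ... overlap other (CPU or GPU) force work here ...
 *   pme_gpu_wait_and_reduce(pme, stepWork, wcycle, forceWithVirial, enerd, lambdaQ);
 *   pme_gpu_reinit_computation(pme, wcycle);
 */
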
void pme_gpu_prepare_computation(gmx_pme_t*               pme,
                                 const matrix             box,
                                 gmx_wallcycle*           wcycle,
                                 const gmx::StepWorkload& stepWork)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(pme->nnodes > 0, "");
    GMX_ASSERT(pme->nnodes == 1 || pme->ndecompdim > 0, "");

    PmeGpu* pmeGpu = pme->gpu;
    // TODO these flags are only here to honor the CPU PME code, and probably should be removed
    pmeGpu->settings.useGpuForceReduction = stepWork.useGpuPmeFReduction;

    bool shouldUpdateBox = false;
    for (int i = 0; i < DIM; ++i)
    {
        for (int j = 0; j <= i; ++j)
        {
            shouldUpdateBox |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
            pmeGpu->common->previousBox[i][j] = box[i][j];
        }
    }

    if (stepWork.haveDynamicBox || shouldUpdateBox) // || is to make the first computation always update
    {
        wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
        wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);
        pme_gpu_update_input_box(pmeGpu, box);
        wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
        wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);

        if (!pme_gpu_settings(pmeGpu).performGPUSolve)
        {
            // TODO remove code duplication and add test coverage
            matrix scaledBox;
            pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
            gmx::invertBoxMatrix(scaledBox, pme->recipbox);
            pme->boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
        }
    }
}

void pme_gpu_launch_spread(gmx_pme_t*                     pme,
                           GpuEventSynchronizer*          xReadyOnDevice,
                           gmx_wallcycle*                 wcycle,
                           const real                     lambdaQ,
                           const bool                     useGpuDirectComm,
                           gmx::PmeCoordinateReceiverGpu* pmeCoordinateReceiverGpu)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(!GMX_GPU_CUDA || xReadyOnDevice || !pme->bPPnode,
               "Need a valid xReadyOnDevice on PP+PME ranks with CUDA.");
    GMX_ASSERT(pme->doCoulomb, "Only Coulomb PME can be run on GPU.");

    PmeGpu* pmeGpu = pme->gpu;

    GMX_ASSERT(pmeGpu->common->ngrids == 1 || (pmeGpu->common->ngrids == 2 && pme->bFEP_q),
               "If not decoupling Coulomb interactions there should only be one FEP grid. If "
               "decoupling Coulomb interactions there should be two grids.");

    /* PME on GPU can currently manage two grids:
     * grid_index=0: Coulomb PME with charges in the normal state or from FEP state A.
     * grid_index=1: Coulomb PME with charges from FEP state B.
     */
    real** fftgrids = pme->fftgrid;
    /* Spread the coefficients on a grid */
    const bool computeSplines = true;
    const bool spreadCharges  = true;
    wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
    wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);
    pme_gpu_spread(
            pmeGpu, xReadyOnDevice, fftgrids, computeSplines, spreadCharges, lambdaQ, useGpuDirectComm, pmeCoordinateReceiverGpu);
    wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
    wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
}

void pme_gpu_launch_complex_transforms(gmx_pme_t* pme, gmx_wallcycle* wcycle, const gmx::StepWorkload& stepWork)
{
    PmeGpu*     pmeGpu   = pme->gpu;
    const auto& settings = pmeGpu->settings;
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = stepWork.computeEnergy || stepWork.computeVirial;
    if (!settings.performGPUFFT)
    {
        wallcycle_start(wcycle, WallCycleCounter::WaitGpuPmeSpread);
        pme_gpu_sync_spread_grid(pme->gpu);
        wallcycle_stop(wcycle, WallCycleCounter::WaitGpuPmeSpread);
    }

    try
    {
        /* The 3dffts and the solve are done in a loop to simplify things, even if this means that
         * there will be two kernel launches for solve. */
        for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
        {
            /* do R2C 3D-FFT */
            t_complex* cfftgrid = pme->cfftgrid[gridIndex];
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_REAL_TO_COMPLEX, wcycle);

            /* solve in k-space for our local cells */
            if (settings.performGPUSolve)
            {
                const auto gridOrdering =
                        settings.useDecomposition ? GridOrdering::YZX : GridOrdering::XYZ;
                wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
                wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);
                pme_gpu_solve(pmeGpu, gridIndex, cfftgrid, gridOrdering, computeEnergyAndVirial);
                wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
                wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
            }
            else
            {
                wallcycle_start(wcycle, WallCycleCounter::PmeSolveMixedMode);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                for (int thread = 0; thread < pme->nthread; thread++)
                {
                    solve_pme_yzx(pme, cfftgrid, pme->boxVolume, computeEnergyAndVirial, pme->nthread, thread);
                }
                wallcycle_stop(wcycle, WallCycleCounter::PmeSolveMixedMode);
            }

            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
        }
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}

void pme_gpu_launch_gather(const gmx_pme_t* pme, gmx_wallcycle gmx_unused* wcycle, const real lambdaQ)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    if (!pme_gpu_settings(pme->gpu).performGPUGather)
    {
        return;
    }

    wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
    wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);

    float** fftgrids = pme->fftgrid;
    pme_gpu_gather(pme->gpu, fftgrids, lambdaQ);
    wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
    wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
}

//! Accumulate \c forceToAdd into \c f, using the available threads.
static void sum_forces(gmx::ArrayRef<gmx::RVec> f, gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(ModuleMultiThread::Pme);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        f[i] += forceToAdd[i];
    }
}

//! Reduce quantities from \c output to \c forceWithVirial and \c enerd.
static void pme_gpu_reduce_outputs(const bool            computeEnergyAndVirial,
                                   const PmeOutput&      output,
                                   gmx_wallcycle*        wcycle,
                                   gmx::ForceWithVirial* forceWithVirial,
                                   gmx_enerdata_t*       enerd)
{
    wallcycle_start(wcycle, WallCycleCounter::PmeGpuFReduction);
    GMX_ASSERT(forceWithVirial, "Invalid force pointer");

    if (computeEnergyAndVirial)
    {
        GMX_ASSERT(enerd, "Invalid energy output manager");
        forceWithVirial->addVirialContribution(output.coulombVirial_);
        enerd->term[F_COUL_RECIP] += output.coulombEnergy_;
        enerd->dvdl_lin[FreeEnergyPerturbationCouplingType::Coul] += output.coulombDvdl_;
    }
    if (output.haveForceOutput_)
    {
        sum_forces(forceWithVirial->force_, output.forces_);
    }
    wallcycle_stop(wcycle, WallCycleCounter::PmeGpuFReduction);
}

bool pme_gpu_try_finish_task(gmx_pme_t*               pme,
                             const gmx::StepWorkload& stepWork,
                             gmx_wallcycle*           wcycle,
                             gmx::ForceWithVirial*    forceWithVirial,
                             gmx_enerdata_t*          enerd,
                             const real               lambdaQ,
                             GpuTaskCompletion        completionKind)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(!pme->gpu->settings.useGpuForceReduction,
               "GPU force reduction should not be active on the pme_gpu_try_finish_task() path");

    // First, if possible, check whether all tasks on the stream have
    // completed, and return fast if not. Accumulate to wcycle the
    // time needed for that checking, but do not yet record that the
    // gather has occurred.
    bool           needToSynchronize      = true;
    constexpr bool c_streamQuerySupported = GMX_GPU_CUDA;

    // TODO: implement c_streamQuerySupported with an additional GpuEventSynchronizer per stream (#2521)
    if ((completionKind == GpuTaskCompletion::Check) && c_streamQuerySupported)
    {
        wallcycle_start_nocount(wcycle, WallCycleCounter::WaitGpuPmeGather);
        // Query the PME stream for completion of all tasks enqueued and
        // if we're not done, stop the timer before early return.
        const bool pmeGpuDone = pme_gpu_stream_query(pme->gpu);
        wallcycle_stop(wcycle, WallCycleCounter::WaitGpuPmeGather);

        if (!pmeGpuDone)
        {
            return false;
        }
        needToSynchronize = false;
    }

    wallcycle_start(wcycle, WallCycleCounter::WaitGpuPmeGather);
    // If the above check passed, then there is no need to make an
    // explicit synchronization call.
    if (needToSynchronize)
    {
        // Synchronize the whole PME stream at once, including D2H result transfers.
        pme_gpu_synchronize(pme->gpu);
    }
    pme_gpu_update_timings(pme->gpu);
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = stepWork.computeEnergy || stepWork.computeVirial;
    PmeOutput  output                 = pme_gpu_getOutput(
            *pme, computeEnergyAndVirial, pme->gpu->common->ngrids > 1 ? lambdaQ : 1.0);
    wallcycle_stop(wcycle, WallCycleCounter::WaitGpuPmeGather);

    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
               "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(computeEnergyAndVirial, output, wcycle, forceWithVirial, enerd);

    return true;
}
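
/* A hedged sketch of how a caller might use pme_gpu_try_finish_task() with
 * GpuTaskCompletion::Check to overlap other CPU work with the GPU gather; the
 * real overlap logic lives in the MD schedule, and passing
 * GpuTaskCompletion::Wait instead simply blocks until the results are ready:
 *
 *   while (!pme_gpu_try_finish_task(
 *           pme, stepWork, wcycle, &forceWithVirial, enerd, lambdaQ, GpuTaskCompletion::Check))
 *   {
 *       // do other useful CPU work, then poll again
 *   }
 */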

// This is used by PME-only ranks
PmeOutput pme_gpu_wait_finish_task(gmx_pme_t*     pme,
                                   const bool     computeEnergyAndVirial,
                                   const real     lambdaQ,
                                   gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start(wcycle, WallCycleCounter::WaitGpuPmeGather);

    // Synchronize the whole PME stream at once, including D2H result transfers
    // if there are outputs we need to wait for at this step; we still call getOutputs
    // for uniformity and because it sets the PmeOutput.haveForceOutput_.
    if (!pme->gpu->settings.useGpuForceReduction || computeEnergyAndVirial)
    {
        pme_gpu_synchronize(pme->gpu);
    }

    PmeOutput output = pme_gpu_getOutput(
            *pme, computeEnergyAndVirial, pme->gpu->common->ngrids > 1 ? lambdaQ : 1.0);
    wallcycle_stop(wcycle, WallCycleCounter::WaitGpuPmeGather);
    return output;
}

// This is used when not using the alternate-waiting reduction
void pme_gpu_wait_and_reduce(gmx_pme_t*               pme,
                             const gmx::StepWorkload& stepWork,
                             gmx_wallcycle*           wcycle,
                             gmx::ForceWithVirial*    forceWithVirial,
                             gmx_enerdata_t*          enerd,
                             const real               lambdaQ)
{
    // There's no support for computing energy without virial, or vice versa
    const bool computeEnergyAndVirial = stepWork.computeEnergy || stepWork.computeVirial;
    PmeOutput  output                 = pme_gpu_wait_finish_task(
            pme, computeEnergyAndVirial, pme->gpu->common->ngrids > 1 ? lambdaQ : 1.0, wcycle);
    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
               "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(computeEnergyAndVirial, output, wcycle, forceWithVirial, enerd);
}

void pme_gpu_reinit_computation(const gmx_pme_t* pme, gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start_nocount(wcycle, WallCycleCounter::LaunchGpu);
    wallcycle_sub_start_nocount(wcycle, WallCycleSubCounter::LaunchGpuPme);

    pme_gpu_update_timings(pme->gpu);

    pme_gpu_clear_grids(pme->gpu);
    pme_gpu_clear_energy_virial(pme->gpu);

    wallcycle_sub_stop(wcycle, WallCycleSubCounter::LaunchGpuPme);
    wallcycle_stop(wcycle, WallCycleCounter::LaunchGpu);
}

DeviceBuffer<gmx::RVec> pme_gpu_get_device_f(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return DeviceBuffer<gmx::RVec>{};
    }
    return pme_gpu_get_kernelparam_forces(pme->gpu);
}

void pme_gpu_set_device_x(const gmx_pme_t* pme, DeviceBuffer<gmx::RVec> d_x)
{
    GMX_ASSERT(pme != nullptr, "Null pointer is passed as a PME to the set coordinates function.");
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    pme_gpu_set_kernelparam_coordinates(pme->gpu, d_x);
}

GpuEventSynchronizer* pme_gpu_get_f_ready_synchronizer(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }

    return pme_gpu_get_forces_ready_synchronizer(pme->gpu);
}