/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Implements high-level PME GPU functions which do not require GPU framework-specific code.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */

#include "gmxpre.h"

#include "config.h"

#include <list>

#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/stringutil.h"

#include "pme_gpu_internal.h"
#include "pme_gpu_settings.h"
#include "pme_gpu_timings.h"
#include "pme_gpu_types_host.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_solve.h"

/*! \brief
 * Finds out whether PME is currently running on a GPU.
 *
 * \todo The GPU module should not be constructed (or at least called)
 * when it is not active, so there should be no need to check whether
 * it is active. An assertion that this is true makes sense.
 *
 * \param[in] pme  The PME structure.
 * \returns        True if PME currently runs on a GPU, false otherwise.
 */
static inline bool pme_gpu_active(const gmx_pme_t* pme)
{
    return (pme != nullptr) && (pme->runMode != PmeRunMode::CPU);
}

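// The next two wrappers forward timing requests to the GPU timing module only when PME actually runs on a GPU.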
void pme_gpu_reset_timings(const gmx_pme_t* pme)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_reset_timings(pme->gpu);
    }
}

void pme_gpu_get_timings(const gmx_pme_t* pme, gmx_wallclock_gpu_pme_t* timings)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_get_timings(pme->gpu, timings);
    }
}

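// Reports the GPU atom-data alignment used for padding; returns 0 when PME does not run on a GPU.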
int pme_gpu_get_padding_size(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return 0;
    }
    else
    {
        return pme_gpu_get_atom_data_alignment(pme->gpu);
    }
}

/*! \brief
 * A convenience wrapper for launching either the GPU or CPU FFT.
 *
 * \param[in] pme            The PME structure.
 * \param[in] gridIndex      The grid index - should currently always be 0.
 * \param[in] dir            The FFT direction enum.
 * \param[in] wcycle         The wallclock counter.
 */
inline void parallel_3dfft_execute_gpu_wrapper(gmx_pme_t*             pme,
                                               const int              gridIndex,
                                               enum gmx_fft_direction dir,
                                               gmx_wallcycle_t        wcycle)
{
    GMX_ASSERT(gridIndex == 0, "Only single grid supported");
    if (pme_gpu_settings(pme->gpu).performGPUFFT)
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_3dfft(pme->gpu, dir, gridIndex);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    else
    {
        wallcycle_start(wcycle, ewcPME_FFT_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
        for (int thread = 0; thread < pme->nthread; thread++)
        {
            gmx_parallel_3dfft_execute(pme->pfft_setup[gridIndex], dir, thread, wcycle);
        }
        wallcycle_stop(wcycle, ewcPME_FFT_MIXED_MODE);
    }
}

/* The PME computation code is split into a few separate functions below. */

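// Sets the per-step flags on the GPU side and pushes the (possibly updated) box to the device;
// the reciprocal box is also recomputed on the CPU when the solve stage does not run on the GPU.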
void pme_gpu_prepare_computation(gmx_pme_t*     pme,
                                 bool           needToUpdateBox,
                                 const matrix   box,
                                 gmx_wallcycle* wcycle,
                                 int            flags,
                                 bool           useGpuForceReduction)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(pme->nnodes > 0, "");
    GMX_ASSERT(pme->nnodes == 1 || pme->ndecompdim > 0, "");

    PmeGpu* pmeGpu                = pme->gpu;
    pmeGpu->settings.currentFlags = flags;
    // TODO these flags are only here to honor the CPU PME code, and probably should be removed
    pmeGpu->settings.useGpuForceReduction = useGpuForceReduction;

    bool shouldUpdateBox = false;
    for (int i = 0; i < DIM; ++i)
    {
        for (int j = 0; j <= i; ++j)
        {
            shouldUpdateBox |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
            pmeGpu->common->previousBox[i][j] = box[i][j];
        }
    }

    if (needToUpdateBox || shouldUpdateBox) // || is to make the first computation always update
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_update_input_box(pmeGpu, box);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);

        if (!pme_gpu_settings(pmeGpu).performGPUSolve)
        {
            // TODO remove code duplication and add test coverage
            matrix scaledBox;
            pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
            gmx::invertBoxMatrix(scaledBox, pme->recipbox);
            pme->boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
        }
    }
}

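// Launches the first stage of PME on the GPU: spline computation and spreading of the charges onto the real-space grid.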
void pme_gpu_launch_spread(gmx_pme_t* pme, GpuEventSynchronizer* xReadyOnDevice, gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(xReadyOnDevice || !pme->bPPnode || (GMX_GPU != GMX_GPU_CUDA),
               "Need a valid xReadyOnDevice on PP+PME ranks with CUDA.");

    PmeGpu* pmeGpu = pme->gpu;

    const unsigned int gridIndex = 0;
    real*              fftgrid   = pme->fftgrid[gridIndex];
    if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
    {
        /* Spread the coefficients on a grid */
        const bool computeSplines = true;
        const bool spreadCharges  = true;
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_spread(pmeGpu, xReadyOnDevice, gridIndex, fftgrid, computeSplines, spreadCharges);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
}

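// Launches the middle stages of PME: forward FFT, k-space solve (on the GPU, or on the CPU in mixed mode), and backward FFT.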
void pme_gpu_launch_complex_transforms(gmx_pme_t* pme, gmx_wallcycle* wcycle)
{
    PmeGpu*     pmeGpu                 = pme->gpu;
    const auto& settings               = pmeGpu->settings;
    const bool  computeEnergyAndVirial = (settings.currentFlags & GMX_PME_CALC_ENER_VIR) != 0;
    const bool  performBackFFT = (settings.currentFlags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
    const unsigned int gridIndex = 0;
    t_complex*         cfftgrid  = pme->cfftgrid[gridIndex];

    if (settings.currentFlags & GMX_PME_SPREAD)
    {
        if (!settings.performGPUFFT)
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_PME_SPREAD);
            pme_gpu_sync_spread_grid(pme->gpu);
            wallcycle_stop(wcycle, ewcWAIT_GPU_PME_SPREAD);
        }
    }

    try
    {
        if (settings.currentFlags & GMX_PME_SOLVE)
        {
            /* do R2C 3D-FFT */
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_REAL_TO_COMPLEX, wcycle);

            /* solve in k-space for our local cells */
            if (settings.performGPUSolve)
            {
                // TODO grid ordering should be set up at pme init time.
                const auto gridOrdering =
                        settings.useDecomposition ? GridOrdering::YZX : GridOrdering::XYZ;
                wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
                wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
                pme_gpu_solve(pmeGpu, cfftgrid, gridOrdering, computeEnergyAndVirial);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
                wallcycle_stop(wcycle, ewcLAUNCH_GPU);
            }
            else
            {
                wallcycle_start(wcycle, ewcPME_SOLVE_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                for (int thread = 0; thread < pme->nthread; thread++)
                {
                    solve_pme_yzx(pme, cfftgrid, pme->boxVolume, computeEnergyAndVirial,
                                  pme->nthread, thread);
                }
                wallcycle_stop(wcycle, ewcPME_SOLVE_MIXED_MODE);
            }
        }

        if (performBackFFT)
        {
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
        }
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}

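// Launches the force-gather stage on the GPU; a no-op when gathering is done on the CPU in mixed mode.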
void pme_gpu_launch_gather(const gmx_pme_t* pme, gmx_wallcycle gmx_unused* wcycle, PmeForceOutputHandling forceTreatment)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    if (!pme_gpu_settings(pme->gpu).performGPUGather)
    {
        return;
    }

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
    const unsigned int gridIndex = 0;
    real*              fftgrid   = pme->fftgrid[gridIndex];
    pme_gpu_gather(pme->gpu, forceTreatment, reinterpret_cast<float*>(fftgrid));
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

//! Accumulate the \c forceToAdd to \c f, using the available threads.
static void sum_forces(gmx::ArrayRef<gmx::RVec> f, gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntPME);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        f[i] += forceToAdd[i];
    }
}

//! Reduce quantities from \c output to \c forceWithVirial and \c enerd.
static void pme_gpu_reduce_outputs(const int             flags,
                                   const PmeOutput&      output,
                                   gmx_wallcycle*        wcycle,
                                   gmx::ForceWithVirial* forceWithVirial,
                                   gmx_enerdata_t*       enerd)
{
    wallcycle_start(wcycle, ewcPME_GPU_F_REDUCTION);
    GMX_ASSERT(forceWithVirial, "Invalid force pointer");

    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    if (haveComputedEnergyAndVirial)
    {
        GMX_ASSERT(enerd, "Invalid energy output manager");
        forceWithVirial->addVirialContribution(output.coulombVirial_);
        enerd->term[F_COUL_RECIP] += output.coulombEnergy_;
    }
    if (output.haveForceOutput_)
    {
        sum_forces(forceWithVirial->force_, output.forces_);
    }
    wallcycle_stop(wcycle, ewcPME_GPU_F_REDUCTION);
}

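// Attempts to finish the PME GPU work and reduce its outputs on the CPU; when completionKind is Check,
// it returns false without blocking if the GPU tasks have not yet completed, so the caller can retry later.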
bool pme_gpu_try_finish_task(gmx_pme_t*            pme,
                             const int             flags,
                             gmx_wallcycle*        wcycle,
                             gmx::ForceWithVirial* forceWithVirial,
                             gmx_enerdata_t*       enerd,
                             GpuTaskCompletion     completionKind)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(!pme->gpu->settings.useGpuForceReduction,
               "GPU force reduction should not be active on the pme_gpu_try_finish_task() path");

    // First, if possible, check whether all tasks on the stream have
    // completed, and return fast if not. Accumulate to wcycle the
    // time needed for that checking, but do not yet record that the
    // gather has occurred.
    bool           needToSynchronize      = true;
    constexpr bool c_streamQuerySupported = (GMX_GPU == GMX_GPU_CUDA);
    // TODO: implement c_streamQuerySupported with an additional GpuEventSynchronizer per stream (#2521)
    if ((completionKind == GpuTaskCompletion::Check) && c_streamQuerySupported)
    {
        wallcycle_start_nocount(wcycle, ewcWAIT_GPU_PME_GATHER);
        // Query the PME stream for completion of all tasks enqueued and
        // if we're not done, stop the timer before early return.
        const bool pmeGpuDone = pme_gpu_stream_query(pme->gpu);
        wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

        if (!pmeGpuDone)
        {
            return false;
        }
        needToSynchronize = false;
    }

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
    // If the above check passed, then there is no need to make an
    // explicit synchronization call.
    if (needToSynchronize)
    {
        // Synchronize the whole PME stream at once, including D2H result transfers.
        pme_gpu_synchronize(pme->gpu);
    }
    pme_gpu_update_timings(pme->gpu);
    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
               "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);

    return true;
}

// This is used by PME-only ranks
PmeOutput pme_gpu_wait_finish_task(gmx_pme_t* pme, const int flags, gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);

    // Synchronize the whole PME stream at once, including D2H result transfers
    // if there are outputs we need to wait for at this step; we still call getOutputs
    // for uniformity and because it sets the PmeOutput.haveForceOutput_.
    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    if (!pme->gpu->settings.useGpuForceReduction || haveComputedEnergyAndVirial)
    {
        pme_gpu_synchronize(pme->gpu);
    }

    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
    return output;
}

// This is used when not using the alternate-waiting reduction
void pme_gpu_wait_and_reduce(gmx_pme_t*            pme,
                             const int             flags,
                             gmx_wallcycle*        wcycle,
                             gmx::ForceWithVirial* forceWithVirial,
                             gmx_enerdata_t*       enerd)
{
    PmeOutput output = pme_gpu_wait_finish_task(pme, flags, wcycle);
    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
               "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);
}

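// Prepares the GPU module for the next MD step: updates timings and clears the grids and the energy/virial buffers.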
void pme_gpu_reinit_computation(const gmx_pme_t* pme, gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);

    pme_gpu_update_timings(pme->gpu);

    pme_gpu_clear_grids(pme->gpu);
    pme_gpu_clear_energy_virial(pme->gpu);

    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

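// The functions below expose GPU-side resources (device force buffer, coordinate buffer, stream, context,
// and the forces-ready synchronizer) to the rest of mdrun; the getters return nullptr when PME does not run on a GPU.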
void* pme_gpu_get_device_f(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_kernelparam_forces(pme->gpu);
}

void pme_gpu_set_device_x(const gmx_pme_t* pme, DeviceBuffer<float> d_x)
{
    GMX_ASSERT(pme != nullptr, "Null pointer is passed as a PME to the set coordinates function.");
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    pme_gpu_set_kernelparam_coordinates(pme->gpu, d_x);
}

void* pme_gpu_get_device_stream(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_stream(pme->gpu);
}

void* pme_gpu_get_device_context(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_context(pme->gpu);
}

GpuEventSynchronizer* pme_gpu_get_f_ready_synchronizer(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }

    return pme_gpu_get_forces_ready_synchronizer(pme->gpu);
}