/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Implements high-level PME GPU functions which do not require GPU framework-specific code.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */

#include "gmxpre.h"

#include "config.h"

#include <list>

#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/stringutil.h"

#include "pme_gpu_internal.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_solve.h"

void pme_gpu_reset_timings(const gmx_pme_t* pme)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_reset_timings(pme->gpu);
    }
}

void pme_gpu_get_timings(const gmx_pme_t* pme, gmx_wallclock_gpu_pme_t* timings)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_get_timings(pme->gpu, timings);
    }
}

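// Returns 0 when PME does not run on a GPU; otherwise the atom-data alignment that the
// GPU kernel data layouts assume, which callers can use to pad their atom buffers.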
int pme_gpu_get_padding_size(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return 0;
    }
    else
    {
        return pme_gpu_get_atom_data_alignment(pme->gpu);
    }
}

/*! \brief
 * A convenience wrapper for launching either the GPU or CPU FFT.
 *
 * \param[in] pme            The PME structure.
 * \param[in] gridIndex      The grid index - should currently always be 0.
 * \param[in] dir            The FFT direction enum.
 * \param[in] wcycle         The wallclock counter.
 */
inline void parallel_3dfft_execute_gpu_wrapper(gmx_pme_t*             pme,
                                               const int              gridIndex,
                                               enum gmx_fft_direction dir,
                                               gmx_wallcycle_t        wcycle)
{
    GMX_ASSERT(gridIndex == 0, "Only single grid supported");
    if (pme_gpu_performs_FFT(pme->gpu))
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_3dfft(pme->gpu, dir, gridIndex);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    else
    {
        wallcycle_start(wcycle, ewcPME_FFT_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
        for (int thread = 0; thread < pme->nthread; thread++)
        {
            gmx_parallel_3dfft_execute(pme->pfft_setup[gridIndex], dir, thread, wcycle);
        }
        wallcycle_stop(wcycle, ewcPME_FFT_MIXED_MODE);
    }
}

/* The PME computation code is split into a few separate functions. */
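/* The usual ordering, as driven by the caller, is roughly:
 *   pme_gpu_prepare_computation() -> pme_gpu_launch_spread()
 *   -> pme_gpu_launch_complex_transforms() -> pme_gpu_launch_gather()
 *   -> pme_gpu_try_finish_task() / pme_gpu_wait_and_reduce() -> pme_gpu_reinit_computation().
 * This is only a reading aid; the MD-loop and PME-only-rank code define the authoritative sequence.
 */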

void pme_gpu_prepare_computation(gmx_pme_t*     pme,
                                 bool           needToUpdateBox,
                                 const matrix   box,
                                 gmx_wallcycle* wcycle,
                                 int            flags,
                                 bool           useGpuForceReduction)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(pme->nnodes > 0, "");
    GMX_ASSERT(pme->nnodes == 1 || pme->ndecompdim > 0, "");

    PmeGpu* pmeGpu                = pme->gpu;
    pmeGpu->settings.currentFlags = flags;
    // TODO these flags are only here to honor the CPU PME code, and probably should be removed
    pmeGpu->settings.useGpuForceReduction = useGpuForceReduction;

    bool shouldUpdateBox = false;
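    // Only the lower triangle of the box matrix is compared: GROMACS stores the simulation
    // box as a lower-triangular matrix, so the upper-triangle elements are always zero.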
    for (int i = 0; i < DIM; ++i)
    {
        for (int j = 0; j <= i; ++j)
        {
            shouldUpdateBox |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
            pmeGpu->common->previousBox[i][j] = box[i][j];
        }
    }

    if (needToUpdateBox || shouldUpdateBox) // || is to make the first computation always update
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_update_input_box(pmeGpu, box);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);

        if (!pme_gpu_performs_solve(pmeGpu))
        {
            // TODO remove code duplication and add test coverage
            matrix scaledBox;
            pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
            gmx::invertBoxMatrix(scaledBox, pme->recipbox);
            pme->boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
        }
    }
}

void pme_gpu_launch_spread(gmx_pme_t* pme, GpuEventSynchronizer* xReadyOnDevice, gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(xReadyOnDevice || !pme->bPPnode || (GMX_GPU != GMX_GPU_CUDA),
               "Need a valid xReadyOnDevice on PP+PME ranks with CUDA.");

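    // xReadyOnDevice marks in the GPU stream when the coordinates are available on the device
    // (e.g. after the H2D copy), so the spread kernel can be enqueued to wait on that event
    // without blocking the host.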
    PmeGpu* pmeGpu = pme->gpu;

    const unsigned int gridIndex = 0;
    real*              fftgrid   = pme->fftgrid[gridIndex];
    if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
    {
        /* Spread the coefficients on a grid */
        const bool computeSplines = true;
        const bool spreadCharges  = true;
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_spread(pmeGpu, xReadyOnDevice, gridIndex, fftgrid, computeSplines, spreadCharges);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
}

void pme_gpu_launch_complex_transforms(gmx_pme_t* pme, gmx_wallcycle* wcycle)
{
    PmeGpu*    pmeGpu                 = pme->gpu;
    const bool computeEnergyAndVirial = (pmeGpu->settings.currentFlags & GMX_PME_CALC_ENER_VIR) != 0;
    const bool performBackFFT = (pmeGpu->settings.currentFlags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
    const unsigned int gridIndex = 0;
    t_complex*         cfftgrid  = pme->cfftgrid[gridIndex];

    if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
    {
        if (!pme_gpu_performs_FFT(pmeGpu))
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_PME_SPREAD);
            pme_gpu_sync_spread_grid(pme->gpu);
            wallcycle_stop(wcycle, ewcWAIT_GPU_PME_SPREAD);
        }
    }

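    // The mixed-mode CPU code paths below may throw, hence the catch-all wrapper
    // that turns any exception into a fatal error.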
    try
    {
        if (pmeGpu->settings.currentFlags & GMX_PME_SOLVE)
        {
            /* do R2C 3D-FFT */
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_REAL_TO_COMPLEX, wcycle);

            /* solve in k-space for our local cells */
            if (pme_gpu_performs_solve(pmeGpu))
            {
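                // Select the complex-grid layout the solve kernel should assume: with
                // decomposition the (CPU) parallel 3D-FFT produces a transposed, YZX-ordered
                // grid, while the GPU FFT keeps the natural XYZ ordering.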
                const auto gridOrdering = pme_gpu_uses_dd(pmeGpu) ? GridOrdering::YZX : GridOrdering::XYZ;
                wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
                wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
                pme_gpu_solve(pmeGpu, cfftgrid, gridOrdering, computeEnergyAndVirial);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
                wallcycle_stop(wcycle, ewcLAUNCH_GPU);
            }
            else
            {
                wallcycle_start(wcycle, ewcPME_SOLVE_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                for (int thread = 0; thread < pme->nthread; thread++)
                {
                    solve_pme_yzx(pme, cfftgrid, pme->boxVolume, computeEnergyAndVirial,
                                  pme->nthread, thread);
                }
                wallcycle_stop(wcycle, ewcPME_SOLVE_MIXED_MODE);
            }
        }

        if (performBackFFT)
        {
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
        }
    }
    GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}

void pme_gpu_launch_gather(const gmx_pme_t* pme, gmx_wallcycle gmx_unused* wcycle, PmeForceOutputHandling forceTreatment)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

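    // In mixed mode the force gather runs on the CPU elsewhere, so there is nothing to launch here.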
    if (!pme_gpu_performs_gather(pme->gpu))
    {
        return;
    }

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
    const unsigned int gridIndex = 0;
    real*              fftgrid   = pme->fftgrid[gridIndex];
    pme_gpu_gather(pme->gpu, forceTreatment, reinterpret_cast<float*>(fftgrid));
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

//! Accumulate the \c forceToAdd to \c f, using the available threads.
static void sum_forces(gmx::ArrayRef<gmx::RVec> f, gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int end = forceToAdd.size();

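    // nt is marked gmx_unused because it is only referenced by the OpenMP pragma,
    // which is compiled out in builds without OpenMP support.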
    int gmx_unused nt = gmx_omp_nthreads_get(emntPME);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        f[i] += forceToAdd[i];
    }
}

//! Reduce quantities from \c output to \c forceWithVirial and \c enerd.
static void pme_gpu_reduce_outputs(const int             flags,
                                   const PmeOutput&      output,
                                   gmx_wallcycle*        wcycle,
                                   gmx::ForceWithVirial* forceWithVirial,
                                   gmx_enerdata_t*       enerd)
{
    wallcycle_start(wcycle, ewcPME_GPU_F_REDUCTION);
    GMX_ASSERT(forceWithVirial, "Invalid force pointer");

    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    if (haveComputedEnergyAndVirial)
    {
        GMX_ASSERT(enerd, "Invalid energy output manager");
        forceWithVirial->addVirialContribution(output.coulombVirial_);
        enerd->term[F_COUL_RECIP] += output.coulombEnergy_;
    }
    if (output.haveForceOutput_)
    {
        sum_forces(forceWithVirial->force_, output.forces_);
    }
    wallcycle_stop(wcycle, ewcPME_GPU_F_REDUCTION);
}

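/* pme_gpu_try_finish_task() is the non-blocking counterpart of pme_gpu_wait_finish_task():
 * with GpuTaskCompletion::Check it only queries the PME stream and returns false if the
 * GPU work has not finished yet, so the caller can overlap other work and try again later.
 */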
bool pme_gpu_try_finish_task(gmx_pme_t*            pme,
                             const int             flags,
                             gmx_wallcycle*        wcycle,
                             gmx::ForceWithVirial* forceWithVirial,
                             gmx_enerdata_t*       enerd,
                             GpuTaskCompletion     completionKind)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(!pme->gpu->settings.useGpuForceReduction,
               "GPU force reduction should not be active on the pme_gpu_try_finish_task() path");

    // First, if possible, check whether all tasks on the stream have
    // completed, and return fast if not. Accumulate to wcycle the
    // time needed for that checking, but do not yet record that the
    // gather has occurred.
    bool           needToSynchronize      = true;
    constexpr bool c_streamQuerySupported = (GMX_GPU == GMX_GPU_CUDA);
    // TODO: implement c_streamQuerySupported with an additional GpuEventSynchronizer per stream (#2521)
    if ((completionKind == GpuTaskCompletion::Check) && c_streamQuerySupported)
    {
        wallcycle_start_nocount(wcycle, ewcWAIT_GPU_PME_GATHER);
        // Query the PME stream for completion of all tasks enqueued and
        // if we're not done, stop the timer before early return.
        const bool pmeGpuDone = pme_gpu_stream_query(pme->gpu);
        wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

        if (!pmeGpuDone)
        {
            return false;
        }
        needToSynchronize = false;
    }

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
    // If the above check passed, then there is no need to make an
    // explicit synchronization call.
    if (needToSynchronize)
    {
        // Synchronize the whole PME stream at once, including D2H result transfers.
        pme_gpu_synchronize(pme->gpu);
    }
    pme_gpu_update_timings(pme->gpu);
    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
               "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);

    return true;
}

// This is used by PME-only ranks
PmeOutput pme_gpu_wait_finish_task(gmx_pme_t* pme, const int flags, gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);

    // Synchronize the whole PME stream at once, including D2H result transfers
    // if there are outputs we need to wait for at this step; we still call getOutputs
    // for uniformity and because it sets the PmeOutput.haveForceOutput_.
    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    if (!pme->gpu->settings.useGpuForceReduction || haveComputedEnergyAndVirial)
    {
        pme_gpu_synchronize(pme->gpu);
    }

    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
    return output;
}

// This is used when not using the alternate-waiting reduction
void pme_gpu_wait_and_reduce(gmx_pme_t*            pme,
                             const int             flags,
                             gmx_wallcycle*        wcycle,
                             gmx::ForceWithVirial* forceWithVirial,
                             gmx_enerdata_t*       enerd)
{
    PmeOutput output = pme_gpu_wait_finish_task(pme, flags, wcycle);
    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_,
               "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);
}

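// Prepares the GPU-side state for the next step: the grid and the energy/virial buffers are
// cleared, with the clears enqueued in the PME stream (hence the GPU-launch wall-cycle counters).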
void pme_gpu_reinit_computation(const gmx_pme_t* pme, gmx_wallcycle* wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);

    pme_gpu_update_timings(pme->gpu);

    pme_gpu_clear_grids(pme->gpu);
    pme_gpu_clear_energy_virial(pme->gpu);

    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

DeviceBuffer<float> pme_gpu_get_device_x(const gmx_pme_t* pme)
{
    GMX_ASSERT((pme && pme_gpu_active(pme)),
               "PME GPU coordinates buffer was requested from uninitialized PME module");
    return pme_gpu_get_kernelparam_coordinates(pme->gpu);
}

void* pme_gpu_get_device_f(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_kernelparam_forces(pme->gpu);
}

void pme_gpu_set_device_x(const gmx_pme_t* pme, DeviceBuffer<float> d_x)
{
    GMX_ASSERT(pme != nullptr, "Null pointer is passed as a PME to the set coordinates function.");
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    pme_gpu_set_kernelparam_coordinates(pme->gpu, d_x);
}

void* pme_gpu_get_device_stream(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_stream(pme->gpu);
}

void* pme_gpu_get_device_context(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_context(pme->gpu);
}

GpuEventSynchronizer* pme_gpu_get_f_ready_synchronizer(const gmx_pme_t* pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }

    return pme_gpu_get_forces_ready_synchronizer(pme->gpu);
}