/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018,2019, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Implements high-level PME GPU functions which do not require GPU framework-specific code.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */

#include "gmxpre.h"

#include "config.h"

#include <list>

#include "gromacs/ewald/ewald_utils.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdlib/ppforceworkload.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/stringutil.h"

#include "pme_gpu_internal.h"
#include "pme_grid.h"
#include "pme_internal.h"
#include "pme_solve.h"

void pme_gpu_reset_timings(const gmx_pme_t *pme)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_reset_timings(pme->gpu);
    }
}

void pme_gpu_get_timings(const gmx_pme_t *pme, gmx_wallclock_gpu_pme_t *timings)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_get_timings(pme->gpu, timings);
    }
}

int pme_gpu_get_padding_size(const gmx_pme_t *pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return 0;
    }
    else
    {
        return pme_gpu_get_atom_data_alignment(pme->gpu);
    }
}

/*! \brief
 * A convenience wrapper for launching either the GPU or CPU FFT.
 *
 * \param[in] pme            The PME structure.
 * \param[in] gridIndex      The grid index - should currently always be 0.
 * \param[in] dir            The FFT direction enum.
 * \param[in] wcycle         The wallclock counter.
 */
inline void parallel_3dfft_execute_gpu_wrapper(gmx_pme_t              *pme,
                                               const int               gridIndex,
                                               enum gmx_fft_direction  dir,
                                               gmx_wallcycle_t         wcycle)
{
    GMX_ASSERT(gridIndex == 0, "Only single grid supported");
    if (pme_gpu_performs_FFT(pme->gpu))
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_3dfft(pme->gpu, dir, gridIndex);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    else
    {
        wallcycle_start(wcycle, ewcPME_FFT_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
        for (int thread = 0; thread < pme->nthread; thread++)
        {
            gmx_parallel_3dfft_execute(pme->pfft_setup[gridIndex], dir, thread, wcycle);
        }
        wallcycle_stop(wcycle, ewcPME_FFT_MIXED_MODE);
    }
}

/* The PME computation code is split into a few separate functions. */

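/* A rough sketch of the per-step call sequence (the exact order and the
 * conditional parts depend on the caller and on the active PME flags):
 *   pme_gpu_prepare_computation()       - store the flags, update the unit cell box if it changed
 *   pme_gpu_copy_coordinates_to_gpu()   - H2D transfer of the atom coordinates
 *   pme_gpu_launch_spread()             - spline computation and charge spreading
 *   pme_gpu_launch_complex_transforms() - forward FFT, solve, backward FFT (GPU or mixed mode)
 *   pme_gpu_launch_gather()             - force gathering
 *   pme_gpu_try_finish_task() or pme_gpu_wait_and_reduce() - completion wait and output reduction
 *   pme_gpu_reinit_computation()        - clear the grids and energy/virial for the next step
 */
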
void pme_gpu_prepare_computation(gmx_pme_t            *pme,
                                 bool                  needToUpdateBox,
                                 const matrix          box,
                                 gmx_wallcycle        *wcycle,
                                 int                   flags,
                                 bool                  useGpuForceReduction)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(pme->nnodes > 0, "");
    GMX_ASSERT(pme->nnodes == 1 || pme->ndecompdim > 0, "");

    PmeGpu *pmeGpu = pme->gpu;
    pmeGpu->settings.currentFlags         = flags;
    // TODO these flags are only here to honor the CPU PME code, and probably should be removed
    pmeGpu->settings.useGpuForceReduction = useGpuForceReduction;

    bool shouldUpdateBox = false;
    for (int i = 0; i < DIM; ++i)
    {
        for (int j = 0; j <= i; ++j)
        {
            shouldUpdateBox                  |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
            pmeGpu->common->previousBox[i][j] = box[i][j];
        }
    }

    if (needToUpdateBox || shouldUpdateBox) // || is to make the first computation always update
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_update_input_box(pmeGpu, box);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);

        if (!pme_gpu_performs_solve(pmeGpu))
        {
            // TODO remove code duplication and add test coverage
            matrix scaledBox;
            pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
            gmx::invertBoxMatrix(scaledBox, pme->recipbox);
            pme->boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
        }
    }
}

void pme_gpu_copy_coordinates_to_gpu(gmx_pme_t            *pme,
                                     const rvec           *coordinatesHost,
                                     gmx_wallcycle        *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    PmeGpu *pmeGpu = pme->gpu;

    // The only place in the PME GPU code where the LAUNCH_GPU counter's call count is incremented
    wallcycle_start(wcycle, ewcLAUNCH_GPU);
    // The only place in the PME GPU code where the ewcsLAUNCH_GPU_PME subcounter's call count is incremented
    wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_PME);
    pme_gpu_copy_input_coordinates(pmeGpu, coordinatesHost);
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

void pme_gpu_launch_spread(gmx_pme_t            *pme,
                           gmx_wallcycle        *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    PmeGpu            *pmeGpu = pme->gpu;

    const unsigned int gridIndex  = 0;
    real              *fftgrid    = pme->fftgrid[gridIndex];
    if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
    {
        /* Spread the coefficients on a grid */
        const bool computeSplines = true;
        const bool spreadCharges  = true;
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_spread(pmeGpu, gridIndex, fftgrid, computeSplines, spreadCharges);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
}

void pme_gpu_launch_complex_transforms(gmx_pme_t      *pme,
                                       gmx_wallcycle  *wcycle)
{
    PmeGpu            *pmeGpu                 = pme->gpu;
    const bool         computeEnergyAndVirial = (pmeGpu->settings.currentFlags & GMX_PME_CALC_ENER_VIR) != 0;
    const bool         performBackFFT         = (pmeGpu->settings.currentFlags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
    const unsigned int gridIndex              = 0;
    t_complex         *cfftgrid               = pme->cfftgrid[gridIndex];

    if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
    {
        if (!pme_gpu_performs_FFT(pmeGpu))
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_PME_SPREAD);
            pme_gpu_sync_spread_grid(pme->gpu);
            wallcycle_stop(wcycle, ewcWAIT_GPU_PME_SPREAD);
        }
    }

    try
    {
        if (pmeGpu->settings.currentFlags & GMX_PME_SOLVE)
        {
            /* do R2C 3D-FFT */
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_REAL_TO_COMPLEX, wcycle);

            /* solve in k-space for our local cells */
            if (pme_gpu_performs_solve(pmeGpu))
            {
                const auto gridOrdering = pme_gpu_uses_dd(pmeGpu) ? GridOrdering::YZX : GridOrdering::XYZ;
                wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
                wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
                pme_gpu_solve(pmeGpu, cfftgrid, gridOrdering, computeEnergyAndVirial);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
                wallcycle_stop(wcycle, ewcLAUNCH_GPU);
            }
            else
            {
                wallcycle_start(wcycle, ewcPME_SOLVE_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                for (int thread = 0; thread < pme->nthread; thread++)
                {
                    solve_pme_yzx(pme, cfftgrid, pme->boxVolume,
                                  computeEnergyAndVirial, pme->nthread, thread);
                }
                wallcycle_stop(wcycle, ewcPME_SOLVE_MIXED_MODE);
            }
        }

        if (performBackFFT)
        {
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
        }
    } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}

void pme_gpu_launch_gather(const gmx_pme_t                 *pme,
                           gmx_wallcycle gmx_unused        *wcycle,
                           PmeForceOutputHandling           forceTreatment)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    if (!pme_gpu_performs_gather(pme->gpu))
    {
        return;
    }

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
    const unsigned int gridIndex  = 0;
    real              *fftgrid    = pme->fftgrid[gridIndex];
    pme_gpu_gather(pme->gpu, forceTreatment, reinterpret_cast<float *>(fftgrid));
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

//! Accumulate the forces in \c forceToAdd into \c f, using the available threads.
static void sum_forces(gmx::ArrayRef<gmx::RVec>       f,
                       gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int      end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntPME);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        f[i] += forceToAdd[i];
    }
}

//! Reduce quantities from \c output to \c forceWithVirial and \c enerd.
static void pme_gpu_reduce_outputs(const int             flags,
                                   const PmeOutput      &output,
                                   gmx_wallcycle        *wcycle,
                                   gmx::ForceWithVirial *forceWithVirial,
                                   gmx_enerdata_t       *enerd)
{
    wallcycle_start(wcycle, ewcPME_GPU_F_REDUCTION);
    GMX_ASSERT(forceWithVirial, "Invalid force pointer");

    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    if (haveComputedEnergyAndVirial)
    {
        GMX_ASSERT(enerd, "Invalid energy output manager");
        forceWithVirial->addVirialContribution(output.coulombVirial_);
        enerd->term[F_COUL_RECIP] += output.coulombEnergy_;
    }
    if (output.haveForceOutput_)
    {
        sum_forces(forceWithVirial->force_, output.forces_);
    }
    wallcycle_stop(wcycle, ewcPME_GPU_F_REDUCTION);
}

bool pme_gpu_try_finish_task(gmx_pme_t            *pme,
                             const int             flags,
                             gmx_wallcycle        *wcycle,
                             gmx::ForceWithVirial *forceWithVirial,
                             gmx_enerdata_t       *enerd,
                             GpuTaskCompletion     completionKind)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(!pme->gpu->settings.useGpuForceReduction, "GPU force reduction should not be active on the pme_gpu_try_finish_task() path");

    // First, if possible, check whether all tasks on the stream have
    // completed, and return fast if not. Accumulate to wcycle the
    // time needed for that checking, but do not yet record that the
    // gather has occurred.
    bool           needToSynchronize      = true;
    constexpr bool c_streamQuerySupported = (GMX_GPU == GMX_GPU_CUDA);
    // TODO: implement c_streamQuerySupported with an additional GpuEventSynchronizer per stream (#2521)
    if ((completionKind == GpuTaskCompletion::Check) && c_streamQuerySupported)
    {
        wallcycle_start_nocount(wcycle, ewcWAIT_GPU_PME_GATHER);
        // Query the PME stream for completion of all enqueued tasks, and
        // if we are not done, stop the timer before the early return.
        const bool pmeGpuDone = pme_gpu_stream_query(pme->gpu);
        wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

        if (!pmeGpuDone)
        {
            return false;
        }
        needToSynchronize = false;
    }

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
    // If the above check passed, then there is no need to make an
    // explicit synchronization call.
    if (needToSynchronize)
    {
        // Synchronize the whole PME stream at once, including D2H result transfers.
        pme_gpu_synchronize(pme->gpu);
    }
    pme_gpu_update_timings(pme->gpu);
    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_, "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);

    return true;
}

// This is used by PME-only ranks
PmeOutput pme_gpu_wait_finish_task(gmx_pme_t     *pme,
                                   const int      flags,
                                   gmx_wallcycle *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);

    // Synchronize the whole PME stream at once, including D2H result transfers.
    //
    // TODO: make this sync conditional to wait only for virial/energies
    pme_gpu_synchronize(pme->gpu);

    pme_gpu_update_timings(pme->gpu);
    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
    return output;
}

// This is used when not using the alternate-waiting reduction
void pme_gpu_wait_and_reduce(gmx_pme_t            *pme,
                             const int             flags,
                             gmx_wallcycle        *wcycle,
                             gmx::ForceWithVirial *forceWithVirial,
                             gmx_enerdata_t       *enerd)
{
    PmeOutput output = pme_gpu_wait_finish_task(pme, flags, wcycle);
    GMX_ASSERT(pme->gpu->settings.useGpuForceReduction == !output.haveForceOutput_, "When forces are reduced on the CPU, there needs to be force output");
    pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);
}

void pme_gpu_reinit_computation(const gmx_pme_t *pme,
                                gmx_wallcycle   *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);

    pme_gpu_clear_grids(pme->gpu);
    pme_gpu_clear_energy_virial(pme->gpu);

    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

DeviceBuffer<float> pme_gpu_get_device_x(const gmx_pme_t *pme)
{
    GMX_ASSERT((pme && pme_gpu_active(pme)), "PME GPU coordinates buffer was requested from uninitialized PME module");
    return pme_gpu_get_kernelparam_coordinates(pme->gpu);
}

void *pme_gpu_get_device_f(const gmx_pme_t *pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_kernelparam_forces(pme->gpu);
}

void *pme_gpu_get_device_stream(const gmx_pme_t *pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }
    return pme_gpu_get_stream(pme->gpu);
}

GpuEventSynchronizer * pme_gpu_get_f_ready_synchronizer(const gmx_pme_t *pme)
{
    if (!pme || !pme_gpu_active(pme))
    {
        return nullptr;
    }

    return pme_gpu_get_forces_ready_synchronizer(pme->gpu);
}