/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2016,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Implements high-level PME GPU functions which do not require GPU framework-specific code.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */

#include "gmxpre.h"

#include "config.h"

#include <list>

#include "gromacs/ewald/ewald-utils.h"
#include "gromacs/ewald/pme.h"
#include "gromacs/fft/parallel_3dfft.h"
#include "gromacs/math/invertmatrix.h"
#include "gromacs/mdlib/gmx_omp_nthreads.h"
#include "gromacs/mdtypes/enerdata.h"
#include "gromacs/mdtypes/forceoutput.h"
#include "gromacs/mdtypes/inputrec.h"
#include "gromacs/utility/exceptions.h"
#include "gromacs/utility/fatalerror.h"
#include "gromacs/utility/gmxassert.h"
#include "gromacs/utility/stringutil.h"

#include "pme-gpu-internal.h"
#include "pme-grid.h"
#include "pme-internal.h"
#include "pme-solve.h"

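//! Resets the PME GPU timing counters; does nothing when PME is not running on a GPU.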
void pme_gpu_reset_timings(const gmx_pme_t *pme)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_reset_timings(pme->gpu);
    }
}

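//! Copies the PME GPU timing data into \p timings; does nothing when PME is not running on a GPU.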
void pme_gpu_get_timings(const gmx_pme_t *pme, gmx_wallclock_gpu_pme_t *timings)
{
    if (pme_gpu_active(pme))
    {
        pme_gpu_get_timings(pme->gpu, timings);
    }
}

/*! \brief
 * A convenience wrapper for launching either the GPU or CPU FFT.
 *
 * \param[in] pme            The PME structure.
 * \param[in] gridIndex      The grid index - should currently always be 0.
 * \param[in] dir            The FFT direction enum.
 * \param[in] wcycle         The wallclock counter.
 */
inline void parallel_3dfft_execute_gpu_wrapper(gmx_pme_t              *pme,
                                               const int               gridIndex,
                                               enum gmx_fft_direction  dir,
                                               gmx_wallcycle_t         wcycle)
{
    GMX_ASSERT(gridIndex == 0, "Only single grid supported");
    if (pme_gpu_performs_FFT(pme->gpu))
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_3dfft(pme->gpu, dir, gridIndex);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
    else
    {
        wallcycle_start(wcycle, ewcPME_FFT_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
        for (int thread = 0; thread < pme->nthread; thread++)
        {
            gmx_parallel_3dfft_execute(pme->pfft_setup[gridIndex], dir, thread, wcycle);
        }
        wallcycle_stop(wcycle, ewcPME_FFT_MIXED_MODE);
    }
}

/* The PME computation code is split into a few separate functions below. */

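/* A typical GPU PME step, as an illustrative outline of how the functions
 * below fit together (not a prescribed calling contract):
 * pme_gpu_prepare_computation(), pme_gpu_launch_spread(),
 * pme_gpu_launch_complex_transforms(), pme_gpu_launch_gather(),
 * then pme_gpu_try_finish_task() or pme_gpu_wait_and_reduce(),
 * and finally pme_gpu_reinit_computation().
 */

//! Stores the flags and, when the box has changed (or on the first computation), updates the unit cell data on the GPU.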
void pme_gpu_prepare_computation(gmx_pme_t            *pme,
                                 bool                  needToUpdateBox,
                                 const matrix          box,
                                 gmx_wallcycle        *wcycle,
                                 int                   flags)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");
    GMX_ASSERT(pme->nnodes > 0, "PME should have at least one node.");
    GMX_ASSERT(pme->nnodes == 1 || pme->ndecompdim > 0, "PME decomposition should be set up for a multi-node run.");

    PmeGpu *pmeGpu = pme->gpu;
    pmeGpu->settings.currentFlags = flags;
    // TODO these flags are only here to honor the CPU PME code, and probably should be removed

    bool shouldUpdateBox = false;
    for (int i = 0; i < DIM; ++i)
    {
        for (int j = 0; j <= i; ++j)
        {
            shouldUpdateBox                  |= (pmeGpu->common->previousBox[i][j] != box[i][j]);
            pmeGpu->common->previousBox[i][j] = box[i][j];
        }
    }

    if (needToUpdateBox || shouldUpdateBox) // the || ensures that the box is always updated on the first computation
    {
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_update_input_box(pmeGpu, box);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);

        if (!pme_gpu_performs_solve(pmeGpu))
        {
            // TODO remove code duplication and add test coverage
            matrix scaledBox;
            pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
            gmx::invertBoxMatrix(scaledBox, pme->recipbox);
            pme->boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
        }
    }
}

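//! Launches the H2D copy of the input coordinates and, when GMX_PME_SPREAD is set, the spline computation and charge spreading on the GPU.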
void pme_gpu_launch_spread(gmx_pme_t            *pme,
                           const rvec           *x,
                           gmx_wallcycle        *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    PmeGpu *pmeGpu = pme->gpu;

    // This is the only place in the PME GPU code where the LAUNCH_GPU counter's call count is incremented
    wallcycle_start(wcycle, ewcLAUNCH_GPU);
    // This is the only place in the PME GPU code where the ewcsLAUNCH_GPU_PME subcounter's call count is incremented
    wallcycle_sub_start(wcycle, ewcsLAUNCH_GPU_PME);
    pme_gpu_copy_input_coordinates(pmeGpu, x);
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);

    const unsigned int gridIndex  = 0;
    real              *fftgrid    = pme->fftgrid[gridIndex];
    if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
    {
        /* Spread the coefficients on a grid */
        const bool computeSplines = true;
        const bool spreadCharges  = true;
        wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
        wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
        pme_gpu_spread(pmeGpu, gridIndex, fftgrid, computeSplines, spreadCharges);
        wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
        wallcycle_stop(wcycle, ewcLAUNCH_GPU);
    }
}

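//! Runs the 3D FFTs and the k-space solve, on the GPU where possible and otherwise on the CPU (mixed mode), according to the current flags.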
void pme_gpu_launch_complex_transforms(gmx_pme_t      *pme,
                                       gmx_wallcycle  *wcycle)
{
    PmeGpu            *pmeGpu                 = pme->gpu;
    const bool         computeEnergyAndVirial = (pmeGpu->settings.currentFlags & GMX_PME_CALC_ENER_VIR) != 0;
    const bool         performBackFFT         = (pmeGpu->settings.currentFlags & (GMX_PME_CALC_F | GMX_PME_CALC_POT)) != 0;
    const unsigned int gridIndex              = 0;
    t_complex         *cfftgrid               = pme->cfftgrid[gridIndex];

    if (pmeGpu->settings.currentFlags & GMX_PME_SPREAD)
    {
        if (!pme_gpu_performs_FFT(pmeGpu))
        {
            wallcycle_start(wcycle, ewcWAIT_GPU_PME_SPREAD);
            pme_gpu_sync_spread_grid(pme->gpu);
            wallcycle_stop(wcycle, ewcWAIT_GPU_PME_SPREAD);
        }
    }

    try
    {
        if (pmeGpu->settings.currentFlags & GMX_PME_SOLVE)
        {
            /* do R2C 3D-FFT */
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_REAL_TO_COMPLEX, wcycle);

            /* solve in k-space for our local cells */
            if (pme_gpu_performs_solve(pmeGpu))
            {
                const auto gridOrdering = pme_gpu_uses_dd(pmeGpu) ? GridOrdering::YZX : GridOrdering::XYZ;
                wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
                wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
                pme_gpu_solve(pmeGpu, cfftgrid, gridOrdering, computeEnergyAndVirial);
                wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
                wallcycle_stop(wcycle, ewcLAUNCH_GPU);
            }
            else
            {
                wallcycle_start(wcycle, ewcPME_SOLVE_MIXED_MODE);
#pragma omp parallel for num_threads(pme->nthread) schedule(static)
                for (int thread = 0; thread < pme->nthread; thread++)
                {
                    solve_pme_yzx(pme, cfftgrid, pme->boxVolume,
                                  computeEnergyAndVirial, pme->nthread, thread);
                }
                wallcycle_stop(wcycle, ewcPME_SOLVE_MIXED_MODE);
            }
        }

        if (performBackFFT)
        {
            parallel_3dfft_execute_gpu_wrapper(pme, gridIndex, GMX_FFT_COMPLEX_TO_REAL, wcycle);
        }
    } GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR;
}

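//! Launches the GPU force gathering kernel; returns immediately when gathering is not performed on the GPU.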
void pme_gpu_launch_gather(const gmx_pme_t                 *pme,
                           gmx_wallcycle gmx_unused        *wcycle,
                           PmeForceOutputHandling           forceTreatment)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    if (!pme_gpu_performs_gather(pme->gpu))
    {
        return;
    }

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);
    const unsigned int gridIndex  = 0;
    real              *fftgrid    = pme->fftgrid[gridIndex];
    pme_gpu_gather(pme->gpu, forceTreatment, reinterpret_cast<float *>(fftgrid));
    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}

//! Accumulate \c forceToAdd into \c f, using the available threads.
static void sum_forces(gmx::ArrayRef<gmx::RVec>       f,
                       gmx::ArrayRef<const gmx::RVec> forceToAdd)
{
    const int      end = forceToAdd.size();

    int gmx_unused nt = gmx_omp_nthreads_get(emntPME);
#pragma omp parallel for num_threads(nt) schedule(static)
    for (int i = 0; i < end; i++)
    {
        f[i] += forceToAdd[i];
    }
}

//! Reduce quantities from \c output to \c forceWithVirial and \c enerd.
static void pme_gpu_reduce_outputs(const int             flags,
                                   const PmeOutput      &output,
                                   gmx_wallcycle        *wcycle,
                                   gmx::ForceWithVirial *forceWithVirial,
                                   gmx_enerdata_t       *enerd)
{
    wallcycle_start(wcycle, ewcPME_GPU_F_REDUCTION);
    GMX_ASSERT(forceWithVirial, "Invalid force pointer");
    const bool haveComputedEnergyAndVirial = (flags & GMX_PME_CALC_ENER_VIR) != 0;
    if (haveComputedEnergyAndVirial)
    {
        GMX_ASSERT(enerd, "Invalid energy output manager");
        forceWithVirial->addVirialContribution(output.coulombVirial_);
        enerd->term[F_COUL_RECIP] += output.coulombEnergy_;
    }
    sum_forces(forceWithVirial->force_, output.forces_);
    wallcycle_stop(wcycle, ewcPME_GPU_F_REDUCTION);
}

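/*! \brief
 * Attempts to finish the PME GPU work and reduce its outputs into \c forceWithVirial and \c enerd.
 *
 * When \c completionKind is GpuTaskCompletion::Check (and stream querying is supported),
 * returns false without blocking if the GPU tasks have not completed yet;
 * otherwise synchronizes, reduces the outputs, and returns true.
 */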
bool pme_gpu_try_finish_task(gmx_pme_t            *pme,
                             const int             flags,
                             gmx_wallcycle        *wcycle,
                             gmx::ForceWithVirial *forceWithVirial,
                             gmx_enerdata_t       *enerd,
                             GpuTaskCompletion     completionKind)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    // First, if possible, check whether all tasks on the stream have
    // completed, and return fast if not. Accumulate to wcycle the
    // time needed for that checking, but do not yet record that the
    // gather has occurred.
    bool           needToSynchronize      = true;
    constexpr bool c_streamQuerySupported = (GMX_GPU == GMX_GPU_CUDA);
    // TODO: implement c_streamQuerySupported with an additional GpuEventSynchronizer per stream (#2521)
    if ((completionKind == GpuTaskCompletion::Check) && c_streamQuerySupported)
    {
        wallcycle_start_nocount(wcycle, ewcWAIT_GPU_PME_GATHER);
        // Query the PME stream for completion of all tasks enqueued and
        // if we're not done, stop the timer before early return.
        const bool pmeGpuDone = pme_gpu_stream_query(pme->gpu);
        wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

        if (!pmeGpuDone)
        {
            return false;
        }
        needToSynchronize = false;
    }

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
    // If the above check passed, then there is no need to make an
    // explicit synchronization call.
    if (needToSynchronize)
    {
        // Synchronize the whole PME stream at once, including D2H result transfers.
        pme_gpu_synchronize(pme->gpu);
    }
    pme_gpu_update_timings(pme->gpu);
    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);

    pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);

    return true;
}

//! Blocks until the PME GPU work is complete and returns its output; this is used by PME-only ranks.
PmeOutput pme_gpu_wait_finish_task(gmx_pme_t     *pme,
                                   const int      flags,
                                   gmx_wallcycle *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start(wcycle, ewcWAIT_GPU_PME_GATHER);
    // Synchronize the whole PME stream at once, including D2H result transfers.
    pme_gpu_synchronize(pme->gpu);

    pme_gpu_update_timings(pme->gpu);
    PmeOutput output = pme_gpu_getOutput(*pme, flags);
    wallcycle_stop(wcycle, ewcWAIT_GPU_PME_GATHER);
    return output;
}

//! Blocks until the PME GPU work is complete and reduces its outputs; this is used when not using the alternate-waiting reduction.
void pme_gpu_wait_and_reduce(gmx_pme_t            *pme,
                             const int             flags,
                             gmx_wallcycle        *wcycle,
                             gmx::ForceWithVirial *forceWithVirial,
                             gmx_enerdata_t       *enerd)
{
    PmeOutput output = pme_gpu_wait_finish_task(pme, flags, wcycle);
    pme_gpu_reduce_outputs(flags, output, wcycle, forceWithVirial, enerd);
}

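//! Clears the grids and the energy/virial buffers on the GPU, preparing for the next PME computation.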
void pme_gpu_reinit_computation(const gmx_pme_t *pme,
                                gmx_wallcycle   *wcycle)
{
    GMX_ASSERT(pme_gpu_active(pme), "This should be a GPU run of PME but it is not enabled.");

    wallcycle_start_nocount(wcycle, ewcLAUNCH_GPU);
    wallcycle_sub_start_nocount(wcycle, ewcsLAUNCH_GPU_PME);

    pme_gpu_clear_grids(pme->gpu);
    pme_gpu_clear_energy_virial(pme->gpu);

    wallcycle_sub_stop(wcycle, ewcsLAUNCH_GPU_PME);
    wallcycle_stop(wcycle, ewcLAUNCH_GPU);
}