2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2019,2020, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * \brief Defines the propagator element for the modular simulator
38 * \author Pascal Merz <pascal.merz@me.com>
39 * \ingroup module_modularsimulator
44 #include "propagator.h"
46 #include "gromacs/utility.h"
47 #include "gromacs/math/vec.h"
48 #include "gromacs/math/vectypes.h"
49 #include "gromacs/mdlib/gmx_omp_nthreads.h"
50 #include "gromacs/mdlib/mdatoms.h"
51 #include "gromacs/mdlib/update.h"
52 #include "gromacs/mdtypes/inputrec.h"
53 #include "gromacs/mdtypes/mdatom.h"
54 #include "gromacs/timing/wallcycle.h"
55 #include "gromacs/utility/fatalerror.h"
57 #include "modularsimulator.h"
58 #include "simulatoralgorithm.h"
59 #include "statepropagatordata.h"
64 template<NumVelocityScalingValues numVelocityScalingValues, ParrinelloRahmanVelocityScaling parrinelloRahmanVelocityScaling>
//! \brief Propagate the velocities of atom \p a by one integration step.
//!
//! Applies compile-time-selected thermostat velocity scaling and/or
//! Parrinello-Rahman (PR) box-coupling scaling before the kick
//! v += f * invMassPerDim * dt. Because both flavours are template
//! parameters, the compiler eliminates every branch that does not apply
//! to the chosen instantiation.
//!
//! NOTE(review): this excerpt is missing several original lines — the
//! dt / lambda / v / diagPR parameter declarations and the body of the
//! "scaling only, no PR" branch are not visible. Confirm the exact
//! signature and that branch against the complete source file.
65 static void inline updateVelocities(int a,
68 const rvec* gmx_restrict invMassPerDim,
70 const rvec* gmx_restrict f,
72 const matrix matrixPR)
74 for (int d = 0; d < DIM; d++)
76 // TODO: Extract this into policy classes
// Case 1: thermostat scaling requested, no PR coupling.
// (Branch body not visible in this excerpt — presumably v[a][d] *= lambda;
// verify against the full source.)
77 if (numVelocityScalingValues != NumVelocityScalingValues::None
78 && parrinelloRahmanVelocityScaling == ParrinelloRahmanVelocityScaling::No)
// Case 2: thermostat scaling combined with a diagonal PR matrix; both
// effects fold into one multiplicative factor per dimension.
82 if (numVelocityScalingValues != NumVelocityScalingValues::None
83 && parrinelloRahmanVelocityScaling == ParrinelloRahmanVelocityScaling::Diagonal)
85 v[a][d] *= (lambda - diagPR[d]);
// Case 3: thermostat scaling with a full (non-diagonal) PR matrix; the
// PR contribution is the inner product of matrix row d with v[a].
87 if (numVelocityScalingValues != NumVelocityScalingValues::None
88 && parrinelloRahmanVelocityScaling == ParrinelloRahmanVelocityScaling::Full)
90 v[a][d] = lambda * v[a][d] - iprod(matrixPR[d], v[a]);
// Case 4: no thermostat scaling, diagonal PR matrix only.
92 if (numVelocityScalingValues == NumVelocityScalingValues::None
93 && parrinelloRahmanVelocityScaling == ParrinelloRahmanVelocityScaling::Diagonal)
95 v[a][d] *= (1 - diagPR[d]);
// Case 5: no thermostat scaling, full PR matrix only.
97 if (numVelocityScalingValues == NumVelocityScalingValues::None
98 && parrinelloRahmanVelocityScaling == ParrinelloRahmanVelocityScaling::Full)
100 v[a][d] -= iprod(matrixPR[d], v[a]);
// Finally the kick: acceleration f/m integrated over dt, per dimension.
102 v[a][d] += f[a][d] * invMassPerDim[a][d] * dt;
//! \brief Propagate the position of atom \p a by one step:
//!        xprime = x + v * dt (constant-velocity drift per dimension).
//!
//! NOTE(review): the template header (if any) and the dt parameter line
//! are not visible in this excerpt — confirm against the full source.
107 static void inline updatePositions(int a,
109 const rvec* gmx_restrict x,
110 rvec* gmx_restrict xprime,
111 const rvec* gmx_restrict v)
113 for (int d = 0; d < DIM; d++)
115 xprime[a][d] = x[a][d] + v[a][d] * dt;
119 //! Propagation (position only)
121 template<NumVelocityScalingValues numVelocityScalingValues, ParrinelloRahmanVelocityScaling parrinelloRahmanVelocityScaling>
122 void Propagator<IntegrationStep::PositionsOnly>::run()
// Account the whole propagation in the ewcUPDATE wallcycle counter.
124 wallcycle_start(wcycle_, ewcUPDATE);
// Raw rvec views into the state: xp (new positions) is written this step;
// x (previous positions) and v (velocities) are read-only snapshots.
126 auto xp = as_rvec_array(statePropagatorData_->positionsView().paddedArrayRef().data());
127 auto x = as_rvec_array(statePropagatorData_->constPreviousPositionsView().paddedArrayRef().data());
128 auto v = as_rvec_array(statePropagatorData_->constVelocitiesView().paddedArrayRef().data());
130 int nth = gmx_omp_nthreads_get(emntUpdate);
131 int homenr = mdAtoms_->mdatoms()->homenr;
// Split the home atoms statically across the update threads; each thread
// drifts its own contiguous atom range independently (no shared writes).
133 #pragma omp parallel for num_threads(nth) schedule(static) default(none) shared(nth, homenr, x, xp, v)
134 for (int th = 0; th < nth; th++)
138 int start_th, end_th;
139 getThreadAtomRange(nth, th, homenr, &start_th, &end_th);
141 for (int a = start_th; a < end_th; a++)
143 updatePositions(a, timestep_, x, xp, v);
// Exceptions must not escape an OpenMP region; convert them to a fatal error.
146 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
148 wallcycle_stop(wcycle_, ewcUPDATE);
151 //! Propagation (velocity only)
153 template<NumVelocityScalingValues numVelocityScalingValues, ParrinelloRahmanVelocityScaling parrinelloRahmanVelocityScaling>
154 void Propagator<IntegrationStep::VelocitiesOnly>::run()
156 wallcycle_start(wcycle_, ewcUPDATE);
// v is updated in place; forces and inverse masses are read-only inputs.
158 auto v = as_rvec_array(statePropagatorData_->velocitiesView().paddedArrayRef().data());
159 auto f = as_rvec_array(statePropagatorData_->constForcesView().force().data());
160 auto invMassPerDim = mdAtoms_->mdatoms()->invMassPerDim;
// Single scaling factor hoisted out of the atom loop; 1.0 (no-op) otherwise.
// NOTE(review): the "const real lambda =" declaration line is not visible
// in this excerpt — confirm against the full source.
163 (numVelocityScalingValues == NumVelocityScalingValues::Single) ? velocityScaling_[0] : 1.0;
165 bool doDiagonalScaling = false;
166 if (parrinelloRahmanVelocityScaling != ParrinelloRahmanVelocityScaling::No)
168 // TODO: Could we know in advance whether the matrix is diagonal?
// A PR matrix with zero off-diagonals lets us use the cheaper Diagonal
// kernel even though Full was requested at compile time.
169 doDiagonalScaling = (matrixPR_[YY][XX] == 0 && matrixPR_[ZZ][XX] == 0 && matrixPR_[ZZ][YY] == 0);
170 if (doDiagonalScaling)
172 diagPR_[XX] = matrixPR_[XX][XX];
173 diagPR_[YY] = matrixPR_[YY][YY];
174 diagPR_[ZZ] = matrixPR_[ZZ][ZZ];
178 int nth = gmx_omp_nthreads_get(emntUpdate);
179 int homenr = mdAtoms_->mdatoms()->homenr;
181 // lambda could be shared, but gcc-8 & gcc-9 don't agree how to write that...
182 // https://www.gnu.org/software/gcc/gcc-9/porting_to.html -> OpenMP data sharing
183 #pragma omp parallel for num_threads(nth) schedule(static) default(none) \
184 shared(nth, homenr, v, f, invMassPerDim, doDiagonalScaling) firstprivate(lambda)
185 for (int th = 0; th < nth; th++)
189 int start_th, end_th;
190 getThreadAtomRange(nth, th, homenr, &start_th, &end_th)
192 for (int a = start_th; a < end_th; a++)
// Per-atom dispatch to the fully specialized kernel: Multiple scaling
// looks up the per-temperature-group factor via cTC[a]; otherwise the
// hoisted lambda is used. PR flavour is re-chosen at run time (see above).
194 if (parrinelloRahmanVelocityScaling == ParrinelloRahmanVelocityScaling::No)
196 if (numVelocityScalingValues == NumVelocityScalingValues::Multiple)
198 updateVelocities<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::No>(
199 a, timestep_, velocityScaling_[mdAtoms_->mdatoms()->cTC[a]],
200 invMassPerDim, v, f, diagPR_, matrixPR_);
204 updateVelocities<numVelocityScalingValues, ParrinelloRahmanVelocityScaling::No>(
205 a, timestep_, lambda, invMassPerDim, v, f, diagPR_, matrixPR_);
210 if (doDiagonalScaling)
212 if (numVelocityScalingValues == NumVelocityScalingValues::Multiple)
214 updateVelocities<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::Diagonal>(
215 a, timestep_, velocityScaling_[mdAtoms_->mdatoms()->cTC[a]],
216 invMassPerDim, v, f, diagPR_, matrixPR_);
220 updateVelocities<numVelocityScalingValues, ParrinelloRahmanVelocityScaling::Diagonal>(
221 a, timestep_, lambda, invMassPerDim, v, f, diagPR_, matrixPR_);
226 if (numVelocityScalingValues == NumVelocityScalingValues::Multiple)
228 updateVelocities<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::Full>(
229 a, timestep_, velocityScaling_[mdAtoms_->mdatoms()->cTC[a]],
230 invMassPerDim, v, f, diagPR_, matrixPR_);
234 updateVelocities<numVelocityScalingValues, ParrinelloRahmanVelocityScaling::Full>(
235 a, timestep_, lambda, invMassPerDim, v, f, diagPR_, matrixPR_);
// Exceptions must not escape an OpenMP region; convert them to a fatal error.
241 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
243 wallcycle_stop(wcycle_, ewcUPDATE);
246 //! Propagation (leapfrog case - position and velocity)
248 template<NumVelocityScalingValues numVelocityScalingValues, ParrinelloRahmanVelocityScaling parrinelloRahmanVelocityScaling>
249 void Propagator<IntegrationStep::LeapFrog>::run()
251 wallcycle_start(wcycle_, ewcUPDATE);
// Leapfrog writes both v (in place) and xp (new positions); x is the
// read-only snapshot of the previous positions, f the current forces.
253 auto xp = as_rvec_array(statePropagatorData_->positionsView().paddedArrayRef().data());
254 auto x = as_rvec_array(statePropagatorData_->constPreviousPositionsView().paddedArrayRef().data());
255 auto v = as_rvec_array(statePropagatorData_->velocitiesView().paddedArrayRef().data());
256 auto f = as_rvec_array(statePropagatorData_->constForcesView().force().data());
257 auto invMassPerDim = mdAtoms_->mdatoms()->invMassPerDim;
// Single scaling factor hoisted out of the atom loop; 1.0 (no-op) otherwise.
// NOTE(review): the "const real lambda =" declaration line is not visible
// in this excerpt — confirm against the full source.
260 (numVelocityScalingValues == NumVelocityScalingValues::Single) ? velocityScaling_[0] : 1.0;
262 bool doDiagonalScaling = false;
263 if (parrinelloRahmanVelocityScaling != ParrinelloRahmanVelocityScaling::No)
265 // TODO: Could we know in advance whether the matrix is diagonal?
// A PR matrix with zero off-diagonals lets us use the cheaper Diagonal
// kernel even though Full was requested at compile time.
266 doDiagonalScaling = (matrixPR_[YY][XX] == 0 && matrixPR_[ZZ][XX] == 0 && matrixPR_[ZZ][YY] == 0);
267 if (doDiagonalScaling)
269 diagPR_[XX] = matrixPR_[XX][XX];
270 diagPR_[YY] = matrixPR_[YY][YY];
271 diagPR_[ZZ] = matrixPR_[ZZ][ZZ];
275 int nth = gmx_omp_nthreads_get(emntUpdate);
276 int homenr = mdAtoms_->mdatoms()->homenr;
278 // lambda could be shared, but gcc-8 & gcc-9 don't agree how to write that...
279 // https://www.gnu.org/software/gcc/gcc-9/porting_to.html -> OpenMP data sharing
280 #pragma omp parallel for num_threads(nth) schedule(static) default(none) \
281 shared(nth, homenr, x, xp, v, f, invMassPerDim, doDiagonalScaling) firstprivate(lambda)
282 for (int th = 0; th < nth; th++)
286 int start_th, end_th;
287 getThreadAtomRange(nth, th, homenr, &start_th, &end_th);
289 for (int a = start_th; a < end_th; a++)
// Kick: same per-atom dispatch as the velocities-only propagator —
// Multiple scaling reads the per-group factor via cTC[a], and the PR
// flavour is re-chosen at run time (diagonal fast path above).
291 if (parrinelloRahmanVelocityScaling == ParrinelloRahmanVelocityScaling::No)
293 if (numVelocityScalingValues == NumVelocityScalingValues::Multiple)
295 updateVelocities<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::No>(
296 a, timestep_, velocityScaling_[mdAtoms_->mdatoms()->cTC[a]],
297 invMassPerDim, v, f, diagPR_, matrixPR_);
301 updateVelocities<numVelocityScalingValues, ParrinelloRahmanVelocityScaling::No>(
302 a, timestep_, lambda, invMassPerDim, v, f, diagPR_, matrixPR_);
307 if (doDiagonalScaling)
309 if (numVelocityScalingValues == NumVelocityScalingValues::Multiple)
311 updateVelocities<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::Diagonal>(
312 a, timestep_, velocityScaling_[mdAtoms_->mdatoms()->cTC[a]],
313 invMassPerDim, v, f, diagPR_, matrixPR_);
317 updateVelocities<numVelocityScalingValues, ParrinelloRahmanVelocityScaling::Diagonal>(
318 a, timestep_, lambda, invMassPerDim, v, f, diagPR_, matrixPR_);
323 if (numVelocityScalingValues == NumVelocityScalingValues::Multiple)
325 updateVelocities<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::Full>(
326 a, timestep_, velocityScaling_[mdAtoms_->mdatoms()->cTC[a]],
327 invMassPerDim, v, f, diagPR_, matrixPR_);
331 updateVelocities<numVelocityScalingValues, ParrinelloRahmanVelocityScaling::Full>(
332 a, timestep_, lambda, invMassPerDim, v, f, diagPR_, matrixPR_);
// Drift: full-step position update using the freshly kicked velocities.
336 updatePositions(a, timestep_, x, xp, v);
// Exceptions must not escape an OpenMP region; convert them to a fatal error.
339 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
341 wallcycle_stop(wcycle_, ewcUPDATE);
344 //! Propagation (velocity verlet stage 2 - velocity and position)
346 template<NumVelocityScalingValues numVelocityScalingValues, ParrinelloRahmanVelocityScaling parrinelloRahmanVelocityScaling>
347 void Propagator<IntegrationStep::VelocityVerletPositionsAndVelocities>::run()
349 wallcycle_start(wcycle_, ewcUPDATE);
// Velocity-verlet stage: half-step velocity kick (0.5 * timestep_ below)
// followed by a full-step position drift.
351 auto xp = as_rvec_array(statePropagatorData_->positionsView().paddedArrayRef().data());
352 auto x = as_rvec_array(statePropagatorData_->constPreviousPositionsView().paddedArrayRef().data());
353 auto v = as_rvec_array(statePropagatorData_->velocitiesView().paddedArrayRef().data());
354 auto f = as_rvec_array(statePropagatorData_->constForcesView().force().data());
355 auto invMassPerDim = mdAtoms_->mdatoms()->invMassPerDim;
357 int nth = gmx_omp_nthreads_get(emntUpdate);
358 int homenr = mdAtoms_->mdatoms()->homenr;
// Single scaling factor hoisted out of the atom loop; 1.0 (no-op) otherwise.
// NOTE(review): the "const real lambda =" declaration line is not visible
// in this excerpt — confirm against the full source.
361 (numVelocityScalingValues == NumVelocityScalingValues::Single) ? velocityScaling_[0] : 1.0;
363 bool doDiagonalScaling = false;
364 if (parrinelloRahmanVelocityScaling != ParrinelloRahmanVelocityScaling::No)
366 // TODO: Could we know in advance whether the matrix is diagonal?
// A PR matrix with zero off-diagonals lets us use the cheaper Diagonal
// kernel even though Full was requested at compile time.
367 doDiagonalScaling = (matrixPR_[YY][XX] == 0 && matrixPR_[ZZ][XX] == 0 && matrixPR_[ZZ][YY] == 0);
368 if (doDiagonalScaling)
370 diagPR_[XX] = matrixPR_[XX][XX];
371 diagPR_[YY] = matrixPR_[YY][YY];
372 diagPR_[ZZ] = matrixPR_[ZZ][ZZ];
376 // lambda could be shared, but gcc-8 & gcc-9 don't agree how to write that...
377 // https://www.gnu.org/software/gcc/gcc-9/porting_to.html -> OpenMP data sharing
378 #pragma omp parallel for num_threads(nth) schedule(static) default(none) \
379 shared(nth, homenr, x, xp, v, f, invMassPerDim, doDiagonalScaling) firstprivate(lambda)
380 for (int th = 0; th < nth; th++)
384 int start_th, end_th;
385 getThreadAtomRange(nth, th, homenr, &start_th, &end_th);
387 for (int a = start_th; a < end_th; a++)
// Half-step kick: note the 0.5 * timestep_ in every call below — this
// is what distinguishes the VV stage from the leapfrog propagator.
389 if (parrinelloRahmanVelocityScaling == ParrinelloRahmanVelocityScaling::No)
391 if (numVelocityScalingValues == NumVelocityScalingValues::Multiple)
393 updateVelocities<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::No>(
394 a, 0.5 * timestep_, velocityScaling_[mdAtoms_->mdatoms()->cTC[a]],
395 invMassPerDim, v, f, diagPR_, matrixPR_);
399 updateVelocities<numVelocityScalingValues, ParrinelloRahmanVelocityScaling::No>(
400 a, 0.5 * timestep_, lambda, invMassPerDim, v, f, diagPR_, matrixPR_);
405 if (doDiagonalScaling)
407 if (numVelocityScalingValues == NumVelocityScalingValues::Multiple)
409 updateVelocities<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::Diagonal>(
410 a, 0.5 * timestep_, velocityScaling_[mdAtoms_->mdatoms()->cTC[a]],
411 invMassPerDim, v, f, diagPR_, matrixPR_);
415 updateVelocities<numVelocityScalingValues, ParrinelloRahmanVelocityScaling::Diagonal>(
416 a, 0.5 * timestep_, lambda, invMassPerDim, v, f, diagPR_, matrixPR_);
421 if (numVelocityScalingValues == NumVelocityScalingValues::Multiple)
423 updateVelocities<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::Full>(
424 a, 0.5 * timestep_, velocityScaling_[mdAtoms_->mdatoms()->cTC[a]],
425 invMassPerDim, v, f, diagPR_, matrixPR_);
429 updateVelocities<numVelocityScalingValues, ParrinelloRahmanVelocityScaling::Full>(
430 a, 0.5 * timestep_, lambda, invMassPerDim, v, f, diagPR_, matrixPR_);
// Drift: full-step position update using the half-kicked velocities.
434 updatePositions(a, timestep_, x, xp, v);
// Exceptions must not escape an OpenMP region; convert them to a fatal error.
437 GMX_CATCH_ALL_AND_EXIT_WITH_FATAL_ERROR
439 wallcycle_stop(wcycle_, ewcUPDATE);
442 template<IntegrationStep algorithm>
//! \brief Construct a propagator holding the time step and non-owning
//!        handles to the state, atom data and wallcycle counters.
//!
//! Velocity scaling is disabled until setNumVelocityScalingVariables()
//! is called (see registration in getElementPointerImpl()).
//!
//! NOTE(review): the initializer list is truncated in this excerpt —
//! timestep_, mdAtoms_, wcycle_ and scalingStepPR_ initializers are not
//! visible. Confirm against the complete source.
443 Propagator<algorithm>::Propagator(double timestep,
444 StatePropagatorData* statePropagatorData,
445 const MDAtoms* mdAtoms,
446 gmx_wallcycle* wcycle) :
448 statePropagatorData_(statePropagatorData),
449 doSingleVelocityScaling_(false),
450 doGroupVelocityScaling_(false),
// -1 means "no velocity-scaling step scheduled"; scheduleTask() compares
// the current step against this sentinel.
451 scalingStepVelocity_(-1),
460 template<IntegrationStep algorithm>
//! \brief Register the run function for this step, selecting the
//!        template instantiation of run() that matches this step's needs.
//!
//! The velocity-scaling flavour (Single/Multiple/None) and the
//! Parrinello-Rahman flavour (Full/No) are decided here per step by
//! comparing against the scaling steps set via the callbacks. Note that
//! only Full is ever scheduled for PR: run() itself downgrades to the
//! diagonal kernel at run time when the matrix turns out diagonal.
461 void Propagator<algorithm>::scheduleTask(Step gmx_unused step,
462 Time gmx_unused time,
463 const RegisterRunFunction& registerRunFunction)
// Scaling applies only on the exact step the thermostat/barostat announced.
465 const bool doSingleVScalingThisStep = (doSingleVelocityScaling_ && (step == scalingStepVelocity_));
466 const bool doGroupVScalingThisStep = (doGroupVelocityScaling_ && (step == scalingStepVelocity_));
468 const bool doParrinelloRahmanThisStep = (step == scalingStepPR_);
470 if (doSingleVScalingThisStep)
472 if (doParrinelloRahmanThisStep)
474 registerRunFunction([this]() {
475 run<NumVelocityScalingValues::Single, ParrinelloRahmanVelocityScaling::Full>();
480 registerRunFunction([this]() {
481 run<NumVelocityScalingValues::Single, ParrinelloRahmanVelocityScaling::No>();
485 else if (doGroupVScalingThisStep)
487 if (doParrinelloRahmanThisStep)
489 registerRunFunction([this]() {
490 run<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::Full>();
495 registerRunFunction([this]() {
496 run<NumVelocityScalingValues::Multiple, ParrinelloRahmanVelocityScaling::No>();
502 if (doParrinelloRahmanThisStep)
504 registerRunFunction([this]() {
505 run<NumVelocityScalingValues::None, ParrinelloRahmanVelocityScaling::Full>();
510 registerRunFunction([this]() {
511 run<NumVelocityScalingValues::None, ParrinelloRahmanVelocityScaling::No>();
517 template<IntegrationStep algorithm>
//! \brief Allocate the velocity-scaling factors (all initialized to 1,
//!        i.e. no scaling) and record whether single- or group-scaling
//!        mode applies. May only be called once per propagator.
518 void Propagator<algorithm>::setNumVelocityScalingVariables(int numVelocityScalingVariables)
// Position-only propagation never touches velocities, so requesting
// velocity scaling for it is a usage error.
520 if (algorithm == IntegrationStep::PositionsOnly)
522 gmx_fatal(FARGS, "Velocity scaling not implemented for IntegrationStep::PositionsOnly.");
524 GMX_ASSERT(velocityScaling_.empty(),
525 "Number of velocity scaling variables cannot be changed once set.");
527 velocityScaling_.resize(numVelocityScalingVariables, 1.);
// Exactly one factor => Single mode; more => per-group (Multiple) mode.
528 doSingleVelocityScaling_ = numVelocityScalingVariables == 1;
529 doGroupVelocityScaling_ = numVelocityScalingVariables > 1;
532 template<IntegrationStep algorithm>
//! \brief Expose the velocity-scaling factors for the thermostat to write
//!        into. Requires setNumVelocityScalingVariables() to have run.
533 ArrayRef<real> Propagator<algorithm>::viewOnVelocityScaling()
535 if (algorithm == IntegrationStep::PositionsOnly)
537 gmx_fatal(FARGS, "Velocity scaling not implemented for IntegrationStep::PositionsOnly.");
539 GMX_ASSERT(!velocityScaling_.empty(), "Number of velocity scaling variables not set.");
541 return velocityScaling_;
544 template<IntegrationStep algorithm>
//! \brief Return a callback the thermostat uses to announce on which step
//!        the scaling factors it wrote should be applied.
545 PropagatorCallback Propagator<algorithm>::velocityScalingCallback()
547 if (algorithm == IntegrationStep::PositionsOnly)
549 gmx_fatal(FARGS, "Velocity scaling not implemented for IntegrationStep::PositionsOnly.");
// The stored step is compared in scheduleTask() to enable scaling.
552 return [this](Step step) { scalingStepVelocity_ = step; };
555 template<IntegrationStep algorithm>
//! \brief Expose the Parrinello-Rahman scaling matrix for the barostat to
//!        write into; the matrix is zeroed before being handed out.
556 ArrayRef<rvec> Propagator<algorithm>::viewOnPRScalingMatrix()
559 algorithm != IntegrationStep::PositionsOnly,
560 "Parrinello-Rahman scaling not implemented for IntegrationStep::PositionsOnly.");
562 clear_mat(matrixPR_);
563 // gcc-5 needs this to be explicit (all other tested compilers would be ok
564 // with simply returning matrixPR)
565 return ArrayRef<rvec>(matrixPR_);
568 template<IntegrationStep algorithm>
//! \brief Return a callback the barostat uses to announce on which step
//!        the Parrinello-Rahman scaling matrix should be applied.
569 PropagatorCallback Propagator<algorithm>::prScalingCallback()
572 algorithm != IntegrationStep::PositionsOnly,
573 "Parrinello-Rahman scaling not implemented for IntegrationStep::PositionsOnly.");
// The stored step is compared in scheduleTask() to enable PR scaling.
575 return [this](Step step) { scalingStepPR_ = step; };
578 template<IntegrationStep algorithm>
//! \brief Factory for the modular simulator builder: create the propagator
//!        element and, if requested, wire its scaling hooks into the
//!        thermostat and/or barostat.
//!
//! NOTE(review): the parameter list is gapped in this excerpt (the
//! timestep parameter line is not visible) — confirm the full signature.
579 ISimulatorElement* Propagator<algorithm>::getElementPointerImpl(
580 LegacySimulatorData* legacySimulatorData,
581 ModularSimulatorAlgorithmBuilderHelper* builderHelper,
582 StatePropagatorData* statePropagatorData,
583 EnergyData gmx_unused* energyData,
584 FreeEnergyPerturbationData gmx_unused* freeEnergyPerturbationData,
585 GlobalCommunicationHelper gmx_unused* globalCommunicationHelper,
587 RegisterWithThermostat registerWithThermostat,
588 RegisterWithBarostat registerWithBarostat)
// The builder helper owns the element; we keep only a borrowed pointer.
590 auto* element = builderHelper->storeElement(std::make_unique<Propagator<algorithm>>(
591 timestep, statePropagatorData, legacySimulatorData->mdAtoms, legacySimulatorData->wcycle));
592 if (registerWithThermostat == RegisterWithThermostat::True)
594 auto* propagator = static_cast<Propagator<algorithm>*>(element);
// Hand the thermostat: (1) a setter for the number of scaling groups,
// (2) a view on the scaling factors, (3) the step-announcement callback.
595 builderHelper->registerWithThermostat(
596 { [propagator](int num) { propagator->setNumVelocityScalingVariables(num); },
597 [propagator]() { return propagator->viewOnVelocityScaling(); },
598 [propagator]() { return propagator->velocityScalingCallback(); } });
600 if (registerWithBarostat == RegisterWithBarostat::True)
602 auto* propagator = static_cast<Propagator<algorithm>*>(element);
// Hand the barostat a view on the PR matrix and its step callback.
603 builderHelper->registerWithBarostat(
604 { [propagator]() { return propagator->viewOnPRScalingMatrix(); },
605 [propagator]() { return propagator->prScalingCallback(); } });
610 // Explicit template initializations
// Explicitly instantiate the propagator for each supported integration
// scheme so their definitions are emitted in this translation unit and
// available to users of the (declaration-only) header.
611 template class Propagator<IntegrationStep::PositionsOnly>;
612 template class Propagator<IntegrationStep::VelocitiesOnly>;
613 template class Propagator<IntegrationStep::LeapFrog>;
614 template class Propagator<IntegrationStep::VelocityVerletPositionsAndVelocities>;