2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team,
6 * check out http://www.gromacs.org for more information.
7 * Copyright (c) 2012,2013, by the GROMACS development team, led by
8 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
9 * others, as listed in the AUTHORS file in the top-level source
10 * directory and at http://www.gromacs.org.
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
46 #include "nonbonded.h"
47 #include "nb_kernel.h"
49 #include "nb_free_energy.h"
/*
 * gmx_nb_free_energy_kernel - generic free-energy-perturbation (FEP)
 * non-bonded kernel.
 *
 * Walks the neighbor list and, for every i-j pair, evaluates Coulomb and
 * Van der Waals interactions for BOTH topology end states (A and B),
 * applying a per-state soft-core transformation of the distance. It
 * accumulates:
 *   - pair forces (into f/ff) and shift forces (into fr->fshift),
 *   - per-energy-group Coulomb (Vc) and VdW (Vv) energies,
 *   - dV/dlambda for the Coulomb and VdW components into
 *     kernel_data->dvdl[efptCOUL] / dvdl[efptVDW].
 *
 * NOTE(review): this excerpt is line-sampled. Many original lines
 * (braces, some assignments, switch headers) are elided, and every
 * surviving line still carries its original line number as a leading
 * token. Comments below describe only what the visible lines establish;
 * everything that depends on elided code is hedged and should be
 * confirmed against the full source.
 */
52 gmx_nb_free_energy_kernel(const t_nblist * gmx_restrict nlist,
53 rvec * gmx_restrict xx,
54 rvec * gmx_restrict ff,
55 t_forcerec * gmx_restrict fr,
56 const t_mdatoms * gmx_restrict mdatoms,
57 nb_kernel_data_t * gmx_restrict kernel_data,
58 t_nrnb * gmx_restrict nrnb)
/* Scalar work variables; NSTATES-sized arrays hold per-end-state (A/B)
 * quantities such as charges, LJ parameters and partial forces. */
64 int i, j, n, ii, is3, ii3, k, nj0, nj1, jnr, j3, ggid;
66 real Fscal, FscalC[NSTATES], FscalV[NSTATES], tx, ty, tz;
67 real Vcoul[NSTATES], Vvdw[NSTATES];
68 real rinv6, r, rt, rtC, rtV;
70 real qq[NSTATES], vctot, krsq;
71 int ntiA, ntiB, tj[NSTATES];
72 real Vvdw6, Vvdw12, vvtot;
73 real ix, iy, iz, fix, fiy, fiz;
74 real dx, dy, dz, rsq, rinv;
75 real c6[NSTATES], c12[NSTATES];
76 real LFC[NSTATES], LFV[NSTATES], DLF[NSTATES];
/* dV/dlambda accumulated in double to limit round-off over long lists */
77 double dvdl_coul, dvdl_vdw;
78 real lfac_coul[NSTATES], dlfac_coul[NSTATES], lfac_vdw[NSTATES], dlfac_vdw[NSTATES];
79 real sigma6[NSTATES], alpha_vdw_eff, alpha_coul_eff, sigma2_def, sigma2_min;
80 real rp, rpm2, rC, rV, rinvC, rpinvC, rinvV, rpinvV;
81 real sigma2[NSTATES], sigma_pow[NSTATES], sigma_powm2[NSTATES], rs, rs2;
82 int do_tab, tab_elemsize;
83 int n0, n1C, n1V, nnn;
/* Cubic-spline table interpolation temporaries (Y,F,G,H rows; eps offsets) */
84 real Y, F, G, H, Fp, Geps, Heps2, epsC, eps2C, epsV, eps2V, VV, FF;
95 const real * shiftvec;
102 real facel, krf, crf;
103 const real * chargeA;
104 const real * chargeB;
105 real sigma6_min, sigma6_def, lam_power, sc_power, sc_r_power;
106 real alpha_coul, alpha_vdw, lambda_coul, lambda_vdw, ewc;
112 real rcoulomb, rvdw, sh_invrc6;
113 gmx_bool bExactElecCutoff, bExactVdwCutoff;
114 real rcutoff, rcutoff2, rswitch, d, d2, swV3, swV4, swV5, swF2, swF3, swF4, sw, dsw, rinvcorr;
115 const real * tab_ewald_F;
116 const real * tab_ewald_V;
117 real tab_ewald_scale, tab_ewald_halfsp;
/* Unpack kernel inputs from the argument structs into locals:
 * neighbor-list layout, PBC shift vectors, per-atom charges/types for
 * both end states, lambda values and soft-core parameters. */
122 fshift = fr->fshift[0];
126 jindex = nlist->jindex;
128 icoul = nlist->ielec;
130 shift = nlist->shift;
133 shiftvec = fr->shift_vec[0];
134 chargeA = mdatoms->chargeA;
135 chargeB = mdatoms->chargeB;
139 ewc = fr->ewaldcoeff;
140 Vc = kernel_data->energygrp_elec;
141 typeA = mdatoms->typeA;
142 typeB = mdatoms->typeB;
145 Vv = kernel_data->energygrp_vdw;
146 tabscale = kernel_data->table_elec_vdw->scale;
147 VFtab = kernel_data->table_elec_vdw->data;
148 lambda_coul = kernel_data->lambda[efptCOUL];
149 lambda_vdw = kernel_data->lambda[efptVDW];
150 dvdl = kernel_data->dvdl;
151 alpha_coul = fr->sc_alphacoul;
152 alpha_vdw = fr->sc_alphavdw;
153 lam_power = fr->sc_power;
154 sc_r_power = fr->sc_r_power;
155 sigma6_def = fr->sc_sigma6_def;
156 sigma6_min = fr->sc_sigma6_min;
157 bDoForces = kernel_data->flags & GMX_NONBONDED_DO_FORCE;
159 rcoulomb = fr->rcoulomb;
161 sh_invrc6 = fr->ic->sh_invrc6;
163 /* Ewald (PME) reciprocal force and energy quadratic spline tables */
164 tab_ewald_F = fr->ic->tabq_coul_F;
165 tab_ewald_V = fr->ic->tabq_coul_V;
166 tab_ewald_scale = fr->ic->tabq_scale;
/* Half the table spacing; used in the trapezoid-style energy
 * reconstruction from the force table further below. */
167 tab_ewald_halfsp = 0.5/tab_ewald_scale;
/* Precompute the 5th-order potential-switch polynomial coefficients.
 * NOTE(review): 'd' is used below before any visible assignment --
 * presumably set to (rcutoff - rswitch) on an elided line; confirm
 * against the full source. */
169 if (fr->coulomb_modifier == eintmodPOTSWITCH || fr->vdw_modifier == eintmodPOTSWITCH)
171 rcutoff = (fr->coulomb_modifier == eintmodPOTSWITCH) ? fr->rcoulomb : fr->rvdw;
172 rcutoff2 = rcutoff*rcutoff;
173 rswitch = (fr->coulomb_modifier == eintmodPOTSWITCH) ? fr->rcoulomb_switch : fr->rvdw_switch;
175 swV3 = -10.0/(d*d*d);
176 swV4 = 15.0/(d*d*d*d);
177 swV5 = -6.0/(d*d*d*d*d);
178 swF2 = -30.0/(d*d*d);
179 swF3 = 60.0/(d*d*d*d);
180 swF4 = -30.0/(d*d*d*d*d);
184 /* Stupid compilers dont realize these variables will not be used */
/* An interaction cut-off is "exact" when a modifier is active (or for
 * zero-at-cutoff reaction field); then pairs beyond it are skipped. */
194 bExactElecCutoff = (fr->coulomb_modifier != eintmodNONE) || fr->eeltype == eelRF_ZERO;
195 bExactVdwCutoff = (fr->vdw_modifier != eintmodNONE);
197 /* fix compiler warnings */
206 /* Lambda factor for state A, 1-lambda*/
207 LFC[STATE_A] = 1.0 - lambda_coul;
208 LFV[STATE_A] = 1.0 - lambda_vdw;
210 /* Lambda factor for state B, lambda*/
211 LFC[STATE_B] = lambda_coul;
212 LFV[STATE_B] = lambda_vdw;
214 /*derivative of the lambda factor for state A and B */
/* NOTE(review): the assignment of DLF[] (presumably -1 for A, +1 for B)
 * is on elided lines; it is read below. Soft-core lambda factors use
 * the OTHER state's lambda: (1-LF) raised to sc-power (1 or 2). */
218 for (i = 0; i < NSTATES; i++)
220 lfac_coul[i] = (lam_power == 2 ? (1-LFC[i])*(1-LFC[i]) : (1-LFC[i]));
221 dlfac_coul[i] = DLF[i]*lam_power/sc_r_power*(lam_power == 2 ? (1-LFC[i]) : 1);
222 lfac_vdw[i] = (lam_power == 2 ? (1-LFV[i])*(1-LFV[i]) : (1-LFV[i]));
223 dlfac_vdw[i] = DLF[i]*lam_power/sc_r_power*(lam_power == 2 ? (1-LFV[i]) : 1);
/* Default/minimum sigma^2 derived from sigma^6 as sigma6^(1/3) */
226 sigma2_def = pow(sigma6_def, 1.0/3.0);
227 sigma2_min = pow(sigma6_min, 1.0/3.0);
229 /* Ewald (not PME) table is special (icoul==enbcoulFEWALD) */
231 do_tab = (icoul == GMX_NBKERNEL_ELEC_CUBICSPLINETABLE ||
232 ivdw == GMX_NBKERNEL_VDW_CUBICSPLINETABLE);
234 /* we always use the combined table here */
/* ---- Outer loop over neighbor-list i entries ---- */
237 for (n = 0; (n < nri); n++)
241 shY = shiftvec[is3+1];
242 shZ = shiftvec[is3+2];
/* Pre-scale the i-atom charges by the electrostatics prefactor and
 * compute the row offsets into the nonbonded parameter matrix for the
 * A and B atom types. */
250 iqA = facel*chargeA[ii];
251 iqB = facel*chargeB[ii];
252 ntiA = 2*ntype*typeA[ii];
253 ntiB = 2*ntype*typeB[ii];
/* ---- Inner loop over j atoms of this i entry ---- */
260 for (k = nj0; (k < nj1); k++)
267 rsq = dx*dx+dy*dy+dz*dz;
268 rinv = gmx_invsqrt(rsq);
/* Soft-core distance power r^p: fast paths for the supported powers
 * p=6 and p=48, generic pow() fallback otherwise. rpm2 = r^(p-2). */
270 if (sc_r_power == 6.0)
272 rpm2 = rsq*rsq; /* r4 */
273 rp = rpm2*rsq; /* r6 */
275 else if (sc_r_power == 48.0)
277 rp = rsq*rsq*rsq; /* r6 */
278 rp = rp*rp; /* r12 */
279 rp = rp*rp; /* r24 */
280 rp = rp*rp; /* r48 */
281 rpm2 = rp/rsq; /* r46 */
285 rp = pow(r, sc_r_power); /* not currently supported as input, but can handle it */
/* Per-state pair parameters: LJ table index and charge product */
289 tj[STATE_A] = ntiA+2*typeA[jnr];
290 tj[STATE_B] = ntiB+2*typeB[jnr];
291 qq[STATE_A] = iqA*chargeA[jnr];
292 qq[STATE_B] = iqB*chargeB[jnr];
/* Derive the soft-core sigma for each state from its LJ parameters,
 * falling back to the default sigma when either c6 or c12 vanishes. */
294 for (i = 0; i < NSTATES; i++)
298 c12[i] = nbfp[tj[i]+1];
299 if ((c6[i] > 0) && (c12[i] > 0))
301 /* c12 is stored scaled with 12.0 and c6 is scaled with 6.0 - correct for this */
302 sigma6[i] = 0.5*c12[i]/c6[i];
303 sigma2[i] = pow(sigma6[i], 1.0/3.0);
304 /* should be able to get rid of this ^^^ internal pow call eventually. Will require agreement on
305 what data to store externally. Can't be fixed without larger scale changes, so not 4.6 */
306 if (sigma6[i] < sigma6_min) /* for disappearing coul and vdw with soft core at the same time */
308 sigma6[i] = sigma6_min;
309 sigma2[i] = sigma2_min;
314 sigma6[i] = sigma6_def;
315 sigma2[i] = sigma2_def;
/* sigma^p and sigma^(p-2), mirroring the r^p special cases above */
317 if (sc_r_power == 6.0)
319 sigma_pow[i] = sigma6[i];
320 sigma_powm2[i] = sigma6[i]/sigma2[i];
322 else if (sc_r_power == 48.0)
324 sigma_pow[i] = sigma6[i]*sigma6[i]; /* sigma^12 */
325 sigma_pow[i] = sigma_pow[i]*sigma_pow[i]; /* sigma^24 */
326 sigma_pow[i] = sigma_pow[i]*sigma_pow[i]; /* sigma^48 */
327 sigma_powm2[i] = sigma_pow[i]/sigma2[i];
330 { /* not really supported as input, but in here for testing the general case*/
331 sigma_pow[i] = pow(sigma2[i], sc_r_power/2);
332 sigma_powm2[i] = sigma_pow[i]/(sigma2[i]);
336 /* only use softcore if one of the states has a zero endstate - softcore is for avoiding infinities!*/
/* NOTE(review): when both end states have repulsion (both c12 > 0) the
 * effective alphas are presumably zeroed on elided lines, disabling
 * soft-core; the visible assignments below apply the full alphas in the
 * other (elided) branch. Confirm against the full source. */
337 if ((c12[STATE_A] > 0) && (c12[STATE_B] > 0))
344 alpha_vdw_eff = alpha_vdw;
345 alpha_coul_eff = alpha_coul;
/* ---- Per-state (A/B) interaction evaluation ---- */
348 for (i = 0; i < NSTATES; i++)
355 /* Only spend time on A or B state if it is non-zero */
356 if ( (qq[i] != 0) || (c6[i] != 0) || (c12[i] != 0) )
359 /* this section has to be inside the loop becaue of the dependence on sigma_pow */
/* Soft-cored inverse distances: 1/rC^p = 1/(alpha*lfac*sigma^p + r^p),
 * separately for Coulomb (C) and VdW (V). */
360 rpinvC = 1.0/(alpha_coul_eff*lfac_coul[i]*sigma_pow[i]+rp);
361 rinvC = pow(rpinvC, 1.0/sc_r_power);
364 rpinvV = 1.0/(alpha_vdw_eff*lfac_vdw[i]*sigma_pow[i]+rp);
365 rinvV = pow(rpinvV, 1.0/sc_r_power);
/* Combined-table offsets for the tabulated paths (elided lines set n0) */
374 n1C = tab_elemsize*n0;
380 n1V = tab_elemsize*n0;
383 /* With Ewald and soft-core we should put the cut-off on r,
384 * not on the soft-cored rC, as the real-space and
385 * reciprocal space contributions should (almost) cancel.
/* Electrostatics: skip pairs beyond the exact cut-off (tested on r for
 * Ewald, on the soft-cored rC otherwise), then dispatch on icoul.
 * NOTE(review): the switch header itself is on an elided line. */
388 !(bExactElecCutoff &&
389 ((icoul != GMX_NBKERNEL_ELEC_EWALD && rC >= rcoulomb) ||
390 (icoul == GMX_NBKERNEL_ELEC_EWALD && r >= rcoulomb))))
394 case GMX_NBKERNEL_ELEC_COULOMB:
395 case GMX_NBKERNEL_ELEC_EWALD:
396 /* simple cutoff (yes, ewald is done all on direct space for free energy) */
397 Vcoul[i] = qq[i]*rinvC;
398 FscalC[i] = Vcoul[i];
401 case GMX_NBKERNEL_ELEC_REACTIONFIELD:
403 Vcoul[i] = qq[i]*(rinvC + krf*rC*rC-crf);
404 FscalC[i] = qq[i]*(rinvC - 2.0*krf*rC*rC);
407 case GMX_NBKERNEL_ELEC_CUBICSPLINETABLE:
408 /* non-Ewald tabulated coulomb */
/* Cubic-spline interpolation: F' = F + G*eps + 2*H*eps^2 */
412 Geps = epsC*VFtab[nnn+2];
413 Heps2 = eps2C*VFtab[nnn+3];
416 FF = Fp+Geps+2.0*Heps2;
418 FscalC[i] = -qq[i]*tabscale*FF*rC;
421 case GMX_NBKERNEL_ELEC_GENERALIZEDBORN:
422 gmx_fatal(FARGS, "Free energy and GB not implemented.\n");
425 case GMX_NBKERNEL_ELEC_NONE:
431 gmx_incons("Invalid icoul in free energy kernel");
/* Apply the Coulomb potential-switch: V *= sw, F = F*sw + V*dsw.
 * NOTE(review): d/d2 are (re)computed on elided lines. */
435 if (fr->coulomb_modifier == eintmodPOTSWITCH)
438 d = (d > 0.0) ? d : 0.0;
440 sw = 1.0+d2*d*(swV3+d*(swV4+d*swV5));
441 dsw = d2*(swF2+d*(swF3+d*swF4));
444 FscalC[i] = FscalC[i]*sw + Vcoul[i]*dsw;
/* Van der Waals part: skip beyond the exact VdW cut-off, then
 * dispatch on ivdw (switch header elided). */
448 if ((c6[i] != 0 || c12[i] != 0) &&
449 !(bExactVdwCutoff && rV >= rvdw))
453 case GMX_NBKERNEL_VDW_LENNARDJONES:
455 if (sc_r_power == 6.0)
461 rinv6 = pow(rinvV, 6.0);
464 Vvdw12 = c12[i]*rinv6*rinv6;
/* Potential-shift: subtract the (unperturbed) value at the cut-off */
465 if (fr->vdw_modifier == eintmodPOTSHIFT)
467 Vvdw[i] = ( (Vvdw12-c12[i]*sh_invrc6*sh_invrc6)*(1.0/12.0)
468 -(Vvdw6-c6[i]*sh_invrc6)*(1.0/6.0));
472 Vvdw[i] = Vvdw12*(1.0/12.0) - Vvdw6*(1.0/6.0);
474 FscalV[i] = Vvdw12 - Vvdw6;
477 case GMX_NBKERNEL_VDW_BUCKINGHAM:
478 gmx_fatal(FARGS, "Buckingham free energy not supported.");
481 case GMX_NBKERNEL_VDW_CUBICSPLINETABLE:
/* Tabulated VdW: dispersion uses table rows +4..+7 (offsets +2/+3 of
 * the dispersion sub-table), repulsion rows +8..+11 */
487 Geps = epsV*VFtab[nnn+2];
488 Heps2 = eps2V*VFtab[nnn+3];
491 FF = Fp+Geps+2.0*Heps2;
493 FscalV[i] -= c6[i]*tabscale*FF*rV;
498 Geps = epsV*VFtab[nnn+6];
499 Heps2 = eps2V*VFtab[nnn+7];
502 FF = Fp+Geps+2.0*Heps2;
503 Vvdw[i] += c12[i]*VV;
504 FscalV[i] -= c12[i]*tabscale*FF*rV;
507 case GMX_NBKERNEL_VDW_NONE:
513 gmx_incons("Invalid ivdw in free energy kernel");
/* Apply the VdW potential-switch (same polynomial form as above) */
517 if (fr->vdw_modifier == eintmodPOTSWITCH)
520 d = (d > 0.0) ? d : 0.0;
522 sw = 1.0+d2*d*(swV3+d*(swV4+d*swV5));
523 dsw = d2*(swF2+d*(swF3+d*swF4));
526 FscalV[i] = FscalV[i]*sw + Vvdw[i]*dsw;
/* Hard zeroing beyond rvdw keeps switched potentials exactly zero */
528 FscalV[i] = (rV < rvdw) ? FscalV[i] : 0.0;
529 Vvdw[i] = (rV < rvdw) ? Vvdw[i] : 0.0;
533 /* FscalC (and FscalV) now contain: dV/drC * rC
534 * Now we multiply by rC^-p, so it will be: dV/drC * rC^1-p
535 * Further down we first multiply by r^p-2 and then by
536 * the vector r, which in total gives: dV/drC * (r/rC)^1-p
/* Ewald correction: subtract the short-range (erf) part that PME adds
 * in reciprocal space, interpolated from a linear force table. */
545 if (icoul == GMX_NBKERNEL_ELEC_EWALD &&
546 !(bExactElecCutoff && r >= rcoulomb))
548 /* Because we compute the soft-core normally,
549 * we have to remove the Ewald short range portion.
550 * Done outside of the states loop because this part
551 * doesn't depend on the scaled R.
556 rs = rsq*rinv*tab_ewald_scale;
559 f_lr = (1 - frac)*tab_ewald_F[ri] + frac*tab_ewald_F[ri+1];
561 VV = tab_ewald_V[ri] - tab_ewald_halfsp*frac*(tab_ewald_F[ri] + f_lr);
563 for (i = 0; i < NSTATES; i++)
565 vctot -= LFC[i]*qq[i]*VV;
566 Fscal -= LFC[i]*qq[i]*FF;
567 dvdl_coul -= (DLF[i]*qq[i])*VV;
571 /* Assemble A and B states */
/* Lambda-weighted sums of energies and forces; dV/dlambda gets the
 * explicit dV/dl term plus the soft-core chain-rule contribution
 * through the lambda-dependent rC/rV. */
572 for (i = 0; i < NSTATES; i++)
574 vctot += LFC[i]*Vcoul[i];
575 vvtot += LFV[i]*Vvdw[i];
577 Fscal += LFC[i]*FscalC[i]*rpm2;
578 Fscal += LFV[i]*FscalV[i]*rpm2;
580 dvdl_coul += Vcoul[i]*DLF[i] + LFC[i]*alpha_coul_eff*dlfac_coul[i]*FscalC[i]*sigma_pow[i];
581 dvdl_vdw += Vvdw[i]*DLF[i] + LFV[i]*alpha_vdw_eff*dlfac_vdw[i]*FscalV[i]*sigma_pow[i];
/* Newton's third law: subtract the pair force from atom j
 * (tx/ty/tz = Fscal * dx/dy/dz, computed on elided lines) */
593 f[j3+1] = f[j3+1] - ty;
594 f[j3+2] = f[j3+2] - tz;
/* Add the accumulated i-atom force and the matching shift force */
600 f[ii3] = f[ii3] + fix;
601 f[ii3+1] = f[ii3+1] + fiy;
602 f[ii3+2] = f[ii3+2] + fiz;
603 fshift[is3] = fshift[is3] + fix;
604 fshift[is3+1] = fshift[is3+1] + fiy;
605 fshift[is3+2] = fshift[is3+2] + fiz;
/* Per-energy-group accumulation for this list entry */
608 Vc[ggid] = Vc[ggid] + vctot;
609 Vv[ggid] = Vv[ggid] + vvtot;
612 dvdl[efptCOUL] += dvdl_coul;
613 dvdl[efptVDW] += dvdl_vdw;
615 /* Estimate flops, average for free energy stuff:
616 * 12 flops per outer iteration
617 * 150 flops per inner iteration
619 inc_nrnb(nrnb, eNR_NBKERNEL_FREE_ENERGY, nlist->nri*12 + nlist->jindex[n]*150);
623 nb_free_energy_evaluate_single(real r2, real sc_r_power, real alpha_coul, real alpha_vdw,
624 real tabscale, real *vftab,
625 real qqA, real c6A, real c12A, real qqB, real c6B, real c12B,
626 real LFC[2], real LFV[2], real DLF[2],
627 real lfac_coul[2], real lfac_vdw[2], real dlfac_coul[2], real dlfac_vdw[2],
628 real sigma6_def, real sigma6_min, real sigma2_def, real sigma2_min,
629 real *velectot, real *vvdwtot, real *dvdl)
631 real r, rp, rpm2, rtab, eps, eps2, Y, F, Geps, Heps2, Fp, VV, FF, fscal;
632 real qq[2], c6[2], c12[2], sigma6[2], sigma2[2], sigma_pow[2], sigma_powm2[2];
633 real alpha_coul_eff, alpha_vdw_eff, dvdl_coul, dvdl_vdw;
634 real rpinv, r_coul, r_vdw, velecsum, vvdwsum;
635 real fscal_vdw[2], fscal_elec[2];
636 real velec[2], vvdw[2];
646 if (sc_r_power == 6.0)
648 rpm2 = r2*r2; /* r4 */
649 rp = rpm2*r2; /* r6 */
651 else if (sc_r_power == 48.0)
653 rp = r2*r2*r2; /* r6 */
654 rp = rp*rp; /* r12 */
655 rp = rp*rp; /* r24 */
656 rp = rp*rp; /* r48 */
657 rpm2 = rp/r2; /* r46 */
661 rp = pow(r2, 0.5*sc_r_power); /* not currently supported as input, but can handle it */
665 /* Loop over state A(0) and B(1) */
666 for (i = 0; i < 2; i++)
668 if ((c6[i] > 0) && (c12[i] > 0))
670 /* The c6 & c12 coefficients now contain the constants 6.0 and 12.0, respectively.
671 * Correct for this by multiplying with (1/12.0)/(1/6.0)=6.0/12.0=0.5.
673 sigma6[i] = 0.5*c12[i]/c6[i];
674 sigma2[i] = pow(0.5*c12[i]/c6[i], 1.0/3.0);
675 /* should be able to get rid of this ^^^ internal pow call eventually. Will require agreement on
676 what data to store externally. Can't be fixed without larger scale changes, so not 4.6 */
677 if (sigma6[i] < sigma6_min) /* for disappearing coul and vdw with soft core at the same time */
679 sigma6[i] = sigma6_min;
680 sigma2[i] = sigma2_min;
685 sigma6[i] = sigma6_def;
686 sigma2[i] = sigma2_def;
688 if (sc_r_power == 6.0)
690 sigma_pow[i] = sigma6[i];
691 sigma_powm2[i] = sigma6[i]/sigma2[i];
693 else if (sc_r_power == 48.0)
695 sigma_pow[i] = sigma6[i]*sigma6[i]; /* sigma^12 */
696 sigma_pow[i] = sigma_pow[i]*sigma_pow[i]; /* sigma^24 */
697 sigma_pow[i] = sigma_pow[i]*sigma_pow[i]; /* sigma^48 */
698 sigma_powm2[i] = sigma_pow[i]/sigma2[i];
701 { /* not really supported as input, but in here for testing the general case*/
702 sigma_pow[i] = pow(sigma2[i], sc_r_power/2);
703 sigma_powm2[i] = sigma_pow[i]/(sigma2[i]);
707 /* only use softcore if one of the states has a zero endstate - softcore is for avoiding infinities!*/
708 if ((c12[0] > 0) && (c12[1] > 0))
715 alpha_vdw_eff = alpha_vdw;
716 alpha_coul_eff = alpha_coul;
719 /* Loop over A and B states again */
720 for (i = 0; i < 2; i++)
727 /* Only spend time on A or B state if it is non-zero */
728 if ( (qq[i] != 0) || (c6[i] != 0) || (c12[i] != 0) )
731 rpinv = 1.0/(alpha_coul_eff*lfac_coul[i]*sigma_pow[i]+rp);
732 r_coul = pow(rpinv, -1.0/sc_r_power);
734 /* Electrostatics table lookup data */
735 rtab = r_coul*tabscale;
743 Geps = eps*vftab[ntab+2];
744 Heps2 = eps2*vftab[ntab+3];
747 FF = Fp+Geps+2.0*Heps2;
749 fscal_elec[i] = -qq[i]*FF*r_coul*rpinv*tabscale;
752 rpinv = 1.0/(alpha_vdw_eff*lfac_vdw[i]*sigma_pow[i]+rp);
753 r_vdw = pow(rpinv, -1.0/sc_r_power);
754 /* Vdw table lookup data */
755 rtab = r_vdw*tabscale;
763 Geps = eps*vftab[ntab+6];
764 Heps2 = eps2*vftab[ntab+7];
767 FF = Fp+Geps+2.0*Heps2;
769 fscal_vdw[i] = -c6[i]*FF;
774 Geps = eps*vftab[ntab+10];
775 Heps2 = eps2*vftab[ntab+11];
778 FF = Fp+Geps+2.0*Heps2;
779 vvdw[i] += c12[i]*VV;
780 fscal_vdw[i] -= c12[i]*FF;
781 fscal_vdw[i] *= r_vdw*rpinv*tabscale;
784 /* Now we have velec[i], vvdw[i], and fscal[i] for both states */
785 /* Assemble A and B states */
791 for (i = 0; i < 2; i++)
793 velecsum += LFC[i]*velec[i];
794 vvdwsum += LFV[i]*vvdw[i];
796 fscal += (LFC[i]*fscal_elec[i]+LFV[i]*fscal_vdw[i])*rpm2;
798 dvdl_coul += velec[i]*DLF[i] + LFC[i]*alpha_coul_eff*dlfac_coul[i]*fscal_elec[i]*sigma_pow[i];
799 dvdl_vdw += vvdw[i]*DLF[i] + LFV[i]*alpha_vdw_eff*dlfac_vdw[i]*fscal_vdw[i]*sigma_pow[i];
802 dvdl[efptCOUL] += dvdl_coul;
803 dvdl[efptVDW] += dvdl_vdw;
805 *velectot = velecsum;