2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013, by the GROMACS development team, led by
7 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
8 * and including many others, as listed in the AUTHORS file in the
9 * top-level source directory and at http://www.gromacs.org.
11 * GROMACS is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public License
13 * as published by the Free Software Foundation; either version 2.1
14 * of the License, or (at your option) any later version.
16 * GROMACS is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with GROMACS; if not, see
23 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
24 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 * If you want to redistribute modifications to GROMACS, please
27 * consider that scientific software is very special. Version
28 * control is crucial - bugs must be traceable. We will be happy to
29 * consider code for inclusion in the official distribution, but
30 * derived work must not be called official GROMACS. Details are found
31 * in the README & COPYING files - if they are missing, get the
32 * official version at http://www.gromacs.org.
34 * To help us fund GROMACS development, we humbly ask that you cite
35 * the research papers on the package. Check out http://www.gromacs.org.
45 #include "nonbonded.h"
46 #include "nb_kernel.h"
48 #include "nb_free_energy.h"
/*
 * gmx_nb_free_energy_kernel() - generic free-energy-perturbation (FEP)
 * non-bonded kernel.  For each i-j pair in the neighbor list it evaluates
 * Coulomb and Van der Waals interactions for both end states (A and B),
 * using soft-core scaling of the distance, 1/(alpha*lambda^p*sigma^n + r^n),
 * so vanishing particles do not cause singularities.  It accumulates:
 *   - forces into ff and fr->fshift (hedged: force output lines are partly
 *     elided in this copy; presumably gated on bDoForces),
 *   - per-energy-group potentials into kernel_data->energygrp_elec/vdw,
 *   - dV/dlambda (coulomb and vdw separately) into kernel_data->dvdl.
 *
 * NOTE(review): this copy of the file appears to have lines elided (stray
 * per-line numbers fused into the text; missing braces and assignments such
 * as d, d2, r, frac).  Verify any change against the canonical
 * src/gmxlib/nonbonded/nb_free_energy.c before relying on these comments.
 */
51 gmx_nb_free_energy_kernel(const t_nblist * gmx_restrict nlist,
52 rvec * gmx_restrict xx,
53 rvec * gmx_restrict ff,
54 t_forcerec * gmx_restrict fr,
55 const t_mdatoms * gmx_restrict mdatoms,
56 nb_kernel_data_t * gmx_restrict kernel_data,
57 t_nrnb * gmx_restrict nrnb)
/* Scratch variables. Arrays of size NSTATES hold per-end-state values,
 * indexed by STATE_A and STATE_B (see usage below). */
63 int i, j, n, ii, is3, ii3, k, nj0, nj1, jnr, j3, ggid;
65 real Fscal, FscalC[NSTATES], FscalV[NSTATES], tx, ty, tz;
66 real Vcoul[NSTATES], Vvdw[NSTATES];
67 real rinv6, r, rt, rtC, rtV;
69 real qq[NSTATES], vctot, krsq;
70 int ntiA, ntiB, tj[NSTATES];
71 real Vvdw6, Vvdw12, vvtot;
72 real ix, iy, iz, fix, fiy, fiz;
73 real dx, dy, dz, rsq, rinv;
74 real c6[NSTATES], c12[NSTATES];
/* LFC/LFV: lambda factors for coulomb/vdw; DLF: their lambda-derivatives. */
75 real LFC[NSTATES], LFV[NSTATES], DLF[NSTATES];
/* dV/dlambda accumulated in double to limit round-off over the pair loop. */
76 double dvdl_coul, dvdl_vdw;
77 real lfac_coul[NSTATES], dlfac_coul[NSTATES], lfac_vdw[NSTATES], dlfac_vdw[NSTATES];
78 real sigma6[NSTATES], alpha_vdw_eff, alpha_coul_eff, sigma2_def, sigma2_min;
/* rp = r^p (soft-core r power), rpm2 = r^(p-2); rC/rV are the soft-cored
 * coulomb/vdw distances, rinvC/rinvV their inverses. */
79 real rp, rpm2, rC, rV, rinvC, rpinvC, rinvV, rpinvV;
80 real sigma2[NSTATES], sigma_pow[NSTATES], sigma_powm2[NSTATES], rs, rs2;
81 int do_tab, tab_elemsize;
82 int n0, n1C, n1V, nnn;
/* Cubic-spline table interpolation scratch (Y/F/G/H are table entries). */
83 real Y, F, G, H, Fp, Geps, Heps2, epsC, eps2C, epsV, eps2V, VV, FF;
94 const real * shiftvec;
101 real facel, krf, crf;
102 const real * chargeA;
103 const real * chargeB;
104 real sigma6_min, sigma6_def, lam_power, sc_power, sc_r_power;
105 real alpha_coul, alpha_vdw, lambda_coul, lambda_vdw, ewc;
111 real rcoulomb, rvdw, sh_invrc6;
112 gmx_bool bExactElecCutoff, bExactVdwCutoff;
113 real rcutoff, rcutoff2, rswitch, d, d2, swV3, swV4, swV5, swF2, swF3, swF4, sw, dsw, rinvcorr;
114 const real * tab_ewald_F;
115 const real * tab_ewald_V;
116 real tab_ewald_scale, tab_ewald_halfsp;
/* Unpack inputs from the neighbor list, force record, mdatoms and
 * kernel_data into locals for the hot loops below. */
121 fshift = fr->fshift[0];
125 jindex = nlist->jindex;
127 icoul = nlist->ielec;
129 shift = nlist->shift;
132 shiftvec = fr->shift_vec[0];
133 chargeA = mdatoms->chargeA;
134 chargeB = mdatoms->chargeB;
138 ewc = fr->ewaldcoeff_q;
139 Vc = kernel_data->energygrp_elec;
140 typeA = mdatoms->typeA;
141 typeB = mdatoms->typeB;
144 Vv = kernel_data->energygrp_vdw;
145 tabscale = kernel_data->table_elec_vdw->scale;
146 VFtab = kernel_data->table_elec_vdw->data;
147 lambda_coul = kernel_data->lambda[efptCOUL];
148 lambda_vdw = kernel_data->lambda[efptVDW];
149 dvdl = kernel_data->dvdl;
/* Soft-core parameters from the force record (mdp sc-* options). */
150 alpha_coul = fr->sc_alphacoul;
151 alpha_vdw = fr->sc_alphavdw;
152 lam_power = fr->sc_power;
153 sc_r_power = fr->sc_r_power;
154 sigma6_def = fr->sc_sigma6_def;
155 sigma6_min = fr->sc_sigma6_min;
156 bDoForces = kernel_data->flags & GMX_NONBONDED_DO_FORCE;
158 rcoulomb = fr->rcoulomb;
160 sh_invrc6 = fr->ic->sh_invrc6;
162 /* Ewald (PME) reciprocal force and energy quadratic spline tables */
163 tab_ewald_F = fr->ic->tabq_coul_F;
164 tab_ewald_V = fr->ic->tabq_coul_V;
165 tab_ewald_scale = fr->ic->tabq_scale;
/* Half table spacing, used by the quadratic-spline energy reconstruction. */
166 tab_ewald_halfsp = 0.5/tab_ewald_scale;
/* Precompute 5th-degree potential-switch polynomial coefficients.
 * NOTE(review): the assignment of d (presumably rcutoff - rswitch) is not
 * visible in this copy - confirm before editing these formulas. */
168 if (fr->coulomb_modifier == eintmodPOTSWITCH || fr->vdw_modifier == eintmodPOTSWITCH)
170 rcutoff = (fr->coulomb_modifier == eintmodPOTSWITCH) ? fr->rcoulomb : fr->rvdw;
171 rcutoff2 = rcutoff*rcutoff;
172 rswitch = (fr->coulomb_modifier == eintmodPOTSWITCH) ? fr->rcoulomb_switch : fr->rvdw_switch;
174 swV3 = -10.0/(d*d*d);
175 swV4 = 15.0/(d*d*d*d);
176 swV5 = -6.0/(d*d*d*d*d);
177 swF2 = -30.0/(d*d*d);
178 swF3 = 60.0/(d*d*d*d);
179 swF4 = -30.0/(d*d*d*d*d);
183 /* Stupid compilers don't realize these variables will not be used */
/* With a modifier (or zeroed reaction field) interactions must be cut off
 * exactly at the cut-off distance, enforced per-pair below. */
193 bExactElecCutoff = (fr->coulomb_modifier != eintmodNONE) || fr->eeltype == eelRF_ZERO;
194 bExactVdwCutoff = (fr->vdw_modifier != eintmodNONE);
196 /* fix compiler warnings */
205 /* Lambda factor for state A, 1-lambda*/
206 LFC[STATE_A] = 1.0 - lambda_coul;
207 LFV[STATE_A] = 1.0 - lambda_vdw;
209 /* Lambda factor for state B, lambda*/
210 LFC[STATE_B] = lambda_coul;
211 LFV[STATE_B] = lambda_vdw;
213 /*derivative of the lambda factor for state A and B */
/* Soft-core lambda factors: (1-LF)^p and d/dlambda thereof; only
 * sc-power 1 and 2 are supported (ternary on lam_power == 2). */
217 for (i = 0; i < NSTATES; i++)
219 lfac_coul[i] = (lam_power == 2 ? (1-LFC[i])*(1-LFC[i]) : (1-LFC[i]));
220 dlfac_coul[i] = DLF[i]*lam_power/sc_r_power*(lam_power == 2 ? (1-LFC[i]) : 1);
221 lfac_vdw[i] = (lam_power == 2 ? (1-LFV[i])*(1-LFV[i]) : (1-LFV[i]));
222 dlfac_vdw[i] = DLF[i]*lam_power/sc_r_power*(lam_power == 2 ? (1-LFV[i]) : 1);
/* sigma2 = sigma6^(1/3): cache the cube roots of the default/minimum sigma^6. */
225 sigma2_def = pow(sigma6_def, 1.0/3.0);
226 sigma2_min = pow(sigma6_min, 1.0/3.0);
228 /* Ewald (not PME) table is special (icoul==enbcoulFEWALD) */
230 do_tab = (icoul == GMX_NBKERNEL_ELEC_CUBICSPLINETABLE ||
231 ivdw == GMX_NBKERNEL_VDW_CUBICSPLINETABLE);
233 /* we always use the combined table here */
/* Outer loop over the i particles of the neighbor list. */
236 for (n = 0; (n < nri); n++)
240 shY = shiftvec[is3+1];
241 shZ = shiftvec[is3+2];
/* Per-i-atom charges (scaled by the electrostatics prefactor facel) and
 * offsets into the non-bonded parameter table for states A and B. */
249 iqA = facel*chargeA[ii];
250 iqB = facel*chargeB[ii];
251 ntiA = 2*ntype*typeA[ii];
252 ntiB = 2*ntype*typeB[ii];
/* Inner loop over j neighbors of atom ii. */
259 for (k = nj0; (k < nj1); k++)
266 rsq = dx*dx+dy*dy+dz*dz;
267 rinv = gmx_invsqrt(rsq);
/* Compute r^p and r^(p-2) for the soft-core r power p; only p = 6 and
 * p = 48 have fast paths, anything else falls through to pow(). */
269 if (sc_r_power == 6.0)
271 rpm2 = rsq*rsq; /* r4 */
272 rp = rpm2*rsq; /* r6 */
274 else if (sc_r_power == 48.0)
276 rp = rsq*rsq*rsq; /* r6 */
277 rp = rp*rp; /* r12 */
278 rp = rp*rp; /* r24 */
279 rp = rp*rp; /* r48 */
280 rpm2 = rp/rsq; /* r46 */
284 rp = pow(r, sc_r_power); /* not currently supported as input, but can handle it */
/* Pair parameters for both end states: LJ table index and charge product. */
288 tj[STATE_A] = ntiA+2*typeA[jnr];
289 tj[STATE_B] = ntiB+2*typeB[jnr];
290 qq[STATE_A] = iqA*chargeA[jnr];
291 qq[STATE_B] = iqB*chargeB[jnr];
/* Derive per-state sigma^6, sigma^2 and sigma^p for the soft-core terms. */
293 for (i = 0; i < NSTATES; i++)
297 c12[i] = nbfp[tj[i]+1];
298 if ((c6[i] > 0) && (c12[i] > 0))
300 /* c12 is stored scaled with 12.0 and c6 is scaled with 6.0 - correct for this */
301 sigma6[i] = 0.5*c12[i]/c6[i];
302 sigma2[i] = pow(sigma6[i], 1.0/3.0);
303 /* should be able to get rid of this ^^^ internal pow call eventually. Will require agreement on
304 what data to store externally. Can't be fixed without larger scale changes, so not 4.6 */
305 if (sigma6[i] < sigma6_min) /* for disappearing coul and vdw with soft core at the same time */
307 sigma6[i] = sigma6_min;
308 sigma2[i] = sigma2_min;
/* No LJ parameters in this state: fall back to the default sigma. */
313 sigma6[i] = sigma6_def;
314 sigma2[i] = sigma2_def;
316 if (sc_r_power == 6.0)
318 sigma_pow[i] = sigma6[i];
319 sigma_powm2[i] = sigma6[i]/sigma2[i];
321 else if (sc_r_power == 48.0)
323 sigma_pow[i] = sigma6[i]*sigma6[i]; /* sigma^12 */
324 sigma_pow[i] = sigma_pow[i]*sigma_pow[i]; /* sigma^24 */
325 sigma_pow[i] = sigma_pow[i]*sigma_pow[i]; /* sigma^48 */
326 sigma_powm2[i] = sigma_pow[i]/sigma2[i];
329 { /* not really supported as input, but in here for testing the general case*/
330 sigma_pow[i] = pow(sigma2[i], sc_r_power/2);
331 sigma_powm2[i] = sigma_pow[i]/(sigma2[i]);
335 /* only use softcore if one of the states has a zero endstate - softcore is for avoiding infinities!*/
336 if ((c12[STATE_A] > 0) && (c12[STATE_B] > 0))
343 alpha_vdw_eff = alpha_vdw;
344 alpha_coul_eff = alpha_coul;
/* Evaluate the pair interaction separately for state A and state B. */
347 for (i = 0; i < NSTATES; i++)
354 /* Only spend time on A or B state if it is non-zero */
355 if ( (qq[i] != 0) || (c6[i] != 0) || (c12[i] != 0) )
358 /* this section has to be inside the loop because of the dependence on sigma_pow */
/* Soft-cored inverse distances: rinvC/rinvV = (alpha*lf*sigma^p + r^p)^(-1/p). */
359 rpinvC = 1.0/(alpha_coul_eff*lfac_coul[i]*sigma_pow[i]+rp);
360 rinvC = pow(rpinvC, 1.0/sc_r_power);
363 rpinvV = 1.0/(alpha_vdw_eff*lfac_vdw[i]*sigma_pow[i]+rp);
364 rinvV = pow(rpinvV, 1.0/sc_r_power);
/* Table row offsets for the combined coulomb+vdw spline table. */
373 n1C = tab_elemsize*n0;
379 n1V = tab_elemsize*n0;
382 /* With Ewald and soft-core we should put the cut-off on r,
383 * not on the soft-cored rC, as the real-space and
384 * reciprocal space contributions should (almost) cancel.
387 !(bExactElecCutoff &&
388 ((icoul != GMX_NBKERNEL_ELEC_EWALD && rC >= rcoulomb) ||
389 (icoul == GMX_NBKERNEL_ELEC_EWALD && r >= rcoulomb))))
/* Electrostatics: dispatch on the coulomb interaction type. */
393 case GMX_NBKERNEL_ELEC_COULOMB:
394 case GMX_NBKERNEL_ELEC_EWALD:
395 /* simple cutoff (yes, ewald is done all on direct space for free energy) */
396 Vcoul[i] = qq[i]*rinvC;
397 FscalC[i] = Vcoul[i];
400 case GMX_NBKERNEL_ELEC_REACTIONFIELD:
402 Vcoul[i] = qq[i]*(rinvC + krf*rC*rC-crf);
403 FscalC[i] = qq[i]*(rinvC - 2.0*krf*rC*rC);
406 case GMX_NBKERNEL_ELEC_CUBICSPLINETABLE:
407 /* non-Ewald tabulated coulomb */
/* Cubic-spline interpolation: V = Y + eps*Fp, F = Fp + Geps + 2*Heps2. */
411 Geps = epsC*VFtab[nnn+2];
412 Heps2 = eps2C*VFtab[nnn+3];
415 FF = Fp+Geps+2.0*Heps2;
417 FscalC[i] = -qq[i]*tabscale*FF*rC;
420 case GMX_NBKERNEL_ELEC_GENERALIZEDBORN:
421 gmx_fatal(FARGS, "Free energy and GB not implemented.\n");
424 case GMX_NBKERNEL_ELEC_NONE:
430 gmx_incons("Invalid icoul in free energy kernel");
/* Apply the potential-switch function to V and F (coulomb). */
434 if (fr->coulomb_modifier == eintmodPOTSWITCH)
437 d = (d > 0.0) ? d : 0.0;
439 sw = 1.0+d2*d*(swV3+d*(swV4+d*swV5));
440 dsw = d2*(swF2+d*(swF3+d*swF4));
443 FscalC[i] = FscalC[i]*sw + Vcoul[i]*dsw;
/* Van der Waals: only when this state has LJ parameters and the pair
 * is inside the (exact) vdw cut-off. */
447 if ((c6[i] != 0 || c12[i] != 0) &&
448 !(bExactVdwCutoff && rV >= rvdw))
452 case GMX_NBKERNEL_VDW_LENNARDJONES:
454 if (sc_r_power == 6.0)
460 rinv6 = pow(rinvV, 6.0);
463 Vvdw12 = c12[i]*rinv6*rinv6;
464 if (fr->vdw_modifier == eintmodPOTSHIFT)
466 Vvdw[i] = ( (Vvdw12-c12[i]*sh_invrc6*sh_invrc6)*(1.0/12.0)
467 -(Vvdw6-c6[i]*sh_invrc6)*(1.0/6.0))
471 Vvdw[i] = Vvdw12*(1.0/12.0) - Vvdw6*(1.0/6.0);
473 FscalV[i] = Vvdw12 - Vvdw6;
476 case GMX_NBKERNEL_VDW_BUCKINGHAM:
477 gmx_fatal(FARGS, "Buckingham free energy not supported.");
480 case GMX_NBKERNEL_VDW_CUBICSPLINETABLE:
/* Tabulated VdW: dispersion term uses table columns 4-7 ... */
486 Geps = epsV*VFtab[nnn+2];
487 Heps2 = eps2V*VFtab[nnn+3];
490 FF = Fp+Geps+2.0*Heps2;
492 FscalV[i] -= c6[i]*tabscale*FF*rV;
/* ... and the repulsion term the next four columns (offsets +4..+7). */
497 Geps = epsV*VFtab[nnn+6];
498 Heps2 = eps2V*VFtab[nnn+7];
501 FF = Fp+Geps+2.0*Heps2;
502 Vvdw[i] += c12[i]*VV;
503 FscalV[i] -= c12[i]*tabscale*FF*rV;
506 case GMX_NBKERNEL_VDW_NONE:
512 gmx_incons("Invalid ivdw in free energy kernel");
/* Apply the potential-switch function to V and F (vdw), then zero
 * everything beyond the vdw cut-off. */
516 if (fr->vdw_modifier == eintmodPOTSWITCH)
519 d = (d > 0.0) ? d : 0.0;
521 sw = 1.0+d2*d*(swV3+d*(swV4+d*swV5));
522 dsw = d2*(swF2+d*(swF3+d*swF4));
525 FscalV[i] = FscalV[i]*sw + Vvdw[i]*dsw;
527 FscalV[i] = (rV < rvdw) ? FscalV[i] : 0.0;
528 Vvdw[i] = (rV < rvdw) ? Vvdw[i] : 0.0;
532 /* FscalC (and FscalV) now contain: dV/drC * rC
533 * Now we multiply by rC^-p, so it will be: dV/drC * rC^1-p
534 * Further down we first multiply by r^p-2 and then by
535 * the vector r, which in total gives: dV/drC * (r/rC)^1-p
/* Ewald exclusion correction: subtract the real-space Ewald part that
 * the reciprocal-space (PME) sum will add back for this pair. */
544 if (icoul == GMX_NBKERNEL_ELEC_EWALD &&
545 !(bExactElecCutoff && r >= rcoulomb))
547 /* Because we compute the soft-core normally,
548 * we have to remove the Ewald short range portion.
549 * Done outside of the states loop because this part
550 * doesn't depend on the scaled R.
/* Linear interpolation of F and quadratic-spline reconstruction of V
 * from the Ewald coulomb tables. */
555 rs = rsq*rinv*tab_ewald_scale;
558 f_lr = (1 - frac)*tab_ewald_F[ri] + frac*tab_ewald_F[ri+1];
560 VV = tab_ewald_V[ri] - tab_ewald_halfsp*frac*(tab_ewald_F[ri] + f_lr);
562 for (i = 0; i < NSTATES; i++)
564 vctot -= LFC[i]*qq[i]*VV;
565 Fscal -= LFC[i]*qq[i]*FF;
566 dvdl_coul -= (DLF[i]*qq[i])*VV;
570 /* Assemble A and B states */
571 for (i = 0; i < NSTATES; i++)
573 vctot += LFC[i]*Vcoul[i];
574 vvtot += LFV[i]*Vvdw[i];
576 Fscal += LFC[i]*FscalC[i]*rpm2;
577 Fscal += LFV[i]*FscalV[i]*rpm2;
/* dV/dl gets the explicit lambda derivative plus the soft-core chain
 * rule term through the lambda-dependent scaled distance. */
579 dvdl_coul += Vcoul[i]*DLF[i] + LFC[i]*alpha_coul_eff*dlfac_coul[i]*FscalC[i]*sigma_pow[i];
580 dvdl_vdw += Vvdw[i]*DLF[i] + LFV[i]*alpha_vdw_eff*dlfac_vdw[i]*FscalV[i]*sigma_pow[i];
/* Newton's third law: subtract the pair force from atom j ... */
592 f[j3+1] = f[j3+1] - ty;
593 f[j3+2] = f[j3+2] - tz;
/* ... and add the accumulated i-atom force, plus the shift-force
 * contribution used for the virial. */
599 f[ii3] = f[ii3] + fix;
600 f[ii3+1] = f[ii3+1] + fiy;
601 f[ii3+2] = f[ii3+2] + fiz;
602 fshift[is3] = fshift[is3] + fix;
603 fshift[is3+1] = fshift[is3+1] + fiy;
604 fshift[is3+2] = fshift[is3+2] + fiz;
/* Accumulate energies into this i-list's energy group and dV/dlambda
 * into the kernel_data outputs. */
607 Vc[ggid] = Vc[ggid] + vctot;
608 Vv[ggid] = Vv[ggid] + vvtot;
611 dvdl[efptCOUL] += dvdl_coul;
612 dvdl[efptVDW] += dvdl_vdw;
614 /* Estimate flops, average for free energy stuff:
615 * 12 flops per outer iteration
616 * 150 flops per inner iteration
618 inc_nrnb(nrnb, eNR_NBKERNEL_FREE_ENERGY, nlist->nri*12 + nlist->jindex[n]*150);
/*
 * nb_free_energy_evaluate_single() - evaluate one soft-core FEP pair
 * interaction for both end states using the tabulated coulomb and VdW
 * potentials in vftab, writing the lambda-weighted electrostatic energy
 * to *velectot and accumulating dV/dlambda into dvdl[efptCOUL/efptVDW].
 * This duplicates the soft-core sigma/r^p setup of the main kernel above
 * for a single pair given its squared distance r2.
 *
 * NOTE(review): this copy is truncated - the function continues past the
 * visible end (presumably storing vvdwsum to *vvdwtot), and some lines
 * (braces, eps/ntab setup) are elided.  Verify against the canonical file.
 */
622 nb_free_energy_evaluate_single(real r2, real sc_r_power, real alpha_coul, real alpha_vdw,
623 real tabscale, real *vftab,
624 real qqA, real c6A, real c12A, real qqB, real c6B, real c12B,
625 real LFC[2], real LFV[2], real DLF[2],
626 real lfac_coul[2], real lfac_vdw[2], real dlfac_coul[2], real dlfac_vdw[2],
627 real sigma6_def, real sigma6_min, real sigma2_def, real sigma2_min,
628 real *velectot, real *vvdwtot, real *dvdl)
630 real r, rp, rpm2, rtab, eps, eps2, Y, F, Geps, Heps2, Fp, VV, FF, fscal;
631 real qq[2], c6[2], c12[2], sigma6[2], sigma2[2], sigma_pow[2], sigma_powm2[2];
632 real alpha_coul_eff, alpha_vdw_eff, dvdl_coul, dvdl_vdw;
633 real rpinv, r_coul, r_vdw, velecsum, vvdwsum;
634 real fscal_vdw[2], fscal_elec[2];
635 real velec[2], vvdw[2];
/* r^p and r^(p-2) for soft-core power p; fast paths for p = 6 and p = 48,
 * generic pow() fallback otherwise (mirrors the main kernel). */
645 if (sc_r_power == 6.0)
647 rpm2 = r2*r2; /* r4 */
648 rp = rpm2*r2; /* r6 */
650 else if (sc_r_power == 48.0)
652 rp = r2*r2*r2; /* r6 */
653 rp = rp*rp; /* r12 */
654 rp = rp*rp; /* r24 */
655 rp = rp*rp; /* r48 */
656 rpm2 = rp/r2; /* r46 */
660 rp = pow(r2, 0.5*sc_r_power); /* not currently supported as input, but can handle it */
664 /* Loop over state A(0) and B(1) */
665 for (i = 0; i < 2; i++)
667 if ((c6[i] > 0) && (c12[i] > 0))
669 /* The c6 & c12 coefficients now contain the constants 6.0 and 12.0, respectively.
670 * Correct for this by multiplying with (1/12.0)/(1/6.0)=6.0/12.0=0.5.
672 sigma6[i] = 0.5*c12[i]/c6[i];
673 sigma2[i] = pow(0.5*c12[i]/c6[i], 1.0/3.0);
674 /* should be able to get rid of this ^^^ internal pow call eventually. Will require agreement on
675 what data to store externally. Can't be fixed without larger scale changes, so not 4.6 */
676 if (sigma6[i] < sigma6_min) /* for disappearing coul and vdw with soft core at the same time */
678 sigma6[i] = sigma6_min;
679 sigma2[i] = sigma2_min;
/* No LJ parameters in this state: use the default sigma values. */
684 sigma6[i] = sigma6_def;
685 sigma2[i] = sigma2_def;
687 if (sc_r_power == 6.0)
689 sigma_pow[i] = sigma6[i];
690 sigma_powm2[i] = sigma6[i]/sigma2[i];
692 else if (sc_r_power == 48.0)
694 sigma_pow[i] = sigma6[i]*sigma6[i]; /* sigma^12 */
695 sigma_pow[i] = sigma_pow[i]*sigma_pow[i]; /* sigma^24 */
696 sigma_pow[i] = sigma_pow[i]*sigma_pow[i]; /* sigma^48 */
697 sigma_powm2[i] = sigma_pow[i]/sigma2[i];
700 { /* not really supported as input, but in here for testing the general case*/
701 sigma_pow[i] = pow(sigma2[i], sc_r_power/2);
702 sigma_powm2[i] = sigma_pow[i]/(sigma2[i]);
706 /* only use softcore if one of the states has a zero endstate - softcore is for avoiding infinities!*/
707 if ((c12[0] > 0) && (c12[1] > 0))
714 alpha_vdw_eff = alpha_vdw;
715 alpha_coul_eff = alpha_coul;
718 /* Loop over A and B states again */
719 for (i = 0; i < 2; i++)
726 /* Only spend time on A or B state if it is non-zero */
727 if ( (qq[i] != 0) || (c6[i] != 0) || (c12[i] != 0) )
/* Soft-cored coulomb distance: r_coul = (alpha*lf*sigma^p + r^p)^(1/p). */
730 rpinv = 1.0/(alpha_coul_eff*lfac_coul[i]*sigma_pow[i]+rp);
731 r_coul = pow(rpinv, -1.0/sc_r_power);
733 /* Electrostatics table lookup data */
734 rtab = r_coul*tabscale;
/* Cubic-spline interpolation of the coulomb table entry. */
742 Geps = eps*vftab[ntab+2];
743 Heps2 = eps2*vftab[ntab+3];
746 FF = Fp+Geps+2.0*Heps2;
748 fscal_elec[i] = -qq[i]*FF*r_coul*rpinv*tabscale;
/* Soft-cored vdw distance, analogous to r_coul above. */
751 rpinv = 1.0/(alpha_vdw_eff*lfac_vdw[i]*sigma_pow[i]+rp);
752 r_vdw = pow(rpinv, -1.0/sc_r_power);
753 /* Vdw table lookup data */
754 rtab = r_vdw*tabscale;
/* Dispersion term from table offsets +4..+7 ... */
762 Geps = eps*vftab[ntab+6];
763 Heps2 = eps2*vftab[ntab+7];
766 FF = Fp+Geps+2.0*Heps2;
768 fscal_vdw[i] = -c6[i]*FF;
/* ... and repulsion term from offsets +8..+11. */
773 Geps = eps*vftab[ntab+10];
774 Heps2 = eps2*vftab[ntab+11];
777 FF = Fp+Geps+2.0*Heps2;
778 vvdw[i] += c12[i]*VV;
779 fscal_vdw[i] -= c12[i]*FF;
780 fscal_vdw[i] *= r_vdw*rpinv*tabscale;
783 /* Now we have velec[i], vvdw[i], and fscal[i] for both states */
784 /* Assemble A and B states */
790 for (i = 0; i < 2; i++)
792 velecsum += LFC[i]*velec[i];
793 vvdwsum += LFV[i]*vvdw[i];
795 fscal += (LFC[i]*fscal_elec[i]+LFV[i]*fscal_vdw[i])*rpm2;
/* Explicit lambda derivative plus soft-core chain-rule term. */
797 dvdl_coul += velec[i]*DLF[i] + LFC[i]*alpha_coul_eff*dlfac_coul[i]*fscal_elec[i]*sigma_pow[i];
798 dvdl_vdw += vvdw[i]*DLF[i] + LFV[i]*alpha_vdw_eff*dlfac_vdw[i]*fscal_vdw[i]*sigma_pow[i];
801 dvdl[efptCOUL] += dvdl_coul;
802 dvdl[efptVDW] += dvdl_vdw;
804 *velectot = velecsum;