2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2016,2017,2018,2019, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 /* Doxygen gets confused (buggy) about the block in this file in combination with
37 * the namespace prefix, and thinks store is documented here.
38 * This will solve itself with the second-generation nbnxn kernels, so for now
39 * we just tell Doxygen to stay out.
43 /* This is the innermost loop contents for the 4 x N atom simd kernel.
44 * This flavor of the kernel calculates interactions of 4 i-atoms
45 * with N j-atoms stored in N wide simd registers.
48 /* When calculating RF or Ewald interactions we calculate the electrostatic/LJ
49 * forces on excluded atom pairs here in the non-bonded loops.
50 * But when energies and/or the virial are required we calculate them
51 * separately, as then it is easier to separate the energy and virial
54 #if defined CHECK_EXCLS && (defined CALC_COULOMB || defined LJ_EWALD_GEOM)
59 int cj, ajx, ajy, ajz;
63 /* Energy group indices for two atoms packed into one int */
64 int egp_jj[UNROLLJ/2];
68 /* Interaction (non-exclusion) mask of all 1's or 0's */
75 SimdReal jx_S, jy_S, jz_S;
76 SimdReal dx_S0, dy_S0, dz_S0;
77 SimdReal dx_S1, dy_S1, dz_S1;
78 SimdReal dx_S2, dy_S2, dz_S2;
79 SimdReal dx_S3, dy_S3, dz_S3;
80 SimdReal tx_S0, ty_S0, tz_S0;
81 SimdReal tx_S1, ty_S1, tz_S1;
82 SimdReal tx_S2, ty_S2, tz_S2;
83 SimdReal tx_S3, ty_S3, tz_S3;
84 SimdReal rsq_S0, rinv_S0, rinvsq_S0;
85 SimdReal rsq_S1, rinv_S1, rinvsq_S1;
86 SimdReal rsq_S2, rinv_S2, rinvsq_S2;
87 SimdReal rsq_S3, rinv_S3, rinvsq_S3;
89 /* wco: within cut-off, mask of all 1's or 0's */
95 #ifdef VDW_CUTOFF_CHECK
104 #if (defined CALC_COULOMB && defined CALC_COUL_TAB) || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
107 #if (defined CALC_COULOMB && defined CALC_COUL_TAB) || !defined HALF_LJ
113 #if defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
114 SimdReal rsw_S0, rsw2_S0;
115 SimdReal rsw_S1, rsw2_S1;
117 SimdReal rsw_S2, rsw2_S2;
118 SimdReal rsw_S3, rsw2_S3;
124 /* 1/r masked with the interaction mask */
136 /* The force (PME mesh force) we need to subtract from 1/r^2 */
142 #ifdef CALC_COUL_EWALD
143 SimdReal brsq_S0, brsq_S1, brsq_S2, brsq_S3;
144 SimdReal ewcorr_S0, ewcorr_S1, ewcorr_S2, ewcorr_S3;
147 /* frcoul = (1/r - fsub)*r */
153 /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
154 SimdReal rs_S0, rf_S0, frac_S0;
155 SimdReal rs_S1, rf_S1, frac_S1;
156 SimdReal rs_S2, rf_S2, frac_S2;
157 SimdReal rs_S3, rf_S3, frac_S3;
158 /* Table index: rs truncated to an int */
159 SimdInt32 ti_S0, ti_S1, ti_S2, ti_S3;
160 /* Linear force table values */
161 SimdReal ctab0_S0, ctab1_S0;
162 SimdReal ctab0_S1, ctab1_S1;
163 SimdReal ctab0_S2, ctab1_S2;
164 SimdReal ctab0_S3, ctab1_S3;
166 /* Quadratic energy table value */
167 SimdReal ctabv_S0, dum_S0;
168 SimdReal ctabv_S1, dum_S1;
169 SimdReal ctabv_S2, dum_S2;
170 SimdReal ctabv_S3, dum_S3;
173 #if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
174 /* The potential (PME mesh) we need to subtract from 1/r */
181 /* Electrostatic potential */
188 /* The force times 1/r */
196 /* LJ sigma_j/2 and sqrt(epsilon_j) */
197 SimdReal hsig_j_S, seps_j_S;
198 /* LJ sigma_ij and epsilon_ij */
199 SimdReal sig_S0, eps_S0;
200 SimdReal sig_S1, eps_S1;
202 SimdReal sig_S2, eps_S2;
203 SimdReal sig_S3, eps_S3;
206 SimdReal sig2_S0, sig6_S0;
207 SimdReal sig2_S1, sig6_S1;
209 SimdReal sig2_S2, sig6_S2;
210 SimdReal sig2_S3, sig6_S3;
212 #endif /* LJ_COMB_LB */
216 SimdReal c6s_j_S, c12s_j_S;
219 #if defined LJ_COMB_GEOM || defined LJ_COMB_LB || defined LJ_EWALD_GEOM
220 /* Index for loading LJ parameters, complicated when interleaving */
224 /* Intermediate variables for LJ calculation */
234 SimdReal sir_S0, sir2_S0, sir6_S0;
235 SimdReal sir_S1, sir2_S1, sir6_S1;
237 SimdReal sir_S2, sir2_S2, sir6_S2;
238 SimdReal sir_S3, sir2_S3, sir6_S3;
242 SimdReal FrLJ6_S0, FrLJ12_S0, frLJ_S0;
243 SimdReal FrLJ6_S1, FrLJ12_S1, frLJ_S1;
245 SimdReal FrLJ6_S2, FrLJ12_S2, frLJ_S2;
246 SimdReal FrLJ6_S3, FrLJ12_S3, frLJ_S3;
250 /* j-cluster index */
253 /* Atom indices (of the first atom in the cluster) */
255 #if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB || defined LJ_EWALD_GEOM)
256 #if UNROLLJ == STRIDE
259 aj2 = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
262 #if UNROLLJ == STRIDE
265 ajx = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
271 gmx_load_simd_4xn_interactions(static_cast<int>(l_cj[cjind].excl),
272 filter_S0, filter_S1,
273 filter_S2, filter_S3,
274 nbat->simdMasks.interaction_array.data(),
275 &interact_S0, &interact_S1,
276 &interact_S2, &interact_S3);
277 #endif /* CHECK_EXCLS */
279 /* load j atom coordinates */
280 jx_S = load<SimdReal>(x+ajx);
281 jy_S = load<SimdReal>(x+ajy);
282 jz_S = load<SimdReal>(x+ajz);
284 /* Calculate distance */
285 dx_S0 = ix_S0 - jx_S;
286 dy_S0 = iy_S0 - jy_S;
287 dz_S0 = iz_S0 - jz_S;
288 dx_S1 = ix_S1 - jx_S;
289 dy_S1 = iy_S1 - jy_S;
290 dz_S1 = iz_S1 - jz_S;
291 dx_S2 = ix_S2 - jx_S;
292 dy_S2 = iy_S2 - jy_S;
293 dz_S2 = iz_S2 - jz_S;
294 dx_S3 = ix_S3 - jx_S;
295 dy_S3 = iy_S3 - jy_S;
296 dz_S3 = iz_S3 - jz_S;
298 /* rsq = dx*dx+dy*dy+dz*dz */
299 rsq_S0 = norm2(dx_S0, dy_S0, dz_S0);
300 rsq_S1 = norm2(dx_S1, dy_S1, dz_S1);
301 rsq_S2 = norm2(dx_S2, dy_S2, dz_S2);
302 rsq_S3 = norm2(dx_S3, dy_S3, dz_S3);
304 /* Do the cut-off check */
305 wco_S0 = (rsq_S0 < rc2_S);
306 wco_S1 = (rsq_S1 < rc2_S);
307 wco_S2 = (rsq_S2 < rc2_S);
308 wco_S3 = (rsq_S3 < rc2_S);
312 /* Only remove the (sub-)diagonal to avoid double counting */
313 #if UNROLLJ == UNROLLI
316 wco_S0 = wco_S0 && diagonal_mask_S0;
317 wco_S1 = wco_S1 && diagonal_mask_S1;
318 wco_S2 = wco_S2 && diagonal_mask_S2;
319 wco_S3 = wco_S3 && diagonal_mask_S3;
322 #if UNROLLJ < UNROLLI
325 wco_S0 = wco_S0 && diagonal_mask0_S0;
326 wco_S1 = wco_S1 && diagonal_mask0_S1;
327 wco_S2 = wco_S2 && diagonal_mask0_S2;
328 wco_S3 = wco_S3 && diagonal_mask0_S3;
330 if (cj == ci_sh*2 + 1)
332 wco_S0 = wco_S0 && diagonal_mask1_S0;
333 wco_S1 = wco_S1 && diagonal_mask1_S1;
334 wco_S2 = wco_S2 && diagonal_mask1_S2;
335 wco_S3 = wco_S3 && diagonal_mask1_S3;
340 wco_S0 = wco_S0 && diagonal_mask0_S0;
341 wco_S1 = wco_S1 && diagonal_mask0_S1;
342 wco_S2 = wco_S2 && diagonal_mask0_S2;
343 wco_S3 = wco_S3 && diagonal_mask0_S3;
345 else if (cj*2 + 1 == ci_sh)
347 wco_S0 = wco_S0 && diagonal_mask1_S0;
348 wco_S1 = wco_S1 && diagonal_mask1_S1;
349 wco_S2 = wco_S2 && diagonal_mask1_S2;
350 wco_S3 = wco_S3 && diagonal_mask1_S3;
354 #else /* EXCL_FORCES */
355 /* No exclusion forces: remove all excluded atom pairs from the list */
356 wco_S0 = wco_S0 && interact_S0;
357 wco_S1 = wco_S1 && interact_S1;
358 wco_S2 = wco_S2 && interact_S2;
359 wco_S3 = wco_S3 && interact_S3;
366 alignas(GMX_SIMD_ALIGNMENT) real tmp[2*GMX_SIMD_REAL_WIDTH];
368 for (i = 0; i < UNROLLI; i++)
370 store(tmp, rc2_S - (i == 0 ? rsq_S0 : (i == 1 ? rsq_S1 : (i == 2 ? rsq_S2 : rsq_S3))));
371 for (j = 0; j < UNROLLJ; j++)
382 // Ensure the distances do not fall below the limit where r^-12 overflows.
383 // This should never happen for normal interactions.
384 rsq_S0 = max(rsq_S0, minRsq_S);
385 rsq_S1 = max(rsq_S1, minRsq_S);
386 rsq_S2 = max(rsq_S2, minRsq_S);
387 rsq_S3 = max(rsq_S3, minRsq_S);
391 rinv_S0 = invsqrt(rsq_S0);
392 rinv_S1 = invsqrt(rsq_S1);
393 rinv_S2 = invsqrt(rsq_S2);
394 rinv_S3 = invsqrt(rsq_S3);
396 invsqrtPair(rsq_S0, rsq_S1, &rinv_S0, &rinv_S1);
397 invsqrtPair(rsq_S2, rsq_S3, &rinv_S2, &rinv_S3);
401 /* Load parameters for j atom */
402 jq_S = load<SimdReal>(q+aj);
403 qq_S0 = iq_S0 * jq_S;
404 qq_S1 = iq_S1 * jq_S;
405 qq_S2 = iq_S2 * jq_S;
406 qq_S3 = iq_S3 * jq_S;
410 #if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
411 SimdReal c6_S0, c6_S1, c12_S0, c12_S1;
412 gatherLoadTranspose<c_simdBestPairAlignment>(nbfp0, type+aj, &c6_S0, &c12_S0);
413 gatherLoadTranspose<c_simdBestPairAlignment>(nbfp1, type+aj, &c6_S1, &c12_S1);
415 SimdReal c6_S2, c6_S3, c12_S2, c12_S3;
416 gatherLoadTranspose<c_simdBestPairAlignment>(nbfp2, type+aj, &c6_S2, &c12_S2);
417 gatherLoadTranspose<c_simdBestPairAlignment>(nbfp3, type+aj, &c6_S3, &c12_S3);
419 #endif /* not defined any LJ rule */
422 c6s_j_S = load<SimdReal>(ljc+aj2+0);
423 c12s_j_S = load<SimdReal>(ljc+aj2+STRIDE);
424 SimdReal c6_S0 = c6s_S0 * c6s_j_S;
425 SimdReal c6_S1 = c6s_S1 * c6s_j_S;
427 SimdReal c6_S2 = c6s_S2 * c6s_j_S;
428 SimdReal c6_S3 = c6s_S3 * c6s_j_S;
430 SimdReal c12_S0 = c12s_S0 * c12s_j_S;
431 SimdReal c12_S1 = c12s_S1 * c12s_j_S;
433 SimdReal c12_S2 = c12s_S2 * c12s_j_S;
434 SimdReal c12_S3 = c12s_S3 * c12s_j_S;
436 #endif /* LJ_COMB_GEOM */
439 hsig_j_S = load<SimdReal>(ljc+aj2+0);
440 seps_j_S = load<SimdReal>(ljc+aj2+STRIDE);
442 sig_S0 = hsig_i_S0 + hsig_j_S;
443 sig_S1 = hsig_i_S1 + hsig_j_S;
444 eps_S0 = seps_i_S0 * seps_j_S;
445 eps_S1 = seps_i_S1 * seps_j_S;
447 sig_S2 = hsig_i_S2 + hsig_j_S;
448 sig_S3 = hsig_i_S3 + hsig_j_S;
449 eps_S2 = seps_i_S2 * seps_j_S;
450 eps_S3 = seps_i_S3 * seps_j_S;
452 #endif /* LJ_COMB_LB */
456 /* Set rinv to zero for r beyond the cut-off */
457 rinv_S0 = selectByMask(rinv_S0, wco_S0);
458 rinv_S1 = selectByMask(rinv_S1, wco_S1);
459 rinv_S2 = selectByMask(rinv_S2, wco_S2);
460 rinv_S3 = selectByMask(rinv_S3, wco_S3);
462 rinvsq_S0 = rinv_S0 * rinv_S0;
463 rinvsq_S1 = rinv_S1 * rinv_S1;
464 rinvsq_S2 = rinv_S2 * rinv_S2;
465 rinvsq_S3 = rinv_S3 * rinv_S3;
468 /* Note that here we calculate force*r, not the usual force/r.
469 * This allows avoiding masking the reaction-field contribution,
470 * as frcoul is later multiplied by rinvsq which has been
471 * masked with the cut-off check.
475 /* Only add 1/r for non-excluded atom pairs */
476 rinv_ex_S0 = selectByMask(rinv_S0, interact_S0);
477 rinv_ex_S1 = selectByMask(rinv_S1, interact_S1);
478 rinv_ex_S2 = selectByMask(rinv_S2, interact_S2);
479 rinv_ex_S3 = selectByMask(rinv_S3, interact_S3);
481 /* No exclusion forces, we always need 1/r */
482 #define rinv_ex_S0 rinv_S0
483 #define rinv_ex_S1 rinv_S1
484 #define rinv_ex_S2 rinv_S2
485 #define rinv_ex_S3 rinv_S3
489 /* Electrostatic interactions */
490 frcoul_S0 = qq_S0 * fma(rsq_S0, mrc_3_S, rinv_ex_S0);
491 frcoul_S1 = qq_S1 * fma(rsq_S1, mrc_3_S, rinv_ex_S1);
492 frcoul_S2 = qq_S2 * fma(rsq_S2, mrc_3_S, rinv_ex_S2);
493 frcoul_S3 = qq_S3 * fma(rsq_S3, mrc_3_S, rinv_ex_S3);
496 vcoul_S0 = qq_S0 * (rinv_ex_S0 + fma(rsq_S0, hrc_3_S, moh_rc_S));
497 vcoul_S1 = qq_S1 * (rinv_ex_S1 + fma(rsq_S1, hrc_3_S, moh_rc_S));
498 vcoul_S2 = qq_S2 * (rinv_ex_S2 + fma(rsq_S2, hrc_3_S, moh_rc_S));
499 vcoul_S3 = qq_S3 * (rinv_ex_S3 + fma(rsq_S3, hrc_3_S, moh_rc_S));
503 #ifdef CALC_COUL_EWALD
504 /* We need to mask (or limit) rsq for the cut-off,
505 * as large distances can cause an overflow in gmx_pmecorrF/V.
507 brsq_S0 = beta2_S * selectByMask(rsq_S0, wco_S0);
508 brsq_S1 = beta2_S * selectByMask(rsq_S1, wco_S1);
509 brsq_S2 = beta2_S * selectByMask(rsq_S2, wco_S2);
510 brsq_S3 = beta2_S * selectByMask(rsq_S3, wco_S3);
511 ewcorr_S0 = beta_S * pmeForceCorrection(brsq_S0);
512 ewcorr_S1 = beta_S * pmeForceCorrection(brsq_S1);
513 ewcorr_S2 = beta_S * pmeForceCorrection(brsq_S2);
514 ewcorr_S3 = beta_S * pmeForceCorrection(brsq_S3);
515 frcoul_S0 = qq_S0 * fma(ewcorr_S0, brsq_S0, rinv_ex_S0);
516 frcoul_S1 = qq_S1 * fma(ewcorr_S1, brsq_S1, rinv_ex_S1);
517 frcoul_S2 = qq_S2 * fma(ewcorr_S2, brsq_S2, rinv_ex_S2);
518 frcoul_S3 = qq_S3 * fma(ewcorr_S3, brsq_S3, rinv_ex_S3);
521 vc_sub_S0 = beta_S * pmePotentialCorrection(brsq_S0);
522 vc_sub_S1 = beta_S * pmePotentialCorrection(brsq_S1);
523 vc_sub_S2 = beta_S * pmePotentialCorrection(brsq_S2);
524 vc_sub_S3 = beta_S * pmePotentialCorrection(brsq_S3);
527 #endif /* CALC_COUL_EWALD */
530 /* Electrostatic interactions */
531 r_S0 = rsq_S0 * rinv_S0;
532 r_S1 = rsq_S1 * rinv_S1;
533 r_S2 = rsq_S2 * rinv_S2;
534 r_S3 = rsq_S3 * rinv_S3;
535 /* Convert r to scaled table units */
536 rs_S0 = r_S0 * invtsp_S;
537 rs_S1 = r_S1 * invtsp_S;
538 rs_S2 = r_S2 * invtsp_S;
539 rs_S3 = r_S3 * invtsp_S;
540 /* Truncate scaled r to an int */
541 ti_S0 = cvttR2I(rs_S0);
542 ti_S1 = cvttR2I(rs_S1);
543 ti_S2 = cvttR2I(rs_S2);
544 ti_S3 = cvttR2I(rs_S3);
546 rf_S0 = trunc(rs_S0);
547 rf_S1 = trunc(rs_S1);
548 rf_S2 = trunc(rs_S2);
549 rf_S3 = trunc(rs_S3);
551 frac_S0 = rs_S0 - rf_S0;
552 frac_S1 = rs_S1 - rf_S1;
553 frac_S2 = rs_S2 - rf_S2;
554 frac_S3 = rs_S3 - rf_S3;
556 /* Load and interpolate table forces and possibly energies.
557 * Force and energy can be combined in one table, stride 4: FDV0
558 * or in two separate tables with stride 1: F and V
559 * Currently single precision uses FDV0, double F and V.
561 #ifndef CALC_ENERGIES
563 gatherLoadBySimdIntTranspose<4>(tab_coul_F, ti_S0, &ctab0_S0, &ctab1_S0);
564 gatherLoadBySimdIntTranspose<4>(tab_coul_F, ti_S1, &ctab0_S1, &ctab1_S1);
565 gatherLoadBySimdIntTranspose<4>(tab_coul_F, ti_S2, &ctab0_S2, &ctab1_S2);
566 gatherLoadBySimdIntTranspose<4>(tab_coul_F, ti_S3, &ctab0_S3, &ctab1_S3);
568 gatherLoadUBySimdIntTranspose<1>(tab_coul_F, ti_S0, &ctab0_S0, &ctab1_S0);
569 gatherLoadUBySimdIntTranspose<1>(tab_coul_F, ti_S1, &ctab0_S1, &ctab1_S1);
570 gatherLoadUBySimdIntTranspose<1>(tab_coul_F, ti_S2, &ctab0_S2, &ctab1_S2);
571 gatherLoadUBySimdIntTranspose<1>(tab_coul_F, ti_S3, &ctab0_S3, &ctab1_S3);
572 ctab1_S0 = ctab1_S0 - ctab0_S0;
573 ctab1_S1 = ctab1_S1 - ctab0_S1;
574 ctab1_S2 = ctab1_S2 - ctab0_S2;
575 ctab1_S3 = ctab1_S3 - ctab0_S3;
579 gatherLoadBySimdIntTranspose<4>(tab_coul_F, ti_S0, &ctab0_S0, &ctab1_S0, &ctabv_S0, &dum_S0);
580 gatherLoadBySimdIntTranspose<4>(tab_coul_F, ti_S1, &ctab0_S1, &ctab1_S1, &ctabv_S1, &dum_S1);
581 gatherLoadBySimdIntTranspose<4>(tab_coul_F, ti_S2, &ctab0_S2, &ctab1_S2, &ctabv_S2, &dum_S2);
582 gatherLoadBySimdIntTranspose<4>(tab_coul_F, ti_S3, &ctab0_S3, &ctab1_S3, &ctabv_S3, &dum_S3);
584 gatherLoadUBySimdIntTranspose<1>(tab_coul_F, ti_S0, &ctab0_S0, &ctab1_S0);
585 gatherLoadUBySimdIntTranspose<1>(tab_coul_V, ti_S0, &ctabv_S0, &dum_S0);
586 gatherLoadUBySimdIntTranspose<1>(tab_coul_F, ti_S1, &ctab0_S1, &ctab1_S1);
587 gatherLoadUBySimdIntTranspose<1>(tab_coul_V, ti_S1, &ctabv_S1, &dum_S1);
588 gatherLoadUBySimdIntTranspose<1>(tab_coul_F, ti_S2, &ctab0_S2, &ctab1_S2);
589 gatherLoadUBySimdIntTranspose<1>(tab_coul_V, ti_S2, &ctabv_S2, &dum_S2);
590 gatherLoadUBySimdIntTranspose<1>(tab_coul_F, ti_S3, &ctab0_S3, &ctab1_S3);
591 gatherLoadUBySimdIntTranspose<1>(tab_coul_V, ti_S3, &ctabv_S3, &dum_S3);
592 ctab1_S0 = ctab1_S0 - ctab0_S0;
593 ctab1_S1 = ctab1_S1 - ctab0_S1;
594 ctab1_S2 = ctab1_S2 - ctab0_S2;
595 ctab1_S3 = ctab1_S3 - ctab0_S3;
598 fsub_S0 = fma(frac_S0, ctab1_S0, ctab0_S0);
599 fsub_S1 = fma(frac_S1, ctab1_S1, ctab0_S1);
600 fsub_S2 = fma(frac_S2, ctab1_S2, ctab0_S2);
601 fsub_S3 = fma(frac_S3, ctab1_S3, ctab0_S3);
602 frcoul_S0 = qq_S0 * fnma(fsub_S0, r_S0, rinv_ex_S0);
603 frcoul_S1 = qq_S1 * fnma(fsub_S1, r_S1, rinv_ex_S1);
604 frcoul_S2 = qq_S2 * fnma(fsub_S2, r_S2, rinv_ex_S2);
605 frcoul_S3 = qq_S3 * fnma(fsub_S3, r_S3, rinv_ex_S3);
608 vc_sub_S0 = fma((mhalfsp_S * frac_S0), (ctab0_S0 + fsub_S0), ctabv_S0);
609 vc_sub_S1 = fma((mhalfsp_S * frac_S1), (ctab0_S1 + fsub_S1), ctabv_S1);
610 vc_sub_S2 = fma((mhalfsp_S * frac_S2), (ctab0_S2 + fsub_S2), ctabv_S2);
611 vc_sub_S3 = fma((mhalfsp_S * frac_S3), (ctab0_S3 + fsub_S3), ctabv_S3);
613 #endif /* CALC_COUL_TAB */
615 #if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
616 #ifndef NO_SHIFT_EWALD
617 /* Add Ewald potential shift to vc_sub for convenience */
619 vc_sub_S0 = vc_sub_S0 + selectByMask(sh_ewald_S, interact_S0);
620 vc_sub_S1 = vc_sub_S1 + selectByMask(sh_ewald_S, interact_S1);
621 vc_sub_S2 = vc_sub_S2 + selectByMask(sh_ewald_S, interact_S2);
622 vc_sub_S3 = vc_sub_S3 + selectByMask(sh_ewald_S, interact_S3);
624 vc_sub_S0 = vc_sub_S0 + sh_ewald_S;
625 vc_sub_S1 = vc_sub_S1 + sh_ewald_S;
626 vc_sub_S2 = vc_sub_S2 + sh_ewald_S;
627 vc_sub_S3 = vc_sub_S3 + sh_ewald_S;
631 vcoul_S0 = qq_S0 * (rinv_ex_S0 - vc_sub_S0);
632 vcoul_S1 = qq_S1 * (rinv_ex_S1 - vc_sub_S1);
633 vcoul_S2 = qq_S2 * (rinv_ex_S2 - vc_sub_S2);
634 vcoul_S3 = qq_S3 * (rinv_ex_S3 - vc_sub_S3);
639 /* Mask energy for cut-off and diagonal */
640 vcoul_S0 = selectByMask(vcoul_S0, wco_S0);
641 vcoul_S1 = selectByMask(vcoul_S1, wco_S1);
642 vcoul_S2 = selectByMask(vcoul_S2, wco_S2);
643 vcoul_S3 = selectByMask(vcoul_S3, wco_S3);
646 #endif /* CALC_COULOMB */
649 /* Lennard-Jones interaction */
651 #ifdef VDW_CUTOFF_CHECK
652 wco_vdw_S0 = (rsq_S0 < rcvdw2_S);
653 wco_vdw_S1 = (rsq_S1 < rcvdw2_S);
655 wco_vdw_S2 = (rsq_S2 < rcvdw2_S);
656 wco_vdw_S3 = (rsq_S3 < rcvdw2_S);
659 /* Same cut-off for Coulomb and VdW, reuse the registers */
660 #define wco_vdw_S0 wco_S0
661 #define wco_vdw_S1 wco_S1
662 #define wco_vdw_S2 wco_S2
663 #define wco_vdw_S3 wco_S3
667 rinvsix_S0 = rinvsq_S0 * rinvsq_S0 * rinvsq_S0;
668 rinvsix_S1 = rinvsq_S1 * rinvsq_S1 * rinvsq_S1;
670 rinvsix_S0 = selectByMask(rinvsix_S0, interact_S0);
671 rinvsix_S1 = selectByMask(rinvsix_S1, interact_S1);
674 rinvsix_S2 = rinvsq_S2 * rinvsq_S2 * rinvsq_S2;
675 rinvsix_S3 = rinvsq_S3 * rinvsq_S3 * rinvsq_S3;
677 rinvsix_S2 = selectByMask(rinvsix_S2, interact_S2);
678 rinvsix_S3 = selectByMask(rinvsix_S3, interact_S3);
682 #if defined LJ_CUT || defined LJ_POT_SWITCH
683 /* We have plain LJ or LJ-PME with simple C6/6 C12/12 coefficients */
684 FrLJ6_S0 = c6_S0 * rinvsix_S0;
685 FrLJ6_S1 = c6_S1 * rinvsix_S1;
687 FrLJ6_S2 = c6_S2 * rinvsix_S2;
688 FrLJ6_S3 = c6_S3 * rinvsix_S3;
690 FrLJ12_S0 = c12_S0 * rinvsix_S0 * rinvsix_S0;
691 FrLJ12_S1 = c12_S1 * rinvsix_S1 * rinvsix_S1;
693 FrLJ12_S2 = c12_S2 * rinvsix_S2 * rinvsix_S2;
694 FrLJ12_S3 = c12_S3 * rinvsix_S3 * rinvsix_S3;
698 #if defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
699 /* We switch the LJ force */
700 r_S0 = rsq_S0 * rinv_S0;
701 rsw_S0 = max(r_S0 - rswitch_S, zero_S);
702 rsw2_S0 = rsw_S0 * rsw_S0;
703 r_S1 = rsq_S1 * rinv_S1;
704 rsw_S1 = max(r_S1 - rswitch_S, zero_S);
705 rsw2_S1 = rsw_S1 * rsw_S1;
707 r_S2 = rsq_S2 * rinv_S2;
708 rsw_S2 = max(r_S2 - rswitch_S, zero_S);
709 rsw2_S2 = rsw_S2 * rsw_S2;
710 r_S3 = rsq_S3 * rinv_S3;
711 rsw_S3 = max(r_S3 - rswitch_S, zero_S);
712 rsw2_S3 = rsw_S3 * rsw_S3;
716 #ifdef LJ_FORCE_SWITCH
718 #define gmx_add_fr_switch(fr, rsw, rsw2_r, c2, c3) fma(fma(c3, rsw, c2), rsw2_r, (fr))
719 SimdReal rsw2_r_S0 = rsw2_S0 * r_S0;
720 SimdReal rsw2_r_S1 = rsw2_S1 * r_S1;
721 FrLJ6_S0 = c6_S0 * gmx_add_fr_switch(rinvsix_S0, rsw_S0, rsw2_r_S0, p6_fc2_S, p6_fc3_S);
722 FrLJ6_S1 = c6_S1 * gmx_add_fr_switch(rinvsix_S1, rsw_S1, rsw2_r_S1, p6_fc2_S, p6_fc3_S);
724 SimdReal rsw2_r_S2 = rsw2_S2 * r_S2;
725 SimdReal rsw2_r_S3 = rsw2_S3 * r_S3;
726 FrLJ6_S2 = c6_S2 * gmx_add_fr_switch(rinvsix_S2, rsw_S2, rsw2_r_S2, p6_fc2_S, p6_fc3_S);
727 FrLJ6_S3 = c6_S3 * gmx_add_fr_switch(rinvsix_S3, rsw_S3, rsw2_r_S3, p6_fc2_S, p6_fc3_S);
729 FrLJ12_S0 = c12_S0 * gmx_add_fr_switch((rinvsix_S0 * rinvsix_S0), rsw_S0, rsw2_r_S0, p12_fc2_S, p12_fc3_S);
730 FrLJ12_S1 = c12_S1 * gmx_add_fr_switch((rinvsix_S1 * rinvsix_S1), rsw_S1, rsw2_r_S1, p12_fc2_S, p12_fc3_S);
732 FrLJ12_S2 = c12_S2 * gmx_add_fr_switch((rinvsix_S2 * rinvsix_S2), rsw_S2, rsw2_r_S2, p12_fc2_S, p12_fc3_S);
733 FrLJ12_S3 = c12_S3 * gmx_add_fr_switch((rinvsix_S3 * rinvsix_S3), rsw_S3, rsw2_r_S3, p12_fc2_S, p12_fc3_S);
735 #undef gmx_add_fr_switch
736 #endif /* LJ_FORCE_SWITCH */
738 #endif /* not LJ_COMB_LB */
741 sir_S0 = sig_S0 * rinv_S0;
742 sir_S1 = sig_S1 * rinv_S1;
744 sir_S2 = sig_S2 * rinv_S2;
745 sir_S3 = sig_S3 * rinv_S3;
747 sir2_S0 = sir_S0 * sir_S0;
748 sir2_S1 = sir_S1 * sir_S1;
750 sir2_S2 = sir_S2 * sir_S2;
751 sir2_S3 = sir_S3 * sir_S3;
753 sir6_S0 = sir2_S0 * sir2_S0 * sir2_S0;
754 sir6_S1 = sir2_S1 * sir2_S1 * sir2_S1;
756 sir6_S0 = selectByMask(sir6_S0, interact_S0);
757 sir6_S1 = selectByMask(sir6_S1, interact_S1);
760 sir6_S2 = sir2_S2 * sir2_S2 * sir2_S2;
761 sir6_S3 = sir2_S3 * sir2_S3 * sir2_S3;
763 sir6_S2 = selectByMask(sir6_S2, interact_S2);
764 sir6_S3 = selectByMask(sir6_S3, interact_S3);
767 #ifdef VDW_CUTOFF_CHECK
768 sir6_S0 = selectByMask(sir6_S0, wco_vdw_S0);
769 sir6_S1 = selectByMask(sir6_S1, wco_vdw_S1);
771 sir6_S2 = selectByMask(sir6_S2, wco_vdw_S2);
772 sir6_S3 = selectByMask(sir6_S3, wco_vdw_S3);
775 FrLJ6_S0 = eps_S0 * sir6_S0;
776 FrLJ6_S1 = eps_S1 * sir6_S1;
778 FrLJ6_S2 = eps_S2 * sir6_S2;
779 FrLJ6_S3 = eps_S3 * sir6_S3;
781 FrLJ12_S0 = FrLJ6_S0 * sir6_S0;
782 FrLJ12_S1 = FrLJ6_S1 * sir6_S1;
784 FrLJ12_S2 = FrLJ6_S2 * sir6_S2;
785 FrLJ12_S3 = FrLJ6_S3 * sir6_S3;
787 #if defined CALC_ENERGIES
788 /* We need C6 and C12 to calculate the LJ potential shift */
789 sig2_S0 = sig_S0 * sig_S0;
790 sig2_S1 = sig_S1 * sig_S1;
792 sig2_S2 = sig_S2 * sig_S2;
793 sig2_S3 = sig_S3 * sig_S3;
795 sig6_S0 = sig2_S0 * sig2_S0 * sig2_S0;
796 sig6_S1 = sig2_S1 * sig2_S1 * sig2_S1;
798 sig6_S2 = sig2_S2 * sig2_S2 * sig2_S2;
799 sig6_S3 = sig2_S3 * sig2_S3 * sig2_S3;
801 SimdReal c6_S0 = eps_S0 * sig6_S0;
802 SimdReal c6_S1 = eps_S1 * sig6_S1;
804 SimdReal c6_S2 = eps_S2 * sig6_S2;
805 SimdReal c6_S3 = eps_S3 * sig6_S3;
807 SimdReal c12_S0 = c6_S0 * sig6_S0;
808 SimdReal c12_S1 = c6_S1 * sig6_S1;
810 SimdReal c12_S2 = c6_S2 * sig6_S2;
811 SimdReal c12_S3 = c6_S3 * sig6_S3;
814 #endif /* LJ_COMB_LB */
816 /* Determine the total scalar LJ force*r */
817 frLJ_S0 = FrLJ12_S0 - FrLJ6_S0;
818 frLJ_S1 = FrLJ12_S1 - FrLJ6_S1;
820 frLJ_S2 = FrLJ12_S2 - FrLJ6_S2;
821 frLJ_S3 = FrLJ12_S3 - FrLJ6_S3;
824 #if (defined LJ_CUT || defined LJ_FORCE_SWITCH) && defined CALC_ENERGIES
827 /* Calculate the LJ energies, with constant potential shift */
828 SimdReal VLJ6_S0 = sixth_S * fma(c6_S0, p6_cpot_S, FrLJ6_S0);
829 SimdReal VLJ6_S1 = sixth_S * fma(c6_S1, p6_cpot_S, FrLJ6_S1);
831 SimdReal VLJ6_S2 = sixth_S * fma(c6_S2, p6_cpot_S, FrLJ6_S2);
832 SimdReal VLJ6_S3 = sixth_S * fma(c6_S3, p6_cpot_S, FrLJ6_S3);
834 SimdReal VLJ12_S0 = twelveth_S * fma(c12_S0, p12_cpot_S, FrLJ12_S0);
835 SimdReal VLJ12_S1 = twelveth_S * fma(c12_S1, p12_cpot_S, FrLJ12_S1);
837 SimdReal VLJ12_S2 = twelveth_S * fma(c12_S2, p12_cpot_S, FrLJ12_S2);
838 SimdReal VLJ12_S3 = twelveth_S * fma(c12_S3, p12_cpot_S, FrLJ12_S3);
842 #ifdef LJ_FORCE_SWITCH
843 #define v_fswitch_r(rsw, rsw2, c0, c3, c4) fma(fma((c4), (rsw), (c3)), ((rsw2) * (rsw)), (c0))
845 SimdReal VLJ6_S0 = c6_S0 * fma(sixth_S, rinvsix_S0, v_fswitch_r(rsw_S0, rsw2_S0, p6_6cpot_S, p6_vc3_S, p6_vc4_S));
846 SimdReal VLJ6_S1 = c6_S1 * fma(sixth_S, rinvsix_S1, v_fswitch_r(rsw_S1, rsw2_S1, p6_6cpot_S, p6_vc3_S, p6_vc4_S));
848 SimdReal VLJ6_S2 = c6_S2 * fma(sixth_S, rinvsix_S2, v_fswitch_r(rsw_S2, rsw2_S2, p6_6cpot_S, p6_vc3_S, p6_vc4_S));
849 SimdReal VLJ6_S3 = c6_S3 * fma(sixth_S, rinvsix_S3, v_fswitch_r(rsw_S3, rsw2_S3, p6_6cpot_S, p6_vc3_S, p6_vc4_S));
851 SimdReal VLJ12_S0 = c12_S0 * fma(twelveth_S, rinvsix_S0 * rinvsix_S0, v_fswitch_r(rsw_S0, rsw2_S0, p12_12cpot_S, p12_vc3_S, p12_vc4_S));
852 SimdReal VLJ12_S1 = c12_S1 * fma(twelveth_S, rinvsix_S1 * rinvsix_S1, v_fswitch_r(rsw_S1, rsw2_S1, p12_12cpot_S, p12_vc3_S, p12_vc4_S));
854 SimdReal VLJ12_S2 = c12_S2 * fma(twelveth_S, rinvsix_S2 * rinvsix_S2, v_fswitch_r(rsw_S2, rsw2_S2, p12_12cpot_S, p12_vc3_S, p12_vc4_S));
855 SimdReal VLJ12_S3 = c12_S3 * fma(twelveth_S, rinvsix_S3 * rinvsix_S3, v_fswitch_r(rsw_S3, rsw2_S3, p12_12cpot_S, p12_vc3_S, p12_vc4_S));
858 #endif /* LJ_FORCE_SWITCH */
860 /* Add up the repulsion and dispersion */
861 SimdReal VLJ_S0 = VLJ12_S0 - VLJ6_S0;
862 SimdReal VLJ_S1 = VLJ12_S1 - VLJ6_S1;
864 SimdReal VLJ_S2 = VLJ12_S2 - VLJ6_S2;
865 SimdReal VLJ_S3 = VLJ12_S3 - VLJ6_S3;
868 #endif /* (LJ_CUT || LJ_FORCE_SWITCH) && CALC_ENERGIES */
871 /* We always need the potential, since it is needed for the force */
872 SimdReal VLJ_S0 = fnma(sixth_S, FrLJ6_S0, twelveth_S * FrLJ12_S0);
873 SimdReal VLJ_S1 = fnma(sixth_S, FrLJ6_S1, twelveth_S * FrLJ12_S1);
875 SimdReal VLJ_S2 = fnma(sixth_S, FrLJ6_S2, twelveth_S * FrLJ12_S2);
876 SimdReal VLJ_S3 = fnma(sixth_S, FrLJ6_S3, twelveth_S * FrLJ12_S3);
880 SimdReal sw_S0, dsw_S0;
881 SimdReal sw_S1, dsw_S1;
883 SimdReal sw_S2, dsw_S2;
884 SimdReal sw_S3, dsw_S3;
887 #define switch_r(rsw, rsw2, c3, c4, c5) fma(fma(fma(c5, rsw, c4), rsw, c3), ((rsw2) * (rsw)), one_S)
888 #define dswitch_r(rsw, rsw2, c2, c3, c4) (fma(fma(c4, rsw, c3), rsw, c2) * (rsw2))
890 sw_S0 = switch_r(rsw_S0, rsw2_S0, swV3_S, swV4_S, swV5_S);
891 dsw_S0 = dswitch_r(rsw_S0, rsw2_S0, swF2_S, swF3_S, swF4_S);
892 sw_S1 = switch_r(rsw_S1, rsw2_S1, swV3_S, swV4_S, swV5_S);
893 dsw_S1 = dswitch_r(rsw_S1, rsw2_S1, swF2_S, swF3_S, swF4_S);
895 sw_S2 = switch_r(rsw_S2, rsw2_S2, swV3_S, swV4_S, swV5_S);
896 dsw_S2 = dswitch_r(rsw_S2, rsw2_S2, swF2_S, swF3_S, swF4_S);
897 sw_S3 = switch_r(rsw_S3, rsw2_S3, swV3_S, swV4_S, swV5_S);
898 dsw_S3 = dswitch_r(rsw_S3, rsw2_S3, swF2_S, swF3_S, swF4_S);
900 frLJ_S0 = fnma(dsw_S0 * VLJ_S0, r_S0, sw_S0 * frLJ_S0);
901 frLJ_S1 = fnma(dsw_S1 * VLJ_S1, r_S1, sw_S1 * frLJ_S1);
903 frLJ_S2 = fnma(dsw_S2 * VLJ_S2, r_S2, sw_S2 * frLJ_S2);
904 frLJ_S3 = fnma(dsw_S3 * VLJ_S3, r_S3, sw_S3 * frLJ_S3);
907 VLJ_S0 = sw_S0 * VLJ_S0;
908 VLJ_S1 = sw_S1 * VLJ_S1;
910 VLJ_S2 = sw_S2 * VLJ_S2;
911 VLJ_S3 = sw_S3 * VLJ_S3;
918 #endif /* LJ_POT_SWITCH */
920 #if defined CALC_ENERGIES && defined CHECK_EXCLS
921 /* The potential shift should be removed for excluded pairs */
922 VLJ_S0 = selectByMask(VLJ_S0, interact_S0);
923 VLJ_S1 = selectByMask(VLJ_S1, interact_S1);
925 VLJ_S2 = selectByMask(VLJ_S2, interact_S2);
926 VLJ_S3 = selectByMask(VLJ_S3, interact_S3);
933 SimdReal c6grid_S0, rinvsix_nm_S0, cr2_S0, expmcr2_S0, poly_S0;
934 SimdReal c6grid_S1, rinvsix_nm_S1, cr2_S1, expmcr2_S1, poly_S1;
936 SimdReal c6grid_S2, rinvsix_nm_S2, cr2_S2, expmcr2_S2, poly_S2;
937 SimdReal c6grid_S3, rinvsix_nm_S3, cr2_S3, expmcr2_S3, poly_S3;
948 /* Determine C6 for the grid using the geometric combination rule */
949 c6s_j_S = load<SimdReal>(ljc+aj2+0);
950 c6grid_S0 = c6s_S0 * c6s_j_S;
951 c6grid_S1 = c6s_S1 * c6s_j_S;
953 c6grid_S2 = c6s_S2 * c6s_j_S;
954 c6grid_S3 = c6s_S3 * c6s_j_S;
958 /* Recalculate rinvsix without exclusion mask (compiler might optimize) */
959 rinvsix_nm_S0 = rinvsq_S0 * rinvsq_S0 * rinvsq_S0;
960 rinvsix_nm_S1 = rinvsq_S1 * rinvsq_S1 * rinvsq_S1;
962 rinvsix_nm_S2 = rinvsq_S2 * rinvsq_S2 * rinvsq_S2;
963 rinvsix_nm_S3 = rinvsq_S3 * rinvsq_S3 * rinvsq_S3;
966 /* We didn't use a mask, so we can copy */
967 rinvsix_nm_S0 = rinvsix_S0;
968 rinvsix_nm_S1 = rinvsix_S1;
970 rinvsix_nm_S2 = rinvsix_S2;
971 rinvsix_nm_S3 = rinvsix_S3;
975 /* Mask for the cut-off to avoid overflow of cr2^2 */
976 cr2_S0 = lje_c2_S * selectByMask(rsq_S0, wco_vdw_S0);
977 cr2_S1 = lje_c2_S * selectByMask(rsq_S1, wco_vdw_S1);
979 cr2_S2 = lje_c2_S * selectByMask(rsq_S2, wco_vdw_S2);
980 cr2_S3 = lje_c2_S * selectByMask(rsq_S3, wco_vdw_S3);
982 // Unsafe version of our exp() should be fine, since these arguments should never
983 // be smaller than -127 for any reasonable choice of cutoff or ewald coefficients.
984 expmcr2_S0 = exp<MathOptimization::Unsafe>( -cr2_S0 );
985 expmcr2_S1 = exp<MathOptimization::Unsafe>( -cr2_S1 );
987 expmcr2_S2 = exp<MathOptimization::Unsafe>( -cr2_S2 );
988 expmcr2_S3 = exp<MathOptimization::Unsafe>( -cr2_S3 );
991 /* 1 + cr2 + 1/2*cr2^2 */
992 poly_S0 = fma(fma(half_S, cr2_S0, one_S), cr2_S0, one_S);
993 poly_S1 = fma(fma(half_S, cr2_S1, one_S), cr2_S1, one_S);
995 poly_S2 = fma(fma(half_S, cr2_S2, one_S), cr2_S2, one_S);
996 poly_S3 = fma(fma(half_S, cr2_S3, one_S), cr2_S3, one_S);
999 /* We calculate LJ F*r = (6*C6)*(r^-6 - F_mesh/6), we use:
1000 * r^-6*cexp*(1 + cr2 + cr2^2/2 + cr2^3/6) = cexp*(r^-6*poly + c^6/6)
1002 frLJ_S0 = fma(c6grid_S0, fnma(expmcr2_S0, fma(rinvsix_nm_S0, poly_S0, lje_c6_6_S), rinvsix_nm_S0), frLJ_S0);
1003 frLJ_S1 = fma(c6grid_S1, fnma(expmcr2_S1, fma(rinvsix_nm_S1, poly_S1, lje_c6_6_S), rinvsix_nm_S1), frLJ_S1);
1005 frLJ_S2 = fma(c6grid_S2, fnma(expmcr2_S2, fma(rinvsix_nm_S2, poly_S2, lje_c6_6_S), rinvsix_nm_S2), frLJ_S2);
1006 frLJ_S3 = fma(c6grid_S3, fnma(expmcr2_S3, fma(rinvsix_nm_S3, poly_S3, lje_c6_6_S), rinvsix_nm_S3), frLJ_S3);
1009 #ifdef CALC_ENERGIES
1011 sh_mask_S0 = selectByMask(lje_vc_S, interact_S0);
1012 sh_mask_S1 = selectByMask(lje_vc_S, interact_S1);
1014 sh_mask_S2 = selectByMask(lje_vc_S, interact_S2);
1015 sh_mask_S3 = selectByMask(lje_vc_S, interact_S3);
1018 sh_mask_S0 = lje_vc_S;
1019 sh_mask_S1 = lje_vc_S;
1021 sh_mask_S2 = lje_vc_S;
1022 sh_mask_S3 = lje_vc_S;
1026 VLJ_S0 = fma(sixth_S * c6grid_S0, fma(rinvsix_nm_S0, fnma(expmcr2_S0, poly_S0, one_S), sh_mask_S0), VLJ_S0);
1027 VLJ_S1 = fma(sixth_S * c6grid_S1, fma(rinvsix_nm_S1, fnma(expmcr2_S1, poly_S1, one_S), sh_mask_S1), VLJ_S1);
1029 VLJ_S2 = fma(sixth_S * c6grid_S2, fma(rinvsix_nm_S2, fnma(expmcr2_S2, poly_S2, one_S), sh_mask_S2), VLJ_S2);
1030 VLJ_S3 = fma(sixth_S * c6grid_S3, fma(rinvsix_nm_S3, fnma(expmcr2_S3, poly_S3, one_S), sh_mask_S3), VLJ_S3);
1032 #endif /* CALC_ENERGIES */
1034 #endif /* LJ_EWALD_GEOM */
1036 #if defined VDW_CUTOFF_CHECK
1037 /* frLJ is multiplied later by rinvsq, which is masked for the Coulomb
1038 * cut-off, but if the VdW cut-off is shorter, we need to mask with that.
1040 frLJ_S0 = selectByMask(frLJ_S0, wco_vdw_S0);
1041 frLJ_S1 = selectByMask(frLJ_S1, wco_vdw_S1);
1043 frLJ_S2 = selectByMask(frLJ_S2, wco_vdw_S2);
1044 frLJ_S3 = selectByMask(frLJ_S3, wco_vdw_S3);
1048 #ifdef CALC_ENERGIES
1049 /* The potential shift should be removed for pairs beyond cut-off */
1050 VLJ_S0 = selectByMask(VLJ_S0, wco_vdw_S0);
1051 VLJ_S1 = selectByMask(VLJ_S1, wco_vdw_S1);
1053 VLJ_S2 = selectByMask(VLJ_S2, wco_vdw_S2);
1054 VLJ_S3 = selectByMask(VLJ_S3, wco_vdw_S3);
1058 #endif /* CALC_LJ */
1060 #ifdef CALC_ENERGIES
1061 #ifdef ENERGY_GROUPS
1062 /* Extract the group pair index per j pair.
1063 * Energy groups are stored per i-cluster, so things get
1064 * complicated when the i- and j-cluster size don't match.
1069 egps_j = nbatParams.energrp[cj >> 1];
1070 egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
1072 /* We assume UNROLLI <= UNROLLJ */
1074 for (jdi = 0; jdi < UNROLLJ/UNROLLI; jdi++)
1077 egps_j = nbatParams.energrp[cj*(UNROLLJ/UNROLLI) + jdi];
1078 for (jj = 0; jj < (UNROLLI/2); jj++)
1080 egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
1088 #ifndef ENERGY_GROUPS
1089 vctot_S = vctot_S + vcoul_S0 + vcoul_S1 + vcoul_S2 + vcoul_S3;
1091 add_ener_grp(vcoul_S0, vctp[0], egp_jj);
1092 add_ener_grp(vcoul_S1, vctp[1], egp_jj);
1093 add_ener_grp(vcoul_S2, vctp[2], egp_jj);
1094 add_ener_grp(vcoul_S3, vctp[3], egp_jj);
1100 #ifndef ENERGY_GROUPS
1102 Vvdwtot_S = Vvdwtot_S + VLJ_S0 + VLJ_S1 + VLJ_S2 + VLJ_S3;
1104 Vvdwtot_S = Vvdwtot_S + VLJ_S0 + VLJ_S1;
1107 add_ener_grp(VLJ_S0, vvdwtp[0], egp_jj);
1108 add_ener_grp(VLJ_S1, vvdwtp[1], egp_jj);
1110 add_ener_grp(VLJ_S2, vvdwtp[2], egp_jj);
1111 add_ener_grp(VLJ_S3, vvdwtp[3], egp_jj);
1114 #endif /* CALC_LJ */
1115 #endif /* CALC_ENERGIES */
1119 fscal_S0 = rinvsq_S0 * (frcoul_S0 + frLJ_S0);
1121 fscal_S0 = rinvsq_S0 * frLJ_S0;
1124 fscal_S1 = rinvsq_S1 * (frcoul_S1 + frLJ_S1);
1126 fscal_S1 = rinvsq_S1 * frLJ_S1;
1129 fscal_S0 = rinvsq_S0 * frcoul_S0;
1130 fscal_S1 = rinvsq_S1 * frcoul_S1;
1131 #endif /* CALC_LJ */
1132 #if defined CALC_LJ && !defined HALF_LJ
1134 fscal_S2 = rinvsq_S2 * (frcoul_S2 + frLJ_S2);
1135 fscal_S3 = rinvsq_S3 * (frcoul_S3 + frLJ_S3);
1137 fscal_S2 = rinvsq_S2 * frLJ_S2;
1138 fscal_S3 = rinvsq_S3 * frLJ_S3;
1141 /* Atom 2 and 3 don't have LJ, so only add Coulomb forces */
1142 fscal_S2 = rinvsq_S2 * frcoul_S2;
1143 fscal_S3 = rinvsq_S3 * frcoul_S3;
1146 /* Calculate temporary vectorial force */
1147 tx_S0 = fscal_S0 * dx_S0;
1148 tx_S1 = fscal_S1 * dx_S1;
1149 tx_S2 = fscal_S2 * dx_S2;
1150 tx_S3 = fscal_S3 * dx_S3;
1151 ty_S0 = fscal_S0 * dy_S0;
1152 ty_S1 = fscal_S1 * dy_S1;
1153 ty_S2 = fscal_S2 * dy_S2;
1154 ty_S3 = fscal_S3 * dy_S3;
1155 tz_S0 = fscal_S0 * dz_S0;
1156 tz_S1 = fscal_S1 * dz_S1;
1157 tz_S2 = fscal_S2 * dz_S2;
1158 tz_S3 = fscal_S3 * dz_S3;
1160 /* Increment i atom force */
1161 fix_S0 = fix_S0 + tx_S0;
1162 fix_S1 = fix_S1 + tx_S1;
1163 fix_S2 = fix_S2 + tx_S2;
1164 fix_S3 = fix_S3 + tx_S3;
1165 fiy_S0 = fiy_S0 + ty_S0;
1166 fiy_S1 = fiy_S1 + ty_S1;
1167 fiy_S2 = fiy_S2 + ty_S2;
1168 fiy_S3 = fiy_S3 + ty_S3;
1169 fiz_S0 = fiz_S0 + tz_S0;
1170 fiz_S1 = fiz_S1 + tz_S1;
1171 fiz_S2 = fiz_S2 + tz_S2;
1172 fiz_S3 = fiz_S3 + tz_S3;
1174 /* Decrement j atom force */
1175 store(f+ajx, load<SimdReal>(f+ajx) - (tx_S0 + tx_S1 + tx_S2 + tx_S3));
1176 store(f+ajy, load<SimdReal>(f+ajy) - (ty_S0 + ty_S1 + ty_S2 + ty_S3));
1177 store(f+ajz, load<SimdReal>(f+ajz) - (tz_S0 + tz_S1 + tz_S2 + tz_S3));