2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 /* This is the innermost loop contents for the 4 x N atom SIMD kernel.
37 * This flavor of the kernel duplicates the data for N j-particles in
 * 2xN wide SIMD registers to operate on 2 i-particles at once.
39 * This leads to 4/2=2 sets of most instructions. Therefore we call
40 * this kernel 2x(N+N) = 2xnn
42 * This 2xnn kernel is basically the 4xn equivalent with half the registers
43 * and instructions removed.
45 * An alternative would be to load to different cluster of N j-particles
46 * into SIMD registers, giving a 4x(N+N) kernel. This doubles the amount
47 * of instructions, which could lead to better scheduling. But we actually
48 * observed worse scheduling for the AVX-256 4x8 normal analytical PME
49 * kernel, which has a lower pair throughput than 2x(4+4) with gcc 4.7.
50 * It could be worth trying this option, but it takes some more effort.
 * This 2xnn kernel is basically the 4xn equivalent with half the
 * registers and instructions removed.
55 /* When calculating RF or Ewald interactions we calculate the electrostatic/LJ
56 * forces on excluded atom pairs here in the non-bonded loops.
57 * But when energies and/or virial is required we calculate them
 * separately, as it is then easier to separate the energy and virial
61 #if defined CHECK_EXCLS && (defined CALC_COULOMB || defined LJ_EWALD_GEOM)
65 /* Without exclusions and energies we only need to mask the cut-off,
66 * this can be faster with blendv.
68 #if !(defined CHECK_EXCLS || defined CALC_ENERGIES || defined LJ_EWALD_GEOM) && defined GMX_SIMD_HAVE_BLENDV
69 /* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
70 * With gcc this is slower, except for RF on Sandy Bridge.
71 * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
73 #if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_SIMD_X86_AVX_256_OR_HIGHER))
74 #define NBNXN_CUTOFF_USE_BLENDV
76 /* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
77 * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
80 #if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_SIMD_X86_AVX_256_OR_HIGHER
81 #define NBNXN_CUTOFF_USE_BLENDV
86 int cj, aj, ajx, ajy, ajz;
89 /* Energy group indices for two atoms packed into one int */
90 int egp_jj[UNROLLJ/2];
94 /* Interaction (non-exclusion) mask of all 1's or 0's */
95 gmx_simd_bool_t interact_S0;
96 gmx_simd_bool_t interact_S2;
99 gmx_simd_real_t jx_S, jy_S, jz_S;
100 gmx_simd_real_t dx_S0, dy_S0, dz_S0;
101 gmx_simd_real_t dx_S2, dy_S2, dz_S2;
102 gmx_simd_real_t tx_S0, ty_S0, tz_S0;
103 gmx_simd_real_t tx_S2, ty_S2, tz_S2;
104 gmx_simd_real_t rsq_S0, rinv_S0, rinvsq_S0;
105 gmx_simd_real_t rsq_S2, rinv_S2, rinvsq_S2;
106 #ifndef NBNXN_CUTOFF_USE_BLENDV
107 /* wco: within cut-off, mask of all 1's or 0's */
108 gmx_simd_bool_t wco_S0;
109 gmx_simd_bool_t wco_S2;
111 #ifdef VDW_CUTOFF_CHECK
112 gmx_simd_bool_t wco_vdw_S0;
114 gmx_simd_bool_t wco_vdw_S2;
118 #if (defined CALC_COULOMB && defined CALC_COUL_TAB) || defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
119 gmx_simd_real_t r_S0;
120 #if (defined CALC_COULOMB && defined CALC_COUL_TAB) || !defined HALF_LJ
121 gmx_simd_real_t r_S2;
125 #if defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
126 gmx_simd_real_t rsw_S0, rsw2_S0;
128 gmx_simd_real_t rsw_S2, rsw2_S2;
134 /* 1/r masked with the interaction mask */
135 gmx_simd_real_t rinv_ex_S0;
136 gmx_simd_real_t rinv_ex_S2;
138 gmx_simd_real_t jq_S;
139 gmx_simd_real_t qq_S0;
140 gmx_simd_real_t qq_S2;
142 /* The force (PME mesh force) we need to subtract from 1/r^2 */
143 gmx_simd_real_t fsub_S0;
144 gmx_simd_real_t fsub_S2;
146 #ifdef CALC_COUL_EWALD
147 gmx_simd_real_t brsq_S0, brsq_S2;
148 gmx_simd_real_t ewcorr_S0, ewcorr_S2;
151 /* frcoul = (1/r - fsub)*r */
152 gmx_simd_real_t frcoul_S0;
153 gmx_simd_real_t frcoul_S2;
155 /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
156 gmx_simd_real_t rs_S0, rf_S0, frac_S0;
157 gmx_simd_real_t rs_S2, rf_S2, frac_S2;
158 /* Table index: rs truncated to an int */
159 gmx_simd_int32_t ti_S0, ti_S2;
160 /* Linear force table values */
161 gmx_simd_real_t ctab0_S0, ctab1_S0;
162 gmx_simd_real_t ctab0_S2, ctab1_S2;
164 /* Quadratic energy table value */
165 gmx_simd_real_t ctabv_S0;
166 gmx_simd_real_t ctabv_S2;
169 #if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
170 /* The potential (PME mesh) we need to subtract from 1/r */
171 gmx_simd_real_t vc_sub_S0;
172 gmx_simd_real_t vc_sub_S2;
175 /* Electrostatic potential */
176 gmx_simd_real_t vcoul_S0;
177 gmx_simd_real_t vcoul_S2;
180 /* The force times 1/r */
181 gmx_simd_real_t fscal_S0;
182 gmx_simd_real_t fscal_S2;
186 /* LJ sigma_j/2 and sqrt(epsilon_j) */
187 gmx_simd_real_t hsig_j_S, seps_j_S;
188 /* LJ sigma_ij and epsilon_ij */
189 gmx_simd_real_t sig_S0, eps_S0;
191 gmx_simd_real_t sig_S2, eps_S2;
194 gmx_simd_real_t sig2_S0, sig6_S0;
196 gmx_simd_real_t sig2_S2, sig6_S2;
198 #endif /* LJ_COMB_LB */
202 gmx_simd_real_t c6s_j_S, c12s_j_S;
205 #if defined LJ_COMB_GEOM || defined LJ_COMB_LB || defined LJ_EWALD_GEOM
206 /* Index for loading LJ parameters, complicated when interleaving */
210 /* Intermediate variables for LJ calculation */
212 gmx_simd_real_t rinvsix_S0;
214 gmx_simd_real_t rinvsix_S2;
218 gmx_simd_real_t sir_S0, sir2_S0, sir6_S0;
220 gmx_simd_real_t sir_S2, sir2_S2, sir6_S2;
224 gmx_simd_real_t FrLJ6_S0, FrLJ12_S0, frLJ_S0;
226 gmx_simd_real_t FrLJ6_S2, FrLJ12_S2, frLJ_S2;
230 gmx_mm_hpr fjx_S, fjy_S, fjz_S;
232 /* j-cluster index */
235 /* Atom indices (of the first atom in the cluster) */
237 #if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB || defined LJ_EWALD_GEOM)
245 gmx_load_simd_2xnn_interactions(l_cj[cjind].excl,
246 filter_S0, filter_S2,
247 &interact_S0, &interact_S2);
248 #endif /* CHECK_EXCLS */
250 /* load j atom coordinates */
251 gmx_loaddh_pr(&jx_S, x+ajx);
252 gmx_loaddh_pr(&jy_S, x+ajy);
253 gmx_loaddh_pr(&jz_S, x+ajz);
255 /* Calculate distance */
256 dx_S0 = gmx_simd_sub_r(ix_S0, jx_S);
257 dy_S0 = gmx_simd_sub_r(iy_S0, jy_S);
258 dz_S0 = gmx_simd_sub_r(iz_S0, jz_S);
259 dx_S2 = gmx_simd_sub_r(ix_S2, jx_S);
260 dy_S2 = gmx_simd_sub_r(iy_S2, jy_S);
261 dz_S2 = gmx_simd_sub_r(iz_S2, jz_S);
263 /* rsq = dx*dx+dy*dy+dz*dz */
264 rsq_S0 = gmx_simd_calc_rsq_r(dx_S0, dy_S0, dz_S0);
265 rsq_S2 = gmx_simd_calc_rsq_r(dx_S2, dy_S2, dz_S2);
267 #ifndef NBNXN_CUTOFF_USE_BLENDV
268 wco_S0 = gmx_simd_cmplt_r(rsq_S0, rc2_S);
269 wco_S2 = gmx_simd_cmplt_r(rsq_S2, rc2_S);
274 /* Only remove the (sub-)diagonal to avoid double counting */
275 #if UNROLLJ == UNROLLI
278 wco_S0 = gmx_simd_and_b(wco_S0, diagonal_mask_S0);
279 wco_S2 = gmx_simd_and_b(wco_S2, diagonal_mask_S2);
282 #if UNROLLJ == 2*UNROLLI
285 wco_S0 = gmx_simd_and_b(wco_S0, diagonal_mask0_S0);
286 wco_S2 = gmx_simd_and_b(wco_S2, diagonal_mask0_S2);
288 else if (cj*2 + 1 == ci_sh)
290 wco_S0 = gmx_simd_and_b(wco_S0, diagonal_mask1_S0);
291 wco_S2 = gmx_simd_and_b(wco_S2, diagonal_mask1_S2);
294 #error "only UNROLLJ == UNROLLI*(1 or 2) currently supported in 2xnn kernels"
297 #else /* EXCL_FORCES */
298 /* No exclusion forces: remove all excluded atom pairs from the list */
299 wco_S0 = gmx_simd_and_b(wco_S0, interact_S0);
300 wco_S2 = gmx_simd_and_b(wco_S2, interact_S2);
307 real tmpa[2*GMX_SIMD_REAL_WIDTH], *tmp;
308 tmp = gmx_simd_align_r(tmpa);
309 for (i = 0; i < UNROLLI; i += 2)
311 gmx_simd_store_r(tmp, gmx_simd_sub_r(rc2_S, i == 0 ? rsq_S0 : rsq_S2));
312 for (j = 0; j < 2*UNROLLJ; j++)
324 /* For excluded pairs add a small number to avoid r^-6 = NaN */
325 rsq_S0 = gmx_simd_add_r(rsq_S0, gmx_simd_blendnotzero_r(avoid_sing_S, interact_S0));
326 rsq_S2 = gmx_simd_add_r(rsq_S2, gmx_simd_blendnotzero_r(avoid_sing_S, interact_S2));
330 rinv_S0 = gmx_simd_invsqrt_r(rsq_S0);
331 rinv_S2 = gmx_simd_invsqrt_r(rsq_S2);
334 /* Load parameters for j atom */
335 gmx_loaddh_pr(&jq_S, q+aj);
336 qq_S0 = gmx_simd_mul_r(iq_S0, jq_S);
337 qq_S2 = gmx_simd_mul_r(iq_S2, jq_S);
342 #if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
343 gmx_simd_real_t c6_S0, c12_S0;
344 load_lj_pair_params2(nbfp0, nbfp1, type, aj, &c6_S0, &c12_S0);
346 gmx_simd_real_t c6_S2, c12_S2;
347 load_lj_pair_params2(nbfp2, nbfp3, type, aj, &c6_S2, &c12_S2);
349 #endif /* not defined any LJ rule */
352 gmx_loaddh_pr(&c6s_j_S, ljc+aj2+0);
353 gmx_loaddh_pr(&c12s_j_S, ljc+aj2+STRIDE);
354 gmx_simd_real_t c6_S0 = gmx_simd_mul_r(c6s_S0, c6s_j_S );
356 gmx_simd_real_t c6_S2 = gmx_simd_mul_r(c6s_S2, c6s_j_S );
358 gmx_simd_real_t c12_S0 = gmx_simd_mul_r(c12s_S0, c12s_j_S);
360 gmx_simd_real_t c12_S2 = gmx_simd_mul_r(c12s_S2, c12s_j_S);
362 #endif /* LJ_COMB_GEOM */
365 gmx_loaddh_pr(&hsig_j_S, ljc+aj2+0);
366 gmx_loaddh_pr(&seps_j_S, ljc+aj2+STRIDE);
368 sig_S0 = gmx_simd_add_r(hsig_i_S0, hsig_j_S);
369 eps_S0 = gmx_simd_mul_r(seps_i_S0, seps_j_S);
371 sig_S2 = gmx_simd_add_r(hsig_i_S2, hsig_j_S);
372 eps_S2 = gmx_simd_mul_r(seps_i_S2, seps_j_S);
374 #endif /* LJ_COMB_LB */
378 #ifndef NBNXN_CUTOFF_USE_BLENDV
379 rinv_S0 = gmx_simd_blendzero_r(rinv_S0, wco_S0);
380 rinv_S2 = gmx_simd_blendzero_r(rinv_S2, wco_S2);
382 /* This needs to be modified: It makes assumptions about the internal storage
383 * of the SIMD representation, in particular that the blendv instruction always
384 * selects based on the sign bit. If the performance is really critical, it
385 * should be turned into a function that is platform-specific.
387 /* We only need to mask for the cut-off: blendv is faster */
388 rinv_S0 = gmx_simd_blendv_r(rinv_S0, zero_S, gmx_simd_sub_r(rc2_S, rsq_S0));
389 rinv_S2 = gmx_simd_blendv_r(rinv_S2, zero_S, gmx_simd_sub_r(rc2_S, rsq_S2));
392 rinvsq_S0 = gmx_simd_mul_r(rinv_S0, rinv_S0);
393 rinvsq_S2 = gmx_simd_mul_r(rinv_S2, rinv_S2);
396 /* Note that here we calculate force*r, not the usual force/r.
397 * This allows avoiding masking the reaction-field contribution,
398 * as frcoul is later multiplied by rinvsq which has been
399 * masked with the cut-off check.
403 /* Only add 1/r for non-excluded atom pairs */
404 rinv_ex_S0 = gmx_simd_blendzero_r(rinv_S0, interact_S0);
405 rinv_ex_S2 = gmx_simd_blendzero_r(rinv_S2, interact_S2);
407 /* No exclusion forces, we always need 1/r */
408 #define rinv_ex_S0 rinv_S0
409 #define rinv_ex_S2 rinv_S2
413 /* Electrostatic interactions */
414 frcoul_S0 = gmx_simd_mul_r(qq_S0, gmx_simd_fmadd_r(rsq_S0, mrc_3_S, rinv_ex_S0));
415 frcoul_S2 = gmx_simd_mul_r(qq_S2, gmx_simd_fmadd_r(rsq_S2, mrc_3_S, rinv_ex_S2));
418 vcoul_S0 = gmx_simd_mul_r(qq_S0, gmx_simd_add_r(rinv_ex_S0, gmx_simd_add_r(gmx_simd_mul_r(rsq_S0, hrc_3_S), moh_rc_S)));
419 vcoul_S2 = gmx_simd_mul_r(qq_S2, gmx_simd_add_r(rinv_ex_S2, gmx_simd_add_r(gmx_simd_mul_r(rsq_S2, hrc_3_S), moh_rc_S)));
423 #ifdef CALC_COUL_EWALD
424 /* We need to mask (or limit) rsq for the cut-off,
425 * as large distances can cause an overflow in gmx_pmecorrF/V.
427 #ifndef NBNXN_CUTOFF_USE_BLENDV
428 brsq_S0 = gmx_simd_mul_r(beta2_S, gmx_simd_blendzero_r(rsq_S0, wco_S0));
429 brsq_S2 = gmx_simd_mul_r(beta2_S, gmx_simd_blendzero_r(rsq_S2, wco_S2));
431 /* Strangely, putting mul on a separate line is slower (icc 13) */
432 brsq_S0 = gmx_simd_mul_r(beta2_S, gmx_simd_blendv_r(rsq_S0, zero_S, gmx_simd_sub_r(rc2_S, rsq_S0)));
433 brsq_S2 = gmx_simd_mul_r(beta2_S, gmx_simd_blendv_r(rsq_S2, zero_S, gmx_simd_sub_r(rc2_S, rsq_S2)));
435 ewcorr_S0 = gmx_simd_mul_r(gmx_simd_pmecorrF_r(brsq_S0), beta_S);
436 ewcorr_S2 = gmx_simd_mul_r(gmx_simd_pmecorrF_r(brsq_S2), beta_S);
437 frcoul_S0 = gmx_simd_mul_r(qq_S0, gmx_simd_fmadd_r(ewcorr_S0, brsq_S0, rinv_ex_S0));
438 frcoul_S2 = gmx_simd_mul_r(qq_S2, gmx_simd_fmadd_r(ewcorr_S2, brsq_S2, rinv_ex_S2));
441 vc_sub_S0 = gmx_simd_mul_r(gmx_simd_pmecorrV_r(brsq_S0), beta_S);
442 vc_sub_S2 = gmx_simd_mul_r(gmx_simd_pmecorrV_r(brsq_S2), beta_S);
445 #endif /* CALC_COUL_EWALD */
448 /* Electrostatic interactions */
449 r_S0 = gmx_simd_mul_r(rsq_S0, rinv_S0);
450 r_S2 = gmx_simd_mul_r(rsq_S2, rinv_S2);
451 /* Convert r to scaled table units */
452 rs_S0 = gmx_simd_mul_r(r_S0, invtsp_S);
453 rs_S2 = gmx_simd_mul_r(r_S2, invtsp_S);
454 /* Truncate scaled r to an int */
455 ti_S0 = gmx_simd_cvtt_r2i(rs_S0);
456 ti_S2 = gmx_simd_cvtt_r2i(rs_S2);
457 #ifdef GMX_SIMD_HAVE_TRUNC
458 rf_S0 = gmx_simd_trunc_r(rs_S0);
459 rf_S2 = gmx_simd_trunc_r(rs_S2);
461 rf_S0 = gmx_simd_cvt_i2r(ti_S0);
462 rf_S2 = gmx_simd_cvt_i2r(ti_S2);
464 frac_S0 = gmx_simd_sub_r(rs_S0, rf_S0);
465 frac_S2 = gmx_simd_sub_r(rs_S2, rf_S2);
467 /* Load and interpolate table forces and possibly energies.
468 * Force and energy can be combined in one table, stride 4: FDV0
469 * or in two separate tables with stride 1: F and V
470 * Currently single precision uses FDV0, double F and V.
472 #ifndef CALC_ENERGIES
473 load_table_f(tab_coul_F, ti_S0, ti0, &ctab0_S0, &ctab1_S0);
474 load_table_f(tab_coul_F, ti_S2, ti2, &ctab0_S2, &ctab1_S2);
477 load_table_f_v(tab_coul_F, ti_S0, ti0, &ctab0_S0, &ctab1_S0, &ctabv_S0);
478 load_table_f_v(tab_coul_F, ti_S2, ti2, &ctab0_S2, &ctab1_S2, &ctabv_S2);
480 load_table_f_v(tab_coul_F, tab_coul_V, ti_S0, ti0, &ctab0_S0, &ctab1_S0, &ctabv_S0);
481 load_table_f_v(tab_coul_F, tab_coul_V, ti_S2, ti2, &ctab0_S2, &ctab1_S2, &ctabv_S2);
484 fsub_S0 = gmx_simd_add_r(ctab0_S0, gmx_simd_mul_r(frac_S0, ctab1_S0));
485 fsub_S2 = gmx_simd_add_r(ctab0_S2, gmx_simd_mul_r(frac_S2, ctab1_S2));
486 frcoul_S0 = gmx_simd_mul_r(qq_S0, gmx_simd_sub_r(rinv_ex_S0, gmx_simd_mul_r(fsub_S0, r_S0)));
487 frcoul_S2 = gmx_simd_mul_r(qq_S2, gmx_simd_sub_r(rinv_ex_S2, gmx_simd_mul_r(fsub_S2, r_S2)));
490 vc_sub_S0 = gmx_simd_add_r(ctabv_S0, gmx_simd_mul_r(gmx_simd_mul_r(mhalfsp_S, frac_S0), gmx_simd_add_r(ctab0_S0, fsub_S0)));
491 vc_sub_S2 = gmx_simd_add_r(ctabv_S2, gmx_simd_mul_r(gmx_simd_mul_r(mhalfsp_S, frac_S2), gmx_simd_add_r(ctab0_S2, fsub_S2)));
493 #endif /* CALC_COUL_TAB */
495 #if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
496 #ifndef NO_SHIFT_EWALD
497 /* Add Ewald potential shift to vc_sub for convenience */
499 vc_sub_S0 = gmx_simd_add_r(vc_sub_S0, gmx_simd_blendzero_r(sh_ewald_S, interact_S0));
500 vc_sub_S2 = gmx_simd_add_r(vc_sub_S2, gmx_simd_blendzero_r(sh_ewald_S, interact_S2));
502 vc_sub_S0 = gmx_simd_add_r(vc_sub_S0, sh_ewald_S);
503 vc_sub_S2 = gmx_simd_add_r(vc_sub_S2, sh_ewald_S);
507 vcoul_S0 = gmx_simd_mul_r(qq_S0, gmx_simd_sub_r(rinv_ex_S0, vc_sub_S0));
508 vcoul_S2 = gmx_simd_mul_r(qq_S2, gmx_simd_sub_r(rinv_ex_S2, vc_sub_S2));
512 /* Mask energy for cut-off and diagonal */
513 vcoul_S0 = gmx_simd_blendzero_r(vcoul_S0, wco_S0);
514 vcoul_S2 = gmx_simd_blendzero_r(vcoul_S2, wco_S2);
517 #endif /* CALC_COULOMB */
520 /* Lennard-Jones interaction */
522 #ifdef VDW_CUTOFF_CHECK
523 wco_vdw_S0 = gmx_simd_cmplt_r(rsq_S0, rcvdw2_S);
525 wco_vdw_S2 = gmx_simd_cmplt_r(rsq_S2, rcvdw2_S);
528 /* Same cut-off for Coulomb and VdW, reuse the registers */
529 #define wco_vdw_S0 wco_S0
530 #define wco_vdw_S2 wco_S2
534 rinvsix_S0 = gmx_simd_mul_r(rinvsq_S0, gmx_simd_mul_r(rinvsq_S0, rinvsq_S0));
536 rinvsix_S0 = gmx_simd_blendzero_r(rinvsix_S0, interact_S0);
539 rinvsix_S2 = gmx_simd_mul_r(rinvsq_S2, gmx_simd_mul_r(rinvsq_S2, rinvsq_S2));
541 rinvsix_S2 = gmx_simd_blendzero_r(rinvsix_S2, interact_S2);
545 #if defined LJ_CUT || defined LJ_POT_SWITCH
546 /* We have plain LJ or LJ-PME with simple C6/6 C12/12 coefficients */
547 FrLJ6_S0 = gmx_simd_mul_r(c6_S0, rinvsix_S0);
549 FrLJ6_S2 = gmx_simd_mul_r(c6_S2, rinvsix_S2);
551 FrLJ12_S0 = gmx_simd_mul_r(c12_S0, gmx_simd_mul_r(rinvsix_S0, rinvsix_S0));
553 FrLJ12_S2 = gmx_simd_mul_r(c12_S2, gmx_simd_mul_r(rinvsix_S2, rinvsix_S2));
557 #if defined LJ_FORCE_SWITCH || defined LJ_POT_SWITCH
558 /* We switch the LJ force */
559 r_S0 = gmx_simd_mul_r(rsq_S0, rinv_S0);
560 rsw_S0 = gmx_simd_max_r(gmx_simd_sub_r(r_S0, rswitch_S), zero_S);
561 rsw2_S0 = gmx_simd_mul_r(rsw_S0, rsw_S0);
563 r_S2 = gmx_simd_mul_r(rsq_S2, rinv_S2);
564 rsw_S2 = gmx_simd_max_r(gmx_simd_sub_r(r_S2, rswitch_S), zero_S);
565 rsw2_S2 = gmx_simd_mul_r(rsw_S2, rsw_S2);
569 #ifdef LJ_FORCE_SWITCH
571 #define add_fr_switch(fr, rsw, rsw2_r, c2, c3) gmx_simd_fmadd_r(gmx_simd_fmadd_r(c3, rsw, c2), rsw2_r, fr)
572 gmx_simd_real_t rsw2_r_S0 = gmx_simd_mul_r(rsw2_S0, r_S0);
573 FrLJ6_S0 = gmx_simd_mul_r(c6_S0, add_fr_switch(rinvsix_S0, rsw_S0, rsw2_r_S0, p6_fc2_S, p6_fc3_S));
575 gmx_simd_real_t rsw2_r_S2 = gmx_simd_mul_r(rsw2_S2, r_S2);
576 FrLJ6_S2 = gmx_simd_mul_r(c6_S2, add_fr_switch(rinvsix_S2, rsw_S2, rsw2_r_S2, p6_fc2_S, p6_fc3_S));
578 FrLJ12_S0 = gmx_simd_mul_r(c12_S0, add_fr_switch(gmx_simd_mul_r(rinvsix_S0, rinvsix_S0), rsw_S0, rsw2_r_S0, p12_fc2_S, p12_fc3_S));
580 FrLJ12_S2 = gmx_simd_mul_r(c12_S2, add_fr_switch(gmx_simd_mul_r(rinvsix_S2, rinvsix_S2), rsw_S2, rsw2_r_S2, p12_fc2_S, p12_fc3_S));
583 #endif /* LJ_FORCE_SWITCH */
585 #endif /* not LJ_COMB_LB */
588 sir_S0 = gmx_simd_mul_r(sig_S0, rinv_S0);
590 sir_S2 = gmx_simd_mul_r(sig_S2, rinv_S2);
592 sir2_S0 = gmx_simd_mul_r(sir_S0, sir_S0);
594 sir2_S2 = gmx_simd_mul_r(sir_S2, sir_S2);
596 sir6_S0 = gmx_simd_mul_r(sir2_S0, gmx_simd_mul_r(sir2_S0, sir2_S0));
598 sir6_S0 = gmx_simd_blendzero_r(sir6_S0, interact_S0);
601 sir6_S2 = gmx_simd_mul_r(sir2_S2, gmx_simd_mul_r(sir2_S2, sir2_S2));
603 sir6_S2 = gmx_simd_blendzero_r(sir6_S2, interact_S2);
606 #ifdef VDW_CUTOFF_CHECK
607 sir6_S0 = gmx_simd_blendzero_r(sir6_S0, wco_vdw_S0);
609 sir6_S2 = gmx_simd_blendzero_r(sir6_S2, wco_vdw_S2);
612 FrLJ6_S0 = gmx_simd_mul_r(eps_S0, sir6_S0);
614 FrLJ6_S2 = gmx_simd_mul_r(eps_S2, sir6_S2);
616 FrLJ12_S0 = gmx_simd_mul_r(FrLJ6_S0, sir6_S0);
618 FrLJ12_S2 = gmx_simd_mul_r(FrLJ6_S2, sir6_S2);
620 #if defined CALC_ENERGIES
621 /* We need C6 and C12 to calculate the LJ potential shift */
622 sig2_S0 = gmx_simd_mul_r(sig_S0, sig_S0);
624 sig2_S2 = gmx_simd_mul_r(sig_S2, sig_S2);
626 sig6_S0 = gmx_simd_mul_r(sig2_S0, gmx_simd_mul_r(sig2_S0, sig2_S0));
628 sig6_S2 = gmx_simd_mul_r(sig2_S2, gmx_simd_mul_r(sig2_S2, sig2_S2));
630 gmx_simd_real_t c6_S0 = gmx_simd_mul_r(eps_S0, sig6_S0);
632 gmx_simd_real_t c6_S2 = gmx_simd_mul_r(eps_S2, sig6_S2);
634 gmx_simd_real_t c12_S0 = gmx_simd_mul_r(c6_S0, sig6_S0);
636 gmx_simd_real_t c12_S2 = gmx_simd_mul_r(c6_S2, sig6_S2);
639 #endif /* LJ_COMB_LB */
641 /* Determine the total scalar LJ force*r */
642 frLJ_S0 = gmx_simd_sub_r(FrLJ12_S0, FrLJ6_S0);
644 frLJ_S2 = gmx_simd_sub_r(FrLJ12_S2, FrLJ6_S2);
647 #if (defined LJ_CUT || defined LJ_FORCE_SWITCH) && defined CALC_ENERGIES
650 /* Calculate the LJ energies, with constant potential shift */
651 gmx_simd_real_t VLJ6_S0 = gmx_simd_mul_r(sixth_S, gmx_simd_fmadd_r(c6_S0, p6_cpot_S, FrLJ6_S0));
653 gmx_simd_real_t VLJ6_S2 = gmx_simd_mul_r(sixth_S, gmx_simd_fmadd_r(c6_S2, p6_cpot_S, FrLJ6_S2));
655 gmx_simd_real_t VLJ12_S0 = gmx_simd_mul_r(twelveth_S, gmx_simd_fmadd_r(c12_S0, p12_cpot_S, FrLJ12_S0));
657 gmx_simd_real_t VLJ12_S2 = gmx_simd_mul_r(twelveth_S, gmx_simd_fmadd_r(c12_S2, p12_cpot_S, FrLJ12_S2));
661 #ifdef LJ_FORCE_SWITCH
662 #define v_fswitch_pr(rsw, rsw2, c0, c3, c4) gmx_simd_fmadd_r(gmx_simd_fmadd_r(c4, rsw, c3), gmx_simd_mul_r(rsw2, rsw), c0)
664 gmx_simd_real_t VLJ6_S0 = gmx_simd_mul_r(c6_S0, gmx_simd_fmadd_r(sixth_S, rinvsix_S0, v_fswitch_pr(rsw_S0, rsw2_S0, p6_6cpot_S, p6_vc3_S, p6_vc4_S)));
666 gmx_simd_real_t VLJ6_S2 = gmx_simd_mul_r(c6_S2, gmx_simd_fmadd_r(sixth_S, rinvsix_S2, v_fswitch_pr(rsw_S2, rsw2_S2, p6_6cpot_S, p6_vc3_S, p6_vc4_S)));
668 gmx_simd_real_t VLJ12_S0 = gmx_simd_mul_r(c12_S0, gmx_simd_fmadd_r(twelveth_S, gmx_simd_mul_r(rinvsix_S0, rinvsix_S0), v_fswitch_pr(rsw_S0, rsw2_S0, p12_12cpot_S, p12_vc3_S, p12_vc4_S)));
670 gmx_simd_real_t VLJ12_S2 = gmx_simd_mul_r(c12_S2, gmx_simd_fmadd_r(twelveth_S, gmx_simd_mul_r(rinvsix_S2, rinvsix_S2), v_fswitch_pr(rsw_S2, rsw2_S2, p12_12cpot_S, p12_vc3_S, p12_vc4_S)));
673 #endif /* LJ_FORCE_SWITCH */
675 /* Add up the repulsion and dispersion */
676 gmx_simd_real_t VLJ_S0 = gmx_simd_sub_r(VLJ12_S0, VLJ6_S0);
678 gmx_simd_real_t VLJ_S2 = gmx_simd_sub_r(VLJ12_S2, VLJ6_S2);
681 #endif /* (LJ_CUT || LJ_FORCE_SWITCH) && CALC_ENERGIES */
684 /* We always need the potential, since it is needed for the force */
685 gmx_simd_real_t VLJ_S0 = gmx_simd_fnmadd_r(sixth_S, FrLJ6_S0, gmx_simd_mul_r(twelveth_S, FrLJ12_S0));
687 gmx_simd_real_t VLJ_S2 = gmx_simd_fnmadd_r(sixth_S, FrLJ6_S2, gmx_simd_mul_r(twelveth_S, FrLJ12_S2));
691 gmx_simd_real_t sw_S0, dsw_S0;
693 gmx_simd_real_t sw_S2, dsw_S2;
696 #define switch_pr(rsw, rsw2, c3, c4, c5) gmx_simd_fmadd_r(gmx_simd_fmadd_r(gmx_simd_fmadd_r(c5, rsw, c4), rsw, c3), gmx_simd_mul_r(rsw2, rsw), one_S)
697 #define dswitch_pr(rsw, rsw2, c2, c3, c4) gmx_simd_mul_r(gmx_simd_fmadd_r(gmx_simd_fmadd_r(c4, rsw, c3), rsw, c2), rsw2)
699 sw_S0 = switch_pr(rsw_S0, rsw2_S0, swV3_S, swV4_S, swV5_S);
700 dsw_S0 = dswitch_pr(rsw_S0, rsw2_S0, swF2_S, swF3_S, swF4_S);
702 sw_S2 = switch_pr(rsw_S2, rsw2_S2, swV3_S, swV4_S, swV5_S);
703 dsw_S2 = dswitch_pr(rsw_S2, rsw2_S2, swF2_S, swF3_S, swF4_S);
705 frLJ_S0 = gmx_simd_fnmadd_r(gmx_simd_mul_r(dsw_S0, VLJ_S0), r_S0, gmx_simd_mul_r(sw_S0, frLJ_S0));
707 frLJ_S2 = gmx_simd_fnmadd_r(gmx_simd_mul_r(dsw_S2, VLJ_S2), r_S2, gmx_simd_mul_r(sw_S2, frLJ_S2));
710 VLJ_S0 = gmx_simd_mul_r(sw_S0, VLJ_S0);
712 VLJ_S2 = gmx_simd_mul_r(sw_S2, VLJ_S2);
719 #endif /* LJ_POT_SWITCH */
721 #if defined CALC_ENERGIES && defined CHECK_EXCLS
722 /* The potential shift should be removed for excluded pairs */
723 VLJ_S0 = gmx_simd_blendzero_r(VLJ_S0, interact_S0);
725 VLJ_S2 = gmx_simd_blendzero_r(VLJ_S2, interact_S2);
731 gmx_simd_real_t c6s_j_S;
732 gmx_simd_real_t c6grid_S0, rinvsix_nm_S0, cr2_S0, expmcr2_S0, poly_S0;
734 gmx_simd_real_t c6grid_S2, rinvsix_nm_S2, cr2_S2, expmcr2_S2, poly_S2;
737 gmx_simd_real_t sh_mask_S0;
739 gmx_simd_real_t sh_mask_S2;
743 /* Determine C6 for the grid using the geometric combination rule */
744 gmx_loaddh_pr(&c6s_j_S, ljc+aj2+0);
745 c6grid_S0 = gmx_simd_mul_r(c6s_S0, c6s_j_S);
747 c6grid_S2 = gmx_simd_mul_r(c6s_S2, c6s_j_S);
751 /* Recalculate rinvsix without exclusion mask (compiler might optimize) */
752 rinvsix_nm_S0 = gmx_simd_mul_r(rinvsq_S0, gmx_simd_mul_r(rinvsq_S0, rinvsq_S0));
754 rinvsix_nm_S2 = gmx_simd_mul_r(rinvsq_S2, gmx_simd_mul_r(rinvsq_S2, rinvsq_S2));
757 /* We didn't use a mask, so we can copy */
758 rinvsix_nm_S0 = rinvsix_S0;
760 rinvsix_nm_S2 = rinvsix_S2;
764 /* Mask for the cut-off to avoid overflow of cr2^2 */
765 cr2_S0 = gmx_simd_mul_r(lje_c2_S, gmx_simd_blendzero_r(rsq_S0, wco_vdw_S0));
767 cr2_S2 = gmx_simd_mul_r(lje_c2_S, gmx_simd_blendzero_r(rsq_S2, wco_vdw_S2));
769 expmcr2_S0 = gmx_simd_exp_r(gmx_simd_mul_r(mone_S, cr2_S0));
771 expmcr2_S2 = gmx_simd_exp_r(gmx_simd_mul_r(mone_S, cr2_S2));
774 /* 1 + cr2 + 1/2*cr2^2 */
775 poly_S0 = gmx_simd_fmadd_r(gmx_simd_fmadd_r(half_S, cr2_S0, one_S), cr2_S0, one_S);
777 poly_S2 = gmx_simd_fmadd_r(gmx_simd_fmadd_r(half_S, cr2_S2, one_S), cr2_S2, one_S);
780 /* We calculate LJ F*r = (6*C6)*(r^-6 - F_mesh/6), we use:
781 * r^-6*cexp*(1 + cr2 + cr2^2/2 + cr2^3/6) = cexp*(r^-6*poly + c^6/6)
783 frLJ_S0 = gmx_simd_fmadd_r(c6grid_S0, gmx_simd_fnmadd_r(expmcr2_S0, gmx_simd_fmadd_r(rinvsix_nm_S0, poly_S0, lje_c6_6_S), rinvsix_nm_S0), frLJ_S0);
785 frLJ_S2 = gmx_simd_fmadd_r(c6grid_S2, gmx_simd_fnmadd_r(expmcr2_S2, gmx_simd_fmadd_r(rinvsix_nm_S2, poly_S2, lje_c6_6_S), rinvsix_nm_S2), frLJ_S2);
790 sh_mask_S0 = gmx_simd_blendzero_r(lje_vc_S, interact_S0);
792 sh_mask_S2 = gmx_simd_blendzero_r(lje_vc_S, interact_S2);
795 sh_mask_S0 = lje_vc_S;
797 sh_mask_S2 = lje_vc_S;
801 VLJ_S0 = gmx_simd_fmadd_r(gmx_simd_mul_r(sixth_S, c6grid_S0), gmx_simd_fmadd_r(rinvsix_nm_S0, gmx_simd_fnmadd_r(expmcr2_S0, poly_S0, one_S), sh_mask_S0), VLJ_S0);
803 VLJ_S2 = gmx_simd_fmadd_r(gmx_simd_mul_r(sixth_S, c6grid_S2), gmx_simd_fmadd_r(rinvsix_nm_S2, gmx_simd_fnmadd_r(expmcr2_S2, poly_S2, one_S), sh_mask_S2), VLJ_S2);
805 #endif /* CALC_ENERGIES */
807 #endif /* LJ_EWALD_GEOM */
809 #if defined VDW_CUTOFF_CHECK
810 /* frLJ is multiplied later by rinvsq, which is masked for the Coulomb
811 * cut-off, but if the VdW cut-off is shorter, we need to mask with that.
813 frLJ_S0 = gmx_simd_blendzero_r(frLJ_S0, wco_vdw_S0);
815 frLJ_S2 = gmx_simd_blendzero_r(frLJ_S2, wco_vdw_S2);
820 /* The potential shift should be removed for pairs beyond cut-off */
821 VLJ_S0 = gmx_simd_blendzero_r(VLJ_S0, wco_vdw_S0);
823 VLJ_S2 = gmx_simd_blendzero_r(VLJ_S2, wco_vdw_S2);
831 /* Extract the group pair index per j pair.
832 * Energy groups are stored per i-cluster, so things get
833 * complicated when the i- and j-cluster size don't match.
838 egps_j = nbat->energrp[cj>>1];
839 egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
841 /* We assume UNROLLI <= UNROLLJ */
843 for (jdi = 0; jdi < UNROLLJ/UNROLLI; jdi++)
846 egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
847 for (jj = 0; jj < (UNROLLI/2); jj++)
849 egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
857 #ifndef ENERGY_GROUPS
858 vctot_S = gmx_simd_add_r(vctot_S, gmx_simd_add_r(vcoul_S0, vcoul_S2));
860 add_ener_grp_halves(vcoul_S0, vctp[0], vctp[1], egp_jj);
861 add_ener_grp_halves(vcoul_S2, vctp[2], vctp[3], egp_jj);
866 #ifndef ENERGY_GROUPS
867 Vvdwtot_S = gmx_simd_add_r(Vvdwtot_S,
869 gmx_simd_add_r(VLJ_S0, VLJ_S2)
875 add_ener_grp_halves(VLJ_S0, vvdwtp[0], vvdwtp[1], egp_jj);
877 add_ener_grp_halves(VLJ_S2, vvdwtp[2], vvdwtp[3], egp_jj);
881 #endif /* CALC_ENERGIES */
885 fscal_S0 = gmx_simd_mul_r(rinvsq_S0, gmx_simd_add_r(frcoul_S0, frLJ_S0));
887 fscal_S0 = gmx_simd_mul_r(rinvsq_S0, frLJ_S0);
890 fscal_S0 = gmx_simd_mul_r(rinvsq_S0, frcoul_S0);
892 #if defined CALC_LJ && !defined HALF_LJ
894 fscal_S2 = gmx_simd_mul_r(rinvsq_S2, gmx_simd_add_r(frcoul_S2, frLJ_S2));
896 fscal_S2 = gmx_simd_mul_r(rinvsq_S2, frLJ_S2);
899 /* Atom 2 and 3 don't have LJ, so only add Coulomb forces */
900 fscal_S2 = gmx_simd_mul_r(rinvsq_S2, frcoul_S2);
903 /* Calculate temporary vectorial force */
904 tx_S0 = gmx_simd_mul_r(fscal_S0, dx_S0);
905 tx_S2 = gmx_simd_mul_r(fscal_S2, dx_S2);
906 ty_S0 = gmx_simd_mul_r(fscal_S0, dy_S0);
907 ty_S2 = gmx_simd_mul_r(fscal_S2, dy_S2);
908 tz_S0 = gmx_simd_mul_r(fscal_S0, dz_S0);
909 tz_S2 = gmx_simd_mul_r(fscal_S2, dz_S2);
911 /* Increment i atom force */
912 fix_S0 = gmx_simd_add_r(fix_S0, tx_S0);
913 fix_S2 = gmx_simd_add_r(fix_S2, tx_S2);
914 fiy_S0 = gmx_simd_add_r(fiy_S0, ty_S0);
915 fiy_S2 = gmx_simd_add_r(fiy_S2, ty_S2);
916 fiz_S0 = gmx_simd_add_r(fiz_S0, tz_S0);
917 fiz_S2 = gmx_simd_add_r(fiz_S2, tz_S2);
919 /* Decrement j atom force */
920 gmx_load_hpr(&fjx_S, f+ajx);
921 gmx_load_hpr(&fjy_S, f+ajy);
922 gmx_load_hpr(&fjz_S, f+ajz);
923 gmx_store_hpr(f+ajx, gmx_sub_hpr(fjx_S, gmx_sum4_hpr(tx_S0, tx_S2)));
924 gmx_store_hpr(f+ajy, gmx_sub_hpr(fjy_S, gmx_sum4_hpr(ty_S0, ty_S2)));
925 gmx_store_hpr(f+ajz, gmx_sub_hpr(fjz_S, gmx_sum4_hpr(tz_S0, tz_S2)));
934 #undef NBNXN_CUTOFF_USE_BLENDV