/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2009, The GROMACS Development Team
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* This is the innermost loop contents for the 4 x N atom SIMD kernel.
 * This flavor of the kernel calculates interactions of 4 i-atoms
 * with N j-atoms stored in N wide SIMD registers.
 */
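
/* As a rough scalar sketch (illustrative only; the loop variables below are
 * not actual kernel variables), one pass over this file computes, for a
 * single j-cluster:
 *
 *     for (i = 0; i < UNROLLI; i++)       // the 4 i-atoms, unrolled as _S0.._S3
 *         for (j = 0; j < UNROLLJ; j++)   // carried out as SIMD lanes, not a loop
 *             force(i,j) = f_coulomb(r_ij) + f_lj(r_ij);
 */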

/* When calculating RF or Ewald interactions we calculate the electrostatic
 * forces on excluded atom pairs here in the non-bonded loops.
 * But when energies and/or the virial are required, we calculate them
 * separately, as that makes it easier to separate the energy and virial
 * contributions.
 */
#if defined CHECK_EXCLS && defined CALC_COULOMB
#define EXCL_FORCES
#endif

/* Without exclusions and energies we only need to mask for the cut-off;
 * this can be faster when we have defined gmx_blendv_pr, i.e. an instruction
 * that selects from two SIMD registers based on the contents of a third.
 */
#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_SIMD_HAVE_BLENDV
/* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
 * With gcc this is slower, except for RF on Sandy Bridge.
 * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
 */
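/* A minimal sketch of the two variants, assuming gmx_blendv_pr follows the
 * usual SSE blendv convention of selecting by the sign bit of its third
 * argument (scalar pseudo-code, not actual kernel variables):
 *
 *     cmp+and:     mask = (rsq < rc2);  x = mask ? x : 0;
 *     sub+blendv:  x = blendv(x, 0, rc2 - rsq);   // rc2 - rsq < 0 exactly beyond the cut-off
 */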
#if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
#define NBNXN_CUTOFF_USE_BLENDV
#endif

/* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
 * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
 */
#if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
#define NBNXN_CUTOFF_USE_BLENDV
#endif
#endif

    int        cj, aj, ajx, ajy, ajz;

    /* Energy group indices for two atoms packed into one int */
    int        egp_jj[UNROLLJ/2];

    /* Interaction (non-exclusion) mask of all 1's or 0's */
    gmx_mm_pb  interact_S0;
    gmx_mm_pb  interact_S1;
    gmx_mm_pb  interact_S2;
    gmx_mm_pb  interact_S3;

    gmx_mm_pr  jx_S, jy_S, jz_S;
    gmx_mm_pr  dx_S0, dy_S0, dz_S0;
    gmx_mm_pr  dx_S1, dy_S1, dz_S1;
    gmx_mm_pr  dx_S2, dy_S2, dz_S2;
    gmx_mm_pr  dx_S3, dy_S3, dz_S3;
    gmx_mm_pr  tx_S0, ty_S0, tz_S0;
    gmx_mm_pr  tx_S1, ty_S1, tz_S1;
    gmx_mm_pr  tx_S2, ty_S2, tz_S2;
    gmx_mm_pr  tx_S3, ty_S3, tz_S3;
    gmx_mm_pr  rsq_S0, rinv_S0, rinvsq_S0;
    gmx_mm_pr  rsq_S1, rinv_S1, rinvsq_S1;
    gmx_mm_pr  rsq_S2, rinv_S2, rinvsq_S2;
    gmx_mm_pr  rsq_S3, rinv_S3, rinvsq_S3;
#ifndef NBNXN_CUTOFF_USE_BLENDV
    /* wco: within cut-off, mask of all 1's or 0's */
    gmx_mm_pb  wco_S0;
    gmx_mm_pb  wco_S1;
    gmx_mm_pb  wco_S2;
    gmx_mm_pb  wco_S3;
#endif
#ifdef VDW_CUTOFF_CHECK
    gmx_mm_pb  wco_vdw_S0;
    gmx_mm_pb  wco_vdw_S1;
    gmx_mm_pb  wco_vdw_S2;
    gmx_mm_pb  wco_vdw_S3;

    /* 1/r masked with the interaction mask */
    gmx_mm_pr  rinv_ex_S0;
    gmx_mm_pr  rinv_ex_S1;
    gmx_mm_pr  rinv_ex_S2;
    gmx_mm_pr  rinv_ex_S3;

    /* The force (PME mesh force) we need to subtract from 1/r^2 */
#ifdef CALC_COUL_EWALD
    gmx_mm_pr  brsq_S0, brsq_S1, brsq_S2, brsq_S3;
    gmx_mm_pr  ewcorr_S0, ewcorr_S1, ewcorr_S2, ewcorr_S3;

    /* frcoul = (1/r - fsub)*r */

    /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
    gmx_mm_pr  r_S0, rs_S0, rf_S0, frac_S0;
    gmx_mm_pr  r_S1, rs_S1, rf_S1, frac_S1;
    gmx_mm_pr  r_S2, rs_S2, rf_S2, frac_S2;
    gmx_mm_pr  r_S3, rs_S3, rf_S3, frac_S3;
    /* Table index: rs truncated to an int */
    gmx_epi32  ti_S0, ti_S1, ti_S2, ti_S3;
    /* Linear force table values */
    gmx_mm_pr  ctab0_S0, ctab1_S0;
    gmx_mm_pr  ctab0_S1, ctab1_S1;
    gmx_mm_pr  ctab0_S2, ctab1_S2;
    gmx_mm_pr  ctab0_S3, ctab1_S3;

    /* Quadratic energy table value */
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
    /* The potential (PME mesh) we need to subtract from 1/r */

    /* Electrostatic potential */

    /* The force times 1/r */

    /* LJ sigma_j/2 and sqrt(epsilon_j) */
    gmx_mm_pr  hsig_j_S, seps_j_S;
    /* LJ sigma_ij and epsilon_ij */
    gmx_mm_pr  sig_S0, eps_S0;
    gmx_mm_pr  sig_S1, eps_S1;
    gmx_mm_pr  sig_S2, eps_S2;
    gmx_mm_pr  sig_S3, eps_S3;

    gmx_mm_pr  sig2_S0, sig6_S0;
    gmx_mm_pr  sig2_S1, sig6_S1;
    gmx_mm_pr  sig2_S2, sig6_S2;
    gmx_mm_pr  sig2_S3, sig6_S3;
#endif /* LJ_COMB_LB */

    gmx_mm_pr  c6s_j_S, c12s_j_S;
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
    /* Index for loading LJ parameters, complicated when interleaving */

    /* LJ C6 and C12 parameters, used with geometric comb. rule */
    gmx_mm_pr  c6_S0, c12_S0;
    gmx_mm_pr  c6_S1, c12_S1;
    gmx_mm_pr  c6_S2, c12_S2;
    gmx_mm_pr  c6_S3, c12_S3;

    /* Intermediate variables for LJ calculation */
    gmx_mm_pr  rinvsix_S0;
    gmx_mm_pr  rinvsix_S1;
    gmx_mm_pr  rinvsix_S2;
    gmx_mm_pr  rinvsix_S3;

    gmx_mm_pr  sir_S0, sir2_S0, sir6_S0;
    gmx_mm_pr  sir_S1, sir2_S1, sir6_S1;
    gmx_mm_pr  sir_S2, sir2_S2, sir6_S2;
    gmx_mm_pr  sir_S3, sir2_S3, sir6_S3;

    gmx_mm_pr  FrLJ6_S0, FrLJ12_S0;
    gmx_mm_pr  FrLJ6_S1, FrLJ12_S1;
    gmx_mm_pr  FrLJ6_S2, FrLJ12_S2;
    gmx_mm_pr  FrLJ6_S3, FrLJ12_S3;

    gmx_mm_pr  VLJ6_S0, VLJ12_S0, VLJ_S0;
    gmx_mm_pr  VLJ6_S1, VLJ12_S1, VLJ_S1;
    gmx_mm_pr  VLJ6_S2, VLJ12_S2, VLJ_S2;
    gmx_mm_pr  VLJ6_S3, VLJ12_S3, VLJ_S3;

    /* j-cluster index */

    /* Atom indices (of the first atom in the cluster) */
#if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
#if UNROLLJ == STRIDE
    aj2 = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
#if UNROLLJ == STRIDE
    ajx = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
#ifdef CHECK_EXCLS
    gmx_load_simd_4xn_interactions(l_cj[cjind].excl,
                                   filter_S0, filter_S1, filter_S2, filter_S3,
                                   &interact_S0, &interact_S1, &interact_S2, &interact_S3);
#endif /* CHECK_EXCLS */

    /* load j atom coordinates */
    jx_S = gmx_load_pr(x+ajx);
    jy_S = gmx_load_pr(x+ajy);
    jz_S = gmx_load_pr(x+ajz);

    /* Calculate distance */
    dx_S0 = gmx_sub_pr(ix_S0, jx_S);
    dy_S0 = gmx_sub_pr(iy_S0, jy_S);
    dz_S0 = gmx_sub_pr(iz_S0, jz_S);
    dx_S1 = gmx_sub_pr(ix_S1, jx_S);
    dy_S1 = gmx_sub_pr(iy_S1, jy_S);
    dz_S1 = gmx_sub_pr(iz_S1, jz_S);
    dx_S2 = gmx_sub_pr(ix_S2, jx_S);
    dy_S2 = gmx_sub_pr(iy_S2, jy_S);
    dz_S2 = gmx_sub_pr(iz_S2, jz_S);
    dx_S3 = gmx_sub_pr(ix_S3, jx_S);
    dy_S3 = gmx_sub_pr(iy_S3, jy_S);
    dz_S3 = gmx_sub_pr(iz_S3, jz_S);

    /* rsq = dx*dx+dy*dy+dz*dz */
    rsq_S0 = gmx_calc_rsq_pr(dx_S0, dy_S0, dz_S0);
    rsq_S1 = gmx_calc_rsq_pr(dx_S1, dy_S1, dz_S1);
    rsq_S2 = gmx_calc_rsq_pr(dx_S2, dy_S2, dz_S2);
    rsq_S3 = gmx_calc_rsq_pr(dx_S3, dy_S3, dz_S3);

#ifndef NBNXN_CUTOFF_USE_BLENDV
    wco_S0 = gmx_cmplt_pr(rsq_S0, rc2_S);
    wco_S1 = gmx_cmplt_pr(rsq_S1, rc2_S);
    wco_S2 = gmx_cmplt_pr(rsq_S2, rc2_S);
    wco_S3 = gmx_cmplt_pr(rsq_S3, rc2_S);

    /* Only remove the (sub-)diagonal to avoid double counting */
#if UNROLLJ == UNROLLI
    wco_S0 = gmx_and_pb(wco_S0, diagonal_mask_S0);
    wco_S1 = gmx_and_pb(wco_S1, diagonal_mask_S1);
    wco_S2 = gmx_and_pb(wco_S2, diagonal_mask_S2);
    wco_S3 = gmx_and_pb(wco_S3, diagonal_mask_S3);

#if UNROLLJ < UNROLLI
    wco_S0 = gmx_and_pb(wco_S0, diagonal_mask0_S0);
    wco_S1 = gmx_and_pb(wco_S1, diagonal_mask0_S1);
    wco_S2 = gmx_and_pb(wco_S2, diagonal_mask0_S2);
    wco_S3 = gmx_and_pb(wco_S3, diagonal_mask0_S3);

    if (cj == ci_sh*2 + 1)
    {
        wco_S0 = gmx_and_pb(wco_S0, diagonal_mask1_S0);
        wco_S1 = gmx_and_pb(wco_S1, diagonal_mask1_S1);
        wco_S2 = gmx_and_pb(wco_S2, diagonal_mask1_S2);
        wco_S3 = gmx_and_pb(wco_S3, diagonal_mask1_S3);
    }

    wco_S0 = gmx_and_pb(wco_S0, diagonal_mask0_S0);
    wco_S1 = gmx_and_pb(wco_S1, diagonal_mask0_S1);
    wco_S2 = gmx_and_pb(wco_S2, diagonal_mask0_S2);
    wco_S3 = gmx_and_pb(wco_S3, diagonal_mask0_S3);
    else if (cj*2 + 1 == ci_sh)
    {
        wco_S0 = gmx_and_pb(wco_S0, diagonal_mask1_S0);
        wco_S1 = gmx_and_pb(wco_S1, diagonal_mask1_S1);
        wco_S2 = gmx_and_pb(wco_S2, diagonal_mask1_S2);
        wco_S3 = gmx_and_pb(wco_S3, diagonal_mask1_S3);
    }
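
    /* When the i- and j-cluster sizes differ, the (sub-)diagonal of the
     * i-cluster presumably spans two j-clusters (or two halves of one
     * j-cluster), which is why two different masks, diagonal_mask0/1,
     * are applied depending on how cj relates to ci_sh.
     */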
#else /* EXCL_FORCES */
    /* No exclusion forces: remove all excluded atom pairs from the list */
    wco_S0 = gmx_and_pb(wco_S0, interact_S0);
    wco_S1 = gmx_and_pb(wco_S1, interact_S1);
    wco_S2 = gmx_and_pb(wco_S2, interact_S2);
    wco_S3 = gmx_and_pb(wco_S3, interact_S3);

    real tmpa[2*GMX_SIMD_WIDTH_HERE], *tmp;
    tmp = gmx_simd_align_real(tmpa);
    for (i = 0; i < UNROLLI; i++)
    {
        gmx_store_pr(tmp, gmx_sub_pr(rc2_S, i == 0 ? rsq_S0 : (i == 1 ? rsq_S1 : (i == 2 ? rsq_S2 : rsq_S3))));
        for (j = 0; j < UNROLLJ; j++)

    /* For excluded pairs add a small number to avoid r^-6 = NaN */
    rsq_S0 = gmx_masknot_add_pr(interact_S0, rsq_S0, avoid_sing_S);
    rsq_S1 = gmx_masknot_add_pr(interact_S1, rsq_S1, avoid_sing_S);
    rsq_S2 = gmx_masknot_add_pr(interact_S2, rsq_S2, avoid_sing_S);
    rsq_S3 = gmx_masknot_add_pr(interact_S3, rsq_S3, avoid_sing_S);

    rinv_S0 = gmx_invsqrt_pr(rsq_S0);
    rinv_S1 = gmx_invsqrt_pr(rsq_S1);
    rinv_S2 = gmx_invsqrt_pr(rsq_S2);
    rinv_S3 = gmx_invsqrt_pr(rsq_S3);

    gmx_mm_invsqrt2_pd(rsq_S0, rsq_S1, &rinv_S0, &rinv_S1);
    gmx_mm_invsqrt2_pd(rsq_S2, rsq_S3, &rinv_S2, &rinv_S3);

    /* Load parameters for j atom */
    jq_S  = gmx_load_pr(q+aj);
    qq_S0 = gmx_mul_pr(iq_S0, jq_S);
    qq_S1 = gmx_mul_pr(iq_S1, jq_S);
    qq_S2 = gmx_mul_pr(iq_S2, jq_S);
    qq_S3 = gmx_mul_pr(iq_S3, jq_S);

#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
    load_lj_pair_params(nbfp0, type, aj, &c6_S0, &c12_S0);
    load_lj_pair_params(nbfp1, type, aj, &c6_S1, &c12_S1);
    load_lj_pair_params(nbfp2, type, aj, &c6_S2, &c12_S2);
    load_lj_pair_params(nbfp3, type, aj, &c6_S3, &c12_S3);
#endif /* not defined any LJ rule */

    c6s_j_S  = gmx_load_pr(ljc+aj2+0);
    c12s_j_S = gmx_load_pr(ljc+aj2+STRIDE);
    c6_S0  = gmx_mul_pr(c6s_S0, c6s_j_S);
    c6_S1  = gmx_mul_pr(c6s_S1, c6s_j_S);
    c6_S2  = gmx_mul_pr(c6s_S2, c6s_j_S);
    c6_S3  = gmx_mul_pr(c6s_S3, c6s_j_S);

    c12_S0 = gmx_mul_pr(c12s_S0, c12s_j_S);
    c12_S1 = gmx_mul_pr(c12s_S1, c12s_j_S);
    c12_S2 = gmx_mul_pr(c12s_S2, c12s_j_S);
    c12_S3 = gmx_mul_pr(c12s_S3, c12s_j_S);
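
    /* For the geometric combination rule the per-atom ljc entries presumably
     * hold sqrt(C6) and sqrt(C12), so the products above give
     * C6_ij = sqrt(C6_i*C6_j) and C12_ij = sqrt(C12_i*C12_j).
     */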
#endif /* LJ_COMB_GEOM */

    hsig_j_S = gmx_load_pr(ljc+aj2+0);
    seps_j_S = gmx_load_pr(ljc+aj2+STRIDE);

    sig_S0 = gmx_add_pr(hsig_i_S0, hsig_j_S);
    sig_S1 = gmx_add_pr(hsig_i_S1, hsig_j_S);
    eps_S0 = gmx_mul_pr(seps_i_S0, seps_j_S);
    eps_S1 = gmx_mul_pr(seps_i_S1, seps_j_S);
    sig_S2 = gmx_add_pr(hsig_i_S2, hsig_j_S);
    sig_S3 = gmx_add_pr(hsig_i_S3, hsig_j_S);
    eps_S2 = gmx_mul_pr(seps_i_S2, seps_j_S);
    eps_S3 = gmx_mul_pr(seps_i_S3, seps_j_S);
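
    /* Lorentz-Berthelot: hsig_* holds sigma/2 and seps_* holds sqrt(epsilon)
     * (see the declarations above), so the adds and multiplies above give
     * sigma_ij = (sigma_i + sigma_j)/2 and epsilon_ij = sqrt(epsilon_i*epsilon_j).
     */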
#endif /* LJ_COMB_LB */

#ifndef NBNXN_CUTOFF_USE_BLENDV
    rinv_S0 = gmx_blendzero_pr(rinv_S0, wco_S0);
    rinv_S1 = gmx_blendzero_pr(rinv_S1, wco_S1);
    rinv_S2 = gmx_blendzero_pr(rinv_S2, wco_S2);
    rinv_S3 = gmx_blendzero_pr(rinv_S3, wco_S3);

    /* We only need to mask for the cut-off: blendv is faster */
    rinv_S0 = gmx_blendv_pr(rinv_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0));
    rinv_S1 = gmx_blendv_pr(rinv_S1, zero_S, gmx_sub_pr(rc2_S, rsq_S1));
    rinv_S2 = gmx_blendv_pr(rinv_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2));
    rinv_S3 = gmx_blendv_pr(rinv_S3, zero_S, gmx_sub_pr(rc2_S, rsq_S3));
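    /* The blend selects zero_S where rc2_S - rsq_S* is negative, i.e. for
     * pairs beyond the cut-off (assuming sign-bit selection, as described
     * near the top of this file).
     */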

    rinvsq_S0 = gmx_mul_pr(rinv_S0, rinv_S0);
    rinvsq_S1 = gmx_mul_pr(rinv_S1, rinv_S1);
    rinvsq_S2 = gmx_mul_pr(rinv_S2, rinv_S2);
    rinvsq_S3 = gmx_mul_pr(rinv_S3, rinv_S3);

    /* Note that here we calculate force*r, not the usual force/r.
     * This allows avoiding masking the reaction-field contribution,
     * as frcoul is later multiplied by rinvsq which has been
     * masked with the cut-off check.
     */

    /* Only add 1/r for non-excluded atom pairs */
    rinv_ex_S0 = gmx_blendzero_pr(rinv_S0, interact_S0);
    rinv_ex_S1 = gmx_blendzero_pr(rinv_S1, interact_S1);
    rinv_ex_S2 = gmx_blendzero_pr(rinv_S2, interact_S2);
    rinv_ex_S3 = gmx_blendzero_pr(rinv_S3, interact_S3);

    /* No exclusion forces, so we always need 1/r */
#define rinv_ex_S0 rinv_S0
#define rinv_ex_S1 rinv_S1
#define rinv_ex_S2 rinv_S2
#define rinv_ex_S3 rinv_S3

    /* Electrostatic interactions */
    frcoul_S0 = gmx_mul_pr(qq_S0, gmx_madd_pr(rsq_S0, mrc_3_S, rinv_ex_S0));
    frcoul_S1 = gmx_mul_pr(qq_S1, gmx_madd_pr(rsq_S1, mrc_3_S, rinv_ex_S1));
    frcoul_S2 = gmx_mul_pr(qq_S2, gmx_madd_pr(rsq_S2, mrc_3_S, rinv_ex_S2));
    frcoul_S3 = gmx_mul_pr(qq_S3, gmx_madd_pr(rsq_S3, mrc_3_S, rinv_ex_S3));

    vcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_add_pr(gmx_mul_pr(rsq_S0, hrc_3_S), moh_rc_S)));
    vcoul_S1 = gmx_mul_pr(qq_S1, gmx_add_pr(rinv_ex_S1, gmx_add_pr(gmx_mul_pr(rsq_S1, hrc_3_S), moh_rc_S)));
    vcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_add_pr(gmx_mul_pr(rsq_S2, hrc_3_S), moh_rc_S)));
    vcoul_S3 = gmx_mul_pr(qq_S3, gmx_add_pr(rinv_ex_S3, gmx_add_pr(gmx_mul_pr(rsq_S3, hrc_3_S), moh_rc_S)));
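
    /* Reaction-field sketch of the two expressions above, assuming mrc_3_S,
     * hrc_3_S and moh_rc_S are set to -2*k_rf, k_rf and -c_rf in the outer
     * kernel (not confirmed here):
     *     frcoul = qq*(1/r - 2*k_rf*r^2)          force times r
     *     vcoul  = qq*(1/r +   k_rf*r^2 - c_rf)   potential
     */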

#ifdef CALC_COUL_EWALD
    /* We need to mask (or limit) rsq for the cut-off,
     * as large distances can cause an overflow in gmx_pmecorrF/V.
     */
#ifndef NBNXN_CUTOFF_USE_BLENDV
    brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S0, wco_S0));
    brsq_S1 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S1, wco_S1));
    brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S2, wco_S2));
    brsq_S3 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S3, wco_S3));

    /* Strangely, putting mul on a separate line is slower (icc 13) */
    brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0)));
    brsq_S1 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S1, zero_S, gmx_sub_pr(rc2_S, rsq_S1)));
    brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2)));
    brsq_S3 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S3, zero_S, gmx_sub_pr(rc2_S, rsq_S3)));
    ewcorr_S0 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S0), beta_S);
    ewcorr_S1 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S1), beta_S);
    ewcorr_S2 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S2), beta_S);
    ewcorr_S3 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S3), beta_S);
    frcoul_S0 = gmx_mul_pr(qq_S0, gmx_madd_pr(ewcorr_S0, brsq_S0, rinv_ex_S0));
    frcoul_S1 = gmx_mul_pr(qq_S1, gmx_madd_pr(ewcorr_S1, brsq_S1, rinv_ex_S1));
    frcoul_S2 = gmx_mul_pr(qq_S2, gmx_madd_pr(ewcorr_S2, brsq_S2, rinv_ex_S2));
    frcoul_S3 = gmx_mul_pr(qq_S3, gmx_madd_pr(ewcorr_S3, brsq_S3, rinv_ex_S3));
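    /* As the declarations above put it, frcoul = qq*(1/r - fsub)*r with fsub
     * the PME mesh force: the madd adds the analytical correction term
     * ewcorr*brsq, a function of (beta*r)^2, to the exclusion-masked 1/r.
     */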

    vc_sub_S0 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S0), beta_S);
    vc_sub_S1 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S1), beta_S);
    vc_sub_S2 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S2), beta_S);
    vc_sub_S3 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S3), beta_S);

#endif /* CALC_COUL_EWALD */

    /* Electrostatic interactions */
    r_S0 = gmx_mul_pr(rsq_S0, rinv_S0);
    r_S1 = gmx_mul_pr(rsq_S1, rinv_S1);
    r_S2 = gmx_mul_pr(rsq_S2, rinv_S2);
    r_S3 = gmx_mul_pr(rsq_S3, rinv_S3);
    /* Convert r to scaled table units */
    rs_S0 = gmx_mul_pr(r_S0, invtsp_S);
    rs_S1 = gmx_mul_pr(r_S1, invtsp_S);
    rs_S2 = gmx_mul_pr(r_S2, invtsp_S);
    rs_S3 = gmx_mul_pr(r_S3, invtsp_S);
    /* Truncate scaled r to an int */
    ti_S0 = gmx_cvttpr_epi32(rs_S0);
    ti_S1 = gmx_cvttpr_epi32(rs_S1);
    ti_S2 = gmx_cvttpr_epi32(rs_S2);
    ti_S3 = gmx_cvttpr_epi32(rs_S3);
#ifdef GMX_SIMD_HAVE_FLOOR
    /* SSE4.1 floor is faster than gmx_cvtepi32_ps int->float cast */
    rf_S0 = gmx_floor_pr(rs_S0);
    rf_S1 = gmx_floor_pr(rs_S1);
    rf_S2 = gmx_floor_pr(rs_S2);
    rf_S3 = gmx_floor_pr(rs_S3);

    rf_S0 = gmx_cvtepi32_pr(ti_S0);
    rf_S1 = gmx_cvtepi32_pr(ti_S1);
    rf_S2 = gmx_cvtepi32_pr(ti_S2);
    rf_S3 = gmx_cvtepi32_pr(ti_S3);

    frac_S0 = gmx_sub_pr(rs_S0, rf_S0);
    frac_S1 = gmx_sub_pr(rs_S1, rf_S1);
    frac_S2 = gmx_sub_pr(rs_S2, rf_S2);
    frac_S3 = gmx_sub_pr(rs_S3, rf_S3);

    /* Load and interpolate table forces and possibly energies.
     * Force and energy can be combined in one table, stride 4: FDV0
     * or in two separate tables with stride 1: F and V.
     * Currently single precision uses FDV0, double precision uses F and V.
     */
#ifndef CALC_ENERGIES
    load_table_f(tab_coul_F, ti_S0, ti0, &ctab0_S0, &ctab1_S0);
    load_table_f(tab_coul_F, ti_S1, ti1, &ctab0_S1, &ctab1_S1);
    load_table_f(tab_coul_F, ti_S2, ti2, &ctab0_S2, &ctab1_S2);
    load_table_f(tab_coul_F, ti_S3, ti3, &ctab0_S3, &ctab1_S3);

    load_table_f_v(tab_coul_F, ti_S0, ti0, &ctab0_S0, &ctab1_S0, &ctabv_S0);
    load_table_f_v(tab_coul_F, ti_S1, ti1, &ctab0_S1, &ctab1_S1, &ctabv_S1);
    load_table_f_v(tab_coul_F, ti_S2, ti2, &ctab0_S2, &ctab1_S2, &ctabv_S2);
    load_table_f_v(tab_coul_F, ti_S3, ti3, &ctab0_S3, &ctab1_S3, &ctabv_S3);

    load_table_f_v(tab_coul_F, tab_coul_V, ti_S0, ti0, &ctab0_S0, &ctab1_S0, &ctabv_S0);
    load_table_f_v(tab_coul_F, tab_coul_V, ti_S1, ti1, &ctab0_S1, &ctab1_S1, &ctabv_S1);
    load_table_f_v(tab_coul_F, tab_coul_V, ti_S2, ti2, &ctab0_S2, &ctab1_S2, &ctabv_S2);
    load_table_f_v(tab_coul_F, tab_coul_V, ti_S3, ti3, &ctab0_S3, &ctab1_S3, &ctabv_S3);

    fsub_S0 = gmx_add_pr(ctab0_S0, gmx_mul_pr(frac_S0, ctab1_S0));
    fsub_S1 = gmx_add_pr(ctab0_S1, gmx_mul_pr(frac_S1, ctab1_S1));
    fsub_S2 = gmx_add_pr(ctab0_S2, gmx_mul_pr(frac_S2, ctab1_S2));
    fsub_S3 = gmx_add_pr(ctab0_S3, gmx_mul_pr(frac_S3, ctab1_S3));
    frcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, gmx_mul_pr(fsub_S0, r_S0)));
    frcoul_S1 = gmx_mul_pr(qq_S1, gmx_sub_pr(rinv_ex_S1, gmx_mul_pr(fsub_S1, r_S1)));
    frcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, gmx_mul_pr(fsub_S2, r_S2)));
    frcoul_S3 = gmx_mul_pr(qq_S3, gmx_sub_pr(rinv_ex_S3, gmx_mul_pr(fsub_S3, r_S3)));

    vc_sub_S0 = gmx_add_pr(ctabv_S0, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S0), gmx_add_pr(ctab0_S0, fsub_S0)));
    vc_sub_S1 = gmx_add_pr(ctabv_S1, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S1), gmx_add_pr(ctab0_S1, fsub_S1)));
    vc_sub_S2 = gmx_add_pr(ctabv_S2, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S2), gmx_add_pr(ctab0_S2, fsub_S2)));
    vc_sub_S3 = gmx_add_pr(ctabv_S3, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S3), gmx_add_pr(ctab0_S3, fsub_S3)));
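
    /* Table interpolation sketch: fsub is the linearly interpolated table
     * force, fsub = F[ti] + frac*dF[ti], and vc_sub integrates it back from
     * the table point to r (trapezoid rule),
     *     vc_sub = V[ti] - (sp/2)*frac*(F[ti] + fsub),
     * assuming ctab0/ctab1 hold F[ti] and its increment dF[ti], ctabv holds
     * V[ti], and mhalfsp_S holds minus half the table spacing sp.
     */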
#endif /* CALC_COUL_TAB */

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
#ifndef NO_SHIFT_EWALD
    /* Add Ewald potential shift to vc_sub for convenience */
    vc_sub_S0 = gmx_add_pr(vc_sub_S0, gmx_blendzero_pr(sh_ewald_S, interact_S0));
    vc_sub_S1 = gmx_add_pr(vc_sub_S1, gmx_blendzero_pr(sh_ewald_S, interact_S1));
    vc_sub_S2 = gmx_add_pr(vc_sub_S2, gmx_blendzero_pr(sh_ewald_S, interact_S2));
    vc_sub_S3 = gmx_add_pr(vc_sub_S3, gmx_blendzero_pr(sh_ewald_S, interact_S3));

    vc_sub_S0 = gmx_add_pr(vc_sub_S0, sh_ewald_S);
    vc_sub_S1 = gmx_add_pr(vc_sub_S1, sh_ewald_S);
    vc_sub_S2 = gmx_add_pr(vc_sub_S2, sh_ewald_S);
    vc_sub_S3 = gmx_add_pr(vc_sub_S3, sh_ewald_S);

    vcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, vc_sub_S0));
    vcoul_S1 = gmx_mul_pr(qq_S1, gmx_sub_pr(rinv_ex_S1, vc_sub_S1));
    vcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, vc_sub_S2));
    vcoul_S3 = gmx_mul_pr(qq_S3, gmx_sub_pr(rinv_ex_S3, vc_sub_S3));
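
    /* As noted in the declarations above, vc_sub is the (PME mesh) potential
     * to be subtracted from 1/r, optionally including the Ewald shift, so
     * vcoul = qq*(1/r - vc_sub) is the short-range Coulomb energy of the pair.
     */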

    /* Mask energy for cut-off and diagonal */
    vcoul_S0 = gmx_blendzero_pr(vcoul_S0, wco_S0);
    vcoul_S1 = gmx_blendzero_pr(vcoul_S1, wco_S1);
    vcoul_S2 = gmx_blendzero_pr(vcoul_S2, wco_S2);
    vcoul_S3 = gmx_blendzero_pr(vcoul_S3, wco_S3);

#endif /* CALC_COULOMB */

    /* Lennard-Jones interaction */
#ifdef VDW_CUTOFF_CHECK
    wco_vdw_S0 = gmx_cmplt_pr(rsq_S0, rcvdw2_S);
    wco_vdw_S1 = gmx_cmplt_pr(rsq_S1, rcvdw2_S);
    wco_vdw_S2 = gmx_cmplt_pr(rsq_S2, rcvdw2_S);
    wco_vdw_S3 = gmx_cmplt_pr(rsq_S3, rcvdw2_S);

    /* Same cut-off for Coulomb and VdW, reuse the registers */
#define wco_vdw_S0 wco_S0
#define wco_vdw_S1 wco_S1
#define wco_vdw_S2 wco_S2
#define wco_vdw_S3 wco_S3

    rinvsix_S0 = gmx_mul_pr(rinvsq_S0, gmx_mul_pr(rinvsq_S0, rinvsq_S0));
    rinvsix_S1 = gmx_mul_pr(rinvsq_S1, gmx_mul_pr(rinvsq_S1, rinvsq_S1));
    rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, interact_S0);
    rinvsix_S1 = gmx_blendzero_pr(rinvsix_S1, interact_S1);
    rinvsix_S2 = gmx_mul_pr(rinvsq_S2, gmx_mul_pr(rinvsq_S2, rinvsq_S2));
    rinvsix_S3 = gmx_mul_pr(rinvsq_S3, gmx_mul_pr(rinvsq_S3, rinvsq_S3));
    rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, interact_S2);
    rinvsix_S3 = gmx_blendzero_pr(rinvsix_S3, interact_S3);
#ifdef VDW_CUTOFF_CHECK
    rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, wco_vdw_S0);
    rinvsix_S1 = gmx_blendzero_pr(rinvsix_S1, wco_vdw_S1);
    rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, wco_vdw_S2);
    rinvsix_S3 = gmx_blendzero_pr(rinvsix_S3, wco_vdw_S3);

    FrLJ6_S0 = gmx_mul_pr(c6_S0, rinvsix_S0);
    FrLJ6_S1 = gmx_mul_pr(c6_S1, rinvsix_S1);
    FrLJ6_S2 = gmx_mul_pr(c6_S2, rinvsix_S2);
    FrLJ6_S3 = gmx_mul_pr(c6_S3, rinvsix_S3);

    FrLJ12_S0 = gmx_mul_pr(c12_S0, gmx_mul_pr(rinvsix_S0, rinvsix_S0));
    FrLJ12_S1 = gmx_mul_pr(c12_S1, gmx_mul_pr(rinvsix_S1, rinvsix_S1));
    FrLJ12_S2 = gmx_mul_pr(c12_S2, gmx_mul_pr(rinvsix_S2, rinvsix_S2));
    FrLJ12_S3 = gmx_mul_pr(c12_S3, gmx_mul_pr(rinvsix_S3, rinvsix_S3));
#endif /* not LJ_COMB_LB */

    sir_S0 = gmx_mul_pr(sig_S0, rinv_S0);
    sir_S1 = gmx_mul_pr(sig_S1, rinv_S1);
    sir_S2 = gmx_mul_pr(sig_S2, rinv_S2);
    sir_S3 = gmx_mul_pr(sig_S3, rinv_S3);

    sir2_S0 = gmx_mul_pr(sir_S0, sir_S0);
    sir2_S1 = gmx_mul_pr(sir_S1, sir_S1);
    sir2_S2 = gmx_mul_pr(sir_S2, sir_S2);
    sir2_S3 = gmx_mul_pr(sir_S3, sir_S3);

    sir6_S0 = gmx_mul_pr(sir2_S0, gmx_mul_pr(sir2_S0, sir2_S0));
    sir6_S1 = gmx_mul_pr(sir2_S1, gmx_mul_pr(sir2_S1, sir2_S1));
    sir6_S0 = gmx_blendzero_pr(sir6_S0, interact_S0);
    sir6_S1 = gmx_blendzero_pr(sir6_S1, interact_S1);
    sir6_S2 = gmx_mul_pr(sir2_S2, gmx_mul_pr(sir2_S2, sir2_S2));
    sir6_S3 = gmx_mul_pr(sir2_S3, gmx_mul_pr(sir2_S3, sir2_S3));
    sir6_S2 = gmx_blendzero_pr(sir6_S2, interact_S2);
    sir6_S3 = gmx_blendzero_pr(sir6_S3, interact_S3);
#ifdef VDW_CUTOFF_CHECK
    sir6_S0 = gmx_blendzero_pr(sir6_S0, wco_vdw_S0);
    sir6_S1 = gmx_blendzero_pr(sir6_S1, wco_vdw_S1);
    sir6_S2 = gmx_blendzero_pr(sir6_S2, wco_vdw_S2);
    sir6_S3 = gmx_blendzero_pr(sir6_S3, wco_vdw_S3);

    FrLJ6_S0 = gmx_mul_pr(eps_S0, sir6_S0);
    FrLJ6_S1 = gmx_mul_pr(eps_S1, sir6_S1);
    FrLJ6_S2 = gmx_mul_pr(eps_S2, sir6_S2);
    FrLJ6_S3 = gmx_mul_pr(eps_S3, sir6_S3);

    FrLJ12_S0 = gmx_mul_pr(FrLJ6_S0, sir6_S0);
    FrLJ12_S1 = gmx_mul_pr(FrLJ6_S1, sir6_S1);
    FrLJ12_S2 = gmx_mul_pr(FrLJ6_S2, sir6_S2);
    FrLJ12_S3 = gmx_mul_pr(FrLJ6_S3, sir6_S3);

#if defined CALC_ENERGIES
    /* We need C6 and C12 to calculate the LJ potential shift */
    sig2_S0 = gmx_mul_pr(sig_S0, sig_S0);
    sig2_S1 = gmx_mul_pr(sig_S1, sig_S1);
    sig2_S2 = gmx_mul_pr(sig_S2, sig_S2);
    sig2_S3 = gmx_mul_pr(sig_S3, sig_S3);

    sig6_S0 = gmx_mul_pr(sig2_S0, gmx_mul_pr(sig2_S0, sig2_S0));
    sig6_S1 = gmx_mul_pr(sig2_S1, gmx_mul_pr(sig2_S1, sig2_S1));
    sig6_S2 = gmx_mul_pr(sig2_S2, gmx_mul_pr(sig2_S2, sig2_S2));
    sig6_S3 = gmx_mul_pr(sig2_S3, gmx_mul_pr(sig2_S3, sig2_S3));

    c6_S0 = gmx_mul_pr(eps_S0, sig6_S0);
    c6_S1 = gmx_mul_pr(eps_S1, sig6_S1);
    c6_S2 = gmx_mul_pr(eps_S2, sig6_S2);
    c6_S3 = gmx_mul_pr(eps_S3, sig6_S3);

    c12_S0 = gmx_mul_pr(c6_S0, sig6_S0);
    c12_S1 = gmx_mul_pr(c6_S1, sig6_S1);
    c12_S2 = gmx_mul_pr(c6_S2, sig6_S2);
    c12_S3 = gmx_mul_pr(c6_S3, sig6_S3);
#endif /* LJ_COMB_LB */

    /* Extract the group pair index per j pair.
     * Energy groups are stored per i-cluster, so things get
     * complicated when the i- and j-cluster sizes don't match.
     */
    egps_j    = nbat->energrp[cj>>1];
    egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;

    /* We assume UNROLLI <= UNROLLJ */
    for (jdi = 0; jdi < UNROLLJ/UNROLLI; jdi++)
    {
        egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
        for (jj = 0; jj < (UNROLLI/2); jj++)
        {
            egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
        }
    }
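
    /* egps_j packs the energy-group indices of several j-atoms into one int
     * (see the egp_jj declaration above); the shift/mask/stride arithmetic
     * unpacks one group index per pair of atoms and converts it into an
     * offset into the per-group energy buffers used by add_ener_grp() below.
     */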

#ifndef ENERGY_GROUPS
    vctot_S = gmx_add_pr(vctot_S, gmx_sum4_pr(vcoul_S0, vcoul_S1, vcoul_S2, vcoul_S3));

    add_ener_grp(vcoul_S0, vctp[0], egp_jj);
    add_ener_grp(vcoul_S1, vctp[1], egp_jj);
    add_ener_grp(vcoul_S2, vctp[2], egp_jj);
    add_ener_grp(vcoul_S3, vctp[3], egp_jj);

    /* Calculate the LJ energies */
    VLJ6_S0 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S0, gmx_mul_pr(c6_S0, sh_invrc6_S)));
    VLJ6_S1 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S1, gmx_mul_pr(c6_S1, sh_invrc6_S)));
    VLJ6_S2 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S2, gmx_mul_pr(c6_S2, sh_invrc6_S)));
    VLJ6_S3 = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S3, gmx_mul_pr(c6_S3, sh_invrc6_S)));

    VLJ12_S0 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S0, gmx_mul_pr(c12_S0, sh_invrc12_S)));
    VLJ12_S1 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S1, gmx_mul_pr(c12_S1, sh_invrc12_S)));
    VLJ12_S2 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S2, gmx_mul_pr(c12_S2, sh_invrc12_S)));
    VLJ12_S3 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S3, gmx_mul_pr(c12_S3, sh_invrc12_S)));
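
    /* The sh_invrc6_S/sh_invrc12_S terms shift each contribution so that it
     * vanishes at the cut-off, and the 1/6 and 1/12 factors convert the
     * force-scaled FrLJ terms to energies; this assumes the C6/C12 parameters
     * carry the usual GROMACS 6/12 derivative prefactors (not confirmed here).
     */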

    VLJ_S0 = gmx_sub_pr(VLJ12_S0, VLJ6_S0);
    VLJ_S1 = gmx_sub_pr(VLJ12_S1, VLJ6_S1);
    VLJ_S2 = gmx_sub_pr(VLJ12_S2, VLJ6_S2);
    VLJ_S3 = gmx_sub_pr(VLJ12_S3, VLJ6_S3);

    /* The potential shift should be removed for pairs beyond the cut-off */
    VLJ_S0 = gmx_blendzero_pr(VLJ_S0, wco_vdw_S0);
    VLJ_S1 = gmx_blendzero_pr(VLJ_S1, wco_vdw_S1);
    VLJ_S2 = gmx_blendzero_pr(VLJ_S2, wco_vdw_S2);
    VLJ_S3 = gmx_blendzero_pr(VLJ_S3, wco_vdw_S3);

    /* The potential shift should be removed for excluded pairs */
    VLJ_S0 = gmx_blendzero_pr(VLJ_S0, interact_S0);
    VLJ_S1 = gmx_blendzero_pr(VLJ_S1, interact_S1);
    VLJ_S2 = gmx_blendzero_pr(VLJ_S2, interact_S2);
    VLJ_S3 = gmx_blendzero_pr(VLJ_S3, interact_S3);

#ifndef ENERGY_GROUPS
    Vvdwtot_S = gmx_add_pr(Vvdwtot_S,
#ifndef HALF_LJ
                           gmx_sum4_pr(VLJ_S0, VLJ_S1, VLJ_S2, VLJ_S3)
#else
                           gmx_add_pr(VLJ_S0, VLJ_S1)
#endif
                           );

    add_ener_grp(VLJ_S0, vvdwtp[0], egp_jj);
    add_ener_grp(VLJ_S1, vvdwtp[1], egp_jj);
    add_ener_grp(VLJ_S2, vvdwtp[2], egp_jj);
    add_ener_grp(VLJ_S3, vvdwtp[3], egp_jj);
#endif /* CALC_ENERGIES */

    fscal_S0 = gmx_mul_pr(rinvsq_S0,
                          gmx_add_pr(frcoul_S0,
                                     gmx_sub_pr(FrLJ12_S0, FrLJ6_S0)));
    fscal_S1 = gmx_mul_pr(rinvsq_S1,
                          gmx_add_pr(frcoul_S1,
                                     gmx_sub_pr(FrLJ12_S1, FrLJ6_S1)));

    fscal_S0 = gmx_mul_pr(rinvsq_S0, frcoul_S0);
    fscal_S1 = gmx_mul_pr(rinvsq_S1, frcoul_S1);
#if defined CALC_LJ && !defined HALF_LJ
    fscal_S2 = gmx_mul_pr(rinvsq_S2,
                          gmx_add_pr(frcoul_S2,
                                     gmx_sub_pr(FrLJ12_S2, FrLJ6_S2)));
    fscal_S3 = gmx_mul_pr(rinvsq_S3,
                          gmx_add_pr(frcoul_S3,
                                     gmx_sub_pr(FrLJ12_S3, FrLJ6_S3)));

    /* Atoms 2 and 3 don't have LJ, so only add Coulomb forces */
    fscal_S2 = gmx_mul_pr(rinvsq_S2, frcoul_S2);
    fscal_S3 = gmx_mul_pr(rinvsq_S3, frcoul_S3);
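
    /* fscal is the total force times 1/r (see its declaration above): frcoul
     * and FrLJ12 - FrLJ6 are both force*r quantities, so multiplying by
     * rinvsq gives F/r, and multiplying by the distance components below
     * yields the Cartesian force vector.
     */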

    /* Calculate temporary vectorial force */
    tx_S0 = gmx_mul_pr(fscal_S0, dx_S0);
    tx_S1 = gmx_mul_pr(fscal_S1, dx_S1);
    tx_S2 = gmx_mul_pr(fscal_S2, dx_S2);
    tx_S3 = gmx_mul_pr(fscal_S3, dx_S3);
    ty_S0 = gmx_mul_pr(fscal_S0, dy_S0);
    ty_S1 = gmx_mul_pr(fscal_S1, dy_S1);
    ty_S2 = gmx_mul_pr(fscal_S2, dy_S2);
    ty_S3 = gmx_mul_pr(fscal_S3, dy_S3);
    tz_S0 = gmx_mul_pr(fscal_S0, dz_S0);
    tz_S1 = gmx_mul_pr(fscal_S1, dz_S1);
    tz_S2 = gmx_mul_pr(fscal_S2, dz_S2);
    tz_S3 = gmx_mul_pr(fscal_S3, dz_S3);

    /* Increment i atom force */
    fix_S0 = gmx_add_pr(fix_S0, tx_S0);
    fix_S1 = gmx_add_pr(fix_S1, tx_S1);
    fix_S2 = gmx_add_pr(fix_S2, tx_S2);
    fix_S3 = gmx_add_pr(fix_S3, tx_S3);
    fiy_S0 = gmx_add_pr(fiy_S0, ty_S0);
    fiy_S1 = gmx_add_pr(fiy_S1, ty_S1);
    fiy_S2 = gmx_add_pr(fiy_S2, ty_S2);
    fiy_S3 = gmx_add_pr(fiy_S3, ty_S3);
    fiz_S0 = gmx_add_pr(fiz_S0, tz_S0);
    fiz_S1 = gmx_add_pr(fiz_S1, tz_S1);
    fiz_S2 = gmx_add_pr(fiz_S2, tz_S2);
    fiz_S3 = gmx_add_pr(fiz_S3, tz_S3);

    /* Decrement j atom force */
    gmx_store_pr(f+ajx,
                 gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_S0, tx_S1, tx_S2, tx_S3) ));
    gmx_store_pr(f+ajy,
                 gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_S0, ty_S1, ty_S2, ty_S3) ));
    gmx_store_pr(f+ajz,
                 gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_S0, tz_S1, tz_S2, tz_S3) ));
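
    /* Newton's third law: the force accumulated on the four i-atoms above is
     * subtracted from the j-atoms here, one SIMD-wide update per coordinate
     * component.
     */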

#undef NBNXN_CUTOFF_USE_BLENDV