/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* This is the innermost loop contents for the 4 x N atom SIMD kernel.
 * This flavor of the kernel calculates interactions of 4 i-atoms
 * with N j-atoms stored in N-wide SIMD registers.
 */
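/* For orientation, a scalar sketch of the work one iteration of this loop
 * performs, with SIMD, exclusions and cut-off handling stripped away. The
 * names used here (nj, pair_force) are hypothetical and not part of the
 * kernel; the block is disabled so it is never compiled.
 */
#if 0
static void sketch_4xn_iteration(int nj, const real xi[4][3],
                                 const real xj[][3],
                                 real fi[4][3], real fj[][3])
{
    int i, j, d;

    for (i = 0; i < 4; i++)          /* the 4 i-atoms, one SIMD register each */
    {
        for (j = 0; j < nj; j++)     /* the N j-atoms of one j-cluster        */
        {
            real dx[3], rsq = 0, fscal;

            for (d = 0; d < 3; d++)
            {
                dx[d] = xi[i][d] - xj[j][d];
                rsq  += dx[d]*dx[d];
            }
            fscal = pair_force(rsq); /* Coulomb + LJ force/r, see below */
            for (d = 0; d < 3; d++)
            {
                fi[i][d] += fscal*dx[d];
                fj[j][d] -= fscal*dx[d];
            }
        }
    }
}
#endif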
/* When calculating RF or Ewald interactions we calculate the electrostatic
 * forces on excluded atom pairs here in the non-bonded loops.
 * But when energies and/or the virial are required, we calculate them
 * separately, as it is then easier to separate the energy and virial
 * contributions.
 */
#if defined CHECK_EXCLS && defined CALC_COULOMB
#define EXCL_FORCES
#endif
/* Without exclusions and energies we only need to mask the cut-off.
 * This can be faster when we have defined gmx_blendv_pr, i.e. an instruction
 * that selects from two SIMD registers based on the contents of a third.
 */
#if !(defined CHECK_EXCLS || defined CALC_ENERGIES) && defined GMX_SIMD_HAVE_BLENDV
/* With RF and tabulated Coulomb we replace cmp+and with sub+blendv.
 * With gcc this is slower, except for RF on Sandy Bridge.
 * Tested with gcc 4.6.2, 4.6.3 and 4.7.1.
 */
#if (defined CALC_COUL_RF || defined CALC_COUL_TAB) && (!defined __GNUC__ || (defined CALC_COUL_RF && defined GMX_X86_AVX_256))
#define NBNXN_CUTOFF_USE_BLENDV
#endif
/* With analytical Ewald we replace cmp+and+and with sub+blendv+blendv.
 * This is only faster with icc on Sandy Bridge (PS kernel slower than gcc 4.7).
 */
#if defined CALC_COUL_EWALD && defined __INTEL_COMPILER && defined GMX_X86_AVX_256
#define NBNXN_CUTOFF_USE_BLENDV
#endif
#endif
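/* The two masking strategies written out in plain SSE intrinsics for single
 * precision, for illustration only; the kernel always goes through the gmx_*
 * wrappers, and this block is never compiled.
 */
#if 0
#include <xmmintrin.h>  /* SSE    */
#include <smmintrin.h>  /* SSE4.1 */

/* cmp+and: build an all-ones mask where rsq < rc2 and AND it into rinv */
static __m128 cutoff_cmp_and(__m128 rinv, __m128 rsq, __m128 rc2)
{
    return _mm_and_ps(rinv, _mm_cmplt_ps(rsq, rc2));
}

/* sub+blendv: blendv selects the zero operand where the sign bit of
 * rc2 - rsq is set, i.e. exactly for pairs beyond the cut-off.
 */
static __m128 cutoff_sub_blendv(__m128 rinv, __m128 rsq, __m128 rc2)
{
    return _mm_blendv_ps(rinv, _mm_setzero_ps(), _mm_sub_ps(rc2, rsq));
}
#endif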
{
    int        cj, aj, ajx, ajy, ajz;

#ifdef ENERGY_GROUPS
    /* Energy group indices for two atoms packed into one int */
    int        egp_jj[UNROLLJ/2];
#endif
#ifdef CHECK_EXCLS
    /* Interaction (non-exclusion) mask of all 1's or 0's */
    gmx_mm_pb  interact_S0;
    gmx_mm_pb  interact_S1;
    gmx_mm_pb  interact_S2;
    gmx_mm_pb  interact_S3;
#endif
    gmx_mm_pr  jx_S, jy_S, jz_S;
    gmx_mm_pr  dx_S0, dy_S0, dz_S0;
    gmx_mm_pr  dx_S1, dy_S1, dz_S1;
    gmx_mm_pr  dx_S2, dy_S2, dz_S2;
    gmx_mm_pr  dx_S3, dy_S3, dz_S3;
    gmx_mm_pr  tx_S0, ty_S0, tz_S0;
    gmx_mm_pr  tx_S1, ty_S1, tz_S1;
    gmx_mm_pr  tx_S2, ty_S2, tz_S2;
    gmx_mm_pr  tx_S3, ty_S3, tz_S3;
    gmx_mm_pr  rsq_S0, rinv_S0, rinvsq_S0;
    gmx_mm_pr  rsq_S1, rinv_S1, rinvsq_S1;
    gmx_mm_pr  rsq_S2, rinv_S2, rinvsq_S2;
    gmx_mm_pr  rsq_S3, rinv_S3, rinvsq_S3;
#ifndef NBNXN_CUTOFF_USE_BLENDV
    /* wco: within cut-off, mask of all 1's or 0's */
    gmx_mm_pb  wco_S0;
    gmx_mm_pb  wco_S1;
    gmx_mm_pb  wco_S2;
    gmx_mm_pb  wco_S3;
#endif
#ifdef VDW_CUTOFF_CHECK
    gmx_mm_pb  wco_vdw_S0;
    gmx_mm_pb  wco_vdw_S1;
    gmx_mm_pb  wco_vdw_S2;
    gmx_mm_pb  wco_vdw_S3;
#endif
    /* 1/r masked with the interaction mask */
    gmx_mm_pr  rinv_ex_S0;
    gmx_mm_pr  rinv_ex_S1;
    gmx_mm_pr  rinv_ex_S2;
    gmx_mm_pr  rinv_ex_S3;
    /* The force (PME mesh force) we need to subtract from 1/r^2 */
    gmx_mm_pr  fsub_S0;
    gmx_mm_pr  fsub_S1;
    gmx_mm_pr  fsub_S2;
    gmx_mm_pr  fsub_S3;
#ifdef CALC_COUL_EWALD
    gmx_mm_pr  brsq_S0, brsq_S1, brsq_S2, brsq_S3;
    gmx_mm_pr  ewcorr_S0, ewcorr_S1, ewcorr_S2, ewcorr_S3;
#endif
    /* frcoul = (1/r - fsub)*r */
    gmx_mm_pr  frcoul_S0;
    gmx_mm_pr  frcoul_S1;
    gmx_mm_pr  frcoul_S2;
    gmx_mm_pr  frcoul_S3;
    /* For tables: r, rs=r/sp, rf=floor(rs), frac=rs-rf */
    gmx_mm_pr  r_S0, rs_S0, rf_S0, frac_S0;
    gmx_mm_pr  r_S1, rs_S1, rf_S1, frac_S1;
    gmx_mm_pr  r_S2, rs_S2, rf_S2, frac_S2;
    gmx_mm_pr  r_S3, rs_S3, rf_S3, frac_S3;
    /* Table index: rs truncated to an int */
    gmx_epi32  ti_S0, ti_S1, ti_S2, ti_S3;
    /* Linear force table values */
    gmx_mm_pr  ctab0_S0, ctab1_S0;
    gmx_mm_pr  ctab0_S1, ctab1_S1;
    gmx_mm_pr  ctab0_S2, ctab1_S2;
    gmx_mm_pr  ctab0_S3, ctab1_S3;
#ifdef CALC_ENERGIES
    /* Quadratic energy table value */
    gmx_mm_pr  ctabv_S0, ctabv_S1, ctabv_S2, ctabv_S3;
#endif
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
    /* The potential (PME mesh) we need to subtract from 1/r */
    gmx_mm_pr  vc_sub_S0;
    gmx_mm_pr  vc_sub_S1;
    gmx_mm_pr  vc_sub_S2;
    gmx_mm_pr  vc_sub_S3;
#endif
#ifdef CALC_ENERGIES
    /* Electrostatic potential */
    gmx_mm_pr  vcoul_S0;
    gmx_mm_pr  vcoul_S1;
    gmx_mm_pr  vcoul_S2;
    gmx_mm_pr  vcoul_S3;
#endif
    /* The force times 1/r */
    gmx_mm_pr  fscal_S0;
    gmx_mm_pr  fscal_S1;
    gmx_mm_pr  fscal_S2;
    gmx_mm_pr  fscal_S3;
#ifdef LJ_COMB_LB
    /* LJ sigma_j/2 and sqrt(epsilon_j) */
    gmx_mm_pr  hsig_j_S, seps_j_S;
    /* LJ sigma_ij and epsilon_ij */
    gmx_mm_pr  sig_S0, eps_S0;
    gmx_mm_pr  sig_S1, eps_S1;
    gmx_mm_pr  sig_S2, eps_S2;
    gmx_mm_pr  sig_S3, eps_S3;

#ifdef CALC_ENERGIES
    gmx_mm_pr  sig2_S0, sig6_S0;
    gmx_mm_pr  sig2_S1, sig6_S1;
    gmx_mm_pr  sig2_S2, sig6_S2;
    gmx_mm_pr  sig2_S3, sig6_S3;
#endif
#endif /* LJ_COMB_LB */
#ifdef LJ_COMB_GEOM
    gmx_mm_pr  c6s_j_S, c12s_j_S;
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
    /* Index for loading LJ parameters, complicated when interleaving */
    int        aj2;
#endif
    /* LJ C6 and C12 parameters, used with geometric comb. rule */
    gmx_mm_pr  c6_S0, c12_S0;
    gmx_mm_pr  c6_S1, c12_S1;
    gmx_mm_pr  c6_S2, c12_S2;
    gmx_mm_pr  c6_S3, c12_S3;
    /* Intermediate variables for LJ calculation */
    gmx_mm_pr  rinvsix_S0;
    gmx_mm_pr  rinvsix_S1;
    gmx_mm_pr  rinvsix_S2;
    gmx_mm_pr  rinvsix_S3;

    gmx_mm_pr  sir_S0, sir2_S0, sir6_S0;
    gmx_mm_pr  sir_S1, sir2_S1, sir6_S1;
    gmx_mm_pr  sir_S2, sir2_S2, sir6_S2;
    gmx_mm_pr  sir_S3, sir2_S3, sir6_S3;

    gmx_mm_pr  FrLJ6_S0, FrLJ12_S0;
    gmx_mm_pr  FrLJ6_S1, FrLJ12_S1;
    gmx_mm_pr  FrLJ6_S2, FrLJ12_S2;
    gmx_mm_pr  FrLJ6_S3, FrLJ12_S3;

    gmx_mm_pr  VLJ6_S0, VLJ12_S0, VLJ_S0;
    gmx_mm_pr  VLJ6_S1, VLJ12_S1, VLJ_S1;
    gmx_mm_pr  VLJ6_S2, VLJ12_S2, VLJ_S2;
    gmx_mm_pr  VLJ6_S3, VLJ12_S3, VLJ_S3;
    /* j-cluster index */
    cj  = l_cj[cjind].cj;

    /* Atom indices (of the first atom in the cluster) */
    aj  = cj*UNROLLJ;
#if defined CALC_LJ && (defined LJ_COMB_GEOM || defined LJ_COMB_LB)
#if UNROLLJ == STRIDE
    aj2 = aj*2;
#else
    aj2 = (cj>>1)*2*STRIDE + (cj & 1)*UNROLLJ;
#endif
#endif
#if UNROLLJ == STRIDE
    ajx = aj*DIM;
#else
    ajx = (cj>>1)*DIM*STRIDE + (cj & 1)*UNROLLJ;
#endif
    ajy = ajx + STRIDE;
    ajz = ajy + STRIDE;
#ifdef CHECK_EXCLS
    gmx_load_simd_4xn_interactions(l_cj[cjind].excl,
                                   filter_S0, filter_S1,
                                   filter_S2, filter_S3,
#ifdef GMX_CPU_ACCELERATION_IBM_QPX
                                   l_cj[cjind].interaction_mask_indices,
                                   nbat->simd_interaction_array,
#else
                                   /* The struct fields do not exist
                                    * except on BlueGene/Q */
                                   NULL,
                                   NULL,
#endif
                                   &interact_S0, &interact_S1,
                                   &interact_S2, &interact_S3);
#endif /* CHECK_EXCLS */
    /* load j atom coordinates */
    jx_S = gmx_load_pr(x+ajx);
    jy_S = gmx_load_pr(x+ajy);
    jz_S = gmx_load_pr(x+ajz);

    /* Calculate distance */
    dx_S0 = gmx_sub_pr(ix_S0, jx_S);
    dy_S0 = gmx_sub_pr(iy_S0, jy_S);
    dz_S0 = gmx_sub_pr(iz_S0, jz_S);
    dx_S1 = gmx_sub_pr(ix_S1, jx_S);
    dy_S1 = gmx_sub_pr(iy_S1, jy_S);
    dz_S1 = gmx_sub_pr(iz_S1, jz_S);
    dx_S2 = gmx_sub_pr(ix_S2, jx_S);
    dy_S2 = gmx_sub_pr(iy_S2, jy_S);
    dz_S2 = gmx_sub_pr(iz_S2, jz_S);
    dx_S3 = gmx_sub_pr(ix_S3, jx_S);
    dy_S3 = gmx_sub_pr(iy_S3, jy_S);
    dz_S3 = gmx_sub_pr(iz_S3, jz_S);

    /* rsq = dx*dx+dy*dy+dz*dz */
    rsq_S0 = gmx_calc_rsq_pr(dx_S0, dy_S0, dz_S0);
    rsq_S1 = gmx_calc_rsq_pr(dx_S1, dy_S1, dz_S1);
    rsq_S2 = gmx_calc_rsq_pr(dx_S2, dy_S2, dz_S2);
    rsq_S3 = gmx_calc_rsq_pr(dx_S3, dy_S3, dz_S3);
#ifndef NBNXN_CUTOFF_USE_BLENDV
    /* Calculate the cut-off masks */
    wco_S0 = gmx_cmplt_pr(rsq_S0, rc2_S);
    wco_S1 = gmx_cmplt_pr(rsq_S1, rc2_S);
    wco_S2 = gmx_cmplt_pr(rsq_S2, rc2_S);
    wco_S3 = gmx_cmplt_pr(rsq_S3, rc2_S);
#endif
#ifdef CHECK_EXCLS
#ifdef EXCL_FORCES
    /* Only remove the (sub-)diagonal to avoid double counting */
#if UNROLLJ == UNROLLI
    if (cj == ci_sh)
    {
        wco_S0 = gmx_and_pb(wco_S0, diagonal_mask_S0);
        wco_S1 = gmx_and_pb(wco_S1, diagonal_mask_S1);
        wco_S2 = gmx_and_pb(wco_S2, diagonal_mask_S2);
        wco_S3 = gmx_and_pb(wco_S3, diagonal_mask_S3);
    }
#else
#if UNROLLJ < UNROLLI
    if (cj == ci_sh*2)
    {
        wco_S0 = gmx_and_pb(wco_S0, diagonal_mask0_S0);
        wco_S1 = gmx_and_pb(wco_S1, diagonal_mask0_S1);
        wco_S2 = gmx_and_pb(wco_S2, diagonal_mask0_S2);
        wco_S3 = gmx_and_pb(wco_S3, diagonal_mask0_S3);
    }
    if (cj == ci_sh*2 + 1)
    {
        wco_S0 = gmx_and_pb(wco_S0, diagonal_mask1_S0);
        wco_S1 = gmx_and_pb(wco_S1, diagonal_mask1_S1);
        wco_S2 = gmx_and_pb(wco_S2, diagonal_mask1_S2);
        wco_S3 = gmx_and_pb(wco_S3, diagonal_mask1_S3);
    }
#else
    if (cj*2 == ci_sh)
    {
        wco_S0 = gmx_and_pb(wco_S0, diagonal_mask0_S0);
        wco_S1 = gmx_and_pb(wco_S1, diagonal_mask0_S1);
        wco_S2 = gmx_and_pb(wco_S2, diagonal_mask0_S2);
        wco_S3 = gmx_and_pb(wco_S3, diagonal_mask0_S3);
    }
    else if (cj*2 + 1 == ci_sh)
    {
        wco_S0 = gmx_and_pb(wco_S0, diagonal_mask1_S0);
        wco_S1 = gmx_and_pb(wco_S1, diagonal_mask1_S1);
        wco_S2 = gmx_and_pb(wco_S2, diagonal_mask1_S2);
        wco_S3 = gmx_and_pb(wco_S3, diagonal_mask1_S3);
    }
#endif
#endif
#else /* EXCL_FORCES */
    /* No exclusion forces: remove all excluded atom pairs from the list */
    wco_S0 = gmx_and_pb(wco_S0, interact_S0);
    wco_S1 = gmx_and_pb(wco_S1, interact_S1);
    wco_S2 = gmx_and_pb(wco_S2, interact_S2);
    wco_S3 = gmx_and_pb(wco_S3, interact_S3);
#endif
#endif /* CHECK_EXCLS */
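/* What the diagonal masks encode, as a scalar condition: for a
 * self-interacting cluster pair every atom pair would otherwise appear
 * twice, so only the "upper triangle" is kept. Illustrative sketch only;
 * the masks above hold this condition precomputed per SIMD lane.
 */
#if 0
static gmx_bool sketch_keep_pair(int i, int j, gmx_bool self_pair)
{
    return !self_pair || (j > i);
}
#endif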
#ifdef COUNT_PAIRS
    {
        int  i, j;
        real tmpa[2*GMX_SIMD_WIDTH_HERE], *tmp;

        tmp = gmx_simd_align_real(tmpa);
        for (i = 0; i < UNROLLI; i++)
        {
            gmx_store_pr(tmp, gmx_sub_pr(rc2_S, i == 0 ? rsq_S0 : (i == 1 ? rsq_S1 : (i == 2 ? rsq_S2 : rsq_S3))));
            for (j = 0; j < UNROLLJ; j++)
            {
                if (tmp[j] >= 0)
                {
                    npair++;
                }
            }
        }
    }
#endif
#ifdef CHECK_EXCLS
    /* For excluded pairs add a small number to avoid r^-6 = NaN */
    rsq_S0 = gmx_masknot_add_pr(interact_S0, rsq_S0, avoid_sing_S);
    rsq_S1 = gmx_masknot_add_pr(interact_S1, rsq_S1, avoid_sing_S);
    rsq_S2 = gmx_masknot_add_pr(interact_S2, rsq_S2, avoid_sing_S);
    rsq_S3 = gmx_masknot_add_pr(interact_S3, rsq_S3, avoid_sing_S);
#endif
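/* gmx_masknot_add_pr as a scalar operation: the constant is added only
 * where the interaction mask is unset (an excluded pair at zero distance),
 * so the 1/sqrt(rsq) below never sees an exact zero. Sketch only:
 */
#if 0
static real sketch_masknot_add(gmx_bool interact, real rsq, real avoid_sing)
{
    return interact ? rsq : rsq + avoid_sing;
}
#endif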
    /* Calculate 1/r */
#ifndef GMX_DOUBLE
    rinv_S0 = gmx_invsqrt_pr(rsq_S0);
    rinv_S1 = gmx_invsqrt_pr(rsq_S1);
    rinv_S2 = gmx_invsqrt_pr(rsq_S2);
    rinv_S3 = gmx_invsqrt_pr(rsq_S3);
#else
    gmx_mm_invsqrt2_pd(rsq_S0, rsq_S1, &rinv_S0, &rinv_S1);
    gmx_mm_invsqrt2_pd(rsq_S2, rsq_S3, &rinv_S2, &rinv_S3);
#endif
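/* gmx_invsqrt_pr is typically a hardware reciprocal square-root estimate
 * refined with one Newton-Raphson step; the exact implementation is
 * architecture-dependent, so this single-precision SSE version is only a
 * sketch of the pattern.
 */
#if 0
#include <xmmintrin.h>

static __m128 sketch_invsqrt(__m128 rsq)
{
    const __m128 half  = _mm_set1_ps(0.5f);
    const __m128 three = _mm_set1_ps(3.0f);
    __m128       x     = _mm_rsqrt_ps(rsq);   /* ~12 bit estimate */

    /* One Newton-Raphson iteration: x' = 0.5*x*(3 - rsq*x*x) */
    return _mm_mul_ps(_mm_mul_ps(half, x),
                      _mm_sub_ps(three, _mm_mul_ps(_mm_mul_ps(x, x), rsq)));
}
#endif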
#ifdef CALC_COULOMB
    /* Load parameters for j atom */
    jq_S  = gmx_load_pr(q+aj);
    qq_S0 = gmx_mul_pr(iq_S0, jq_S);
    qq_S1 = gmx_mul_pr(iq_S1, jq_S);
    qq_S2 = gmx_mul_pr(iq_S2, jq_S);
    qq_S3 = gmx_mul_pr(iq_S3, jq_S);
#endif
#ifdef CALC_LJ

#if !defined LJ_COMB_GEOM && !defined LJ_COMB_LB && !defined FIX_LJ_C
    load_lj_pair_params(nbfp0, type, aj, &c6_S0, &c12_S0);
    load_lj_pair_params(nbfp1, type, aj, &c6_S1, &c12_S1);
    load_lj_pair_params(nbfp2, type, aj, &c6_S2, &c12_S2);
    load_lj_pair_params(nbfp3, type, aj, &c6_S3, &c12_S3);
#endif /* not defined any LJ rule */
#ifdef LJ_COMB_GEOM
    c6s_j_S  = gmx_load_pr(ljc+aj2+0);
    c12s_j_S = gmx_load_pr(ljc+aj2+STRIDE);
    c6_S0    = gmx_mul_pr(c6s_S0, c6s_j_S);
    c6_S1    = gmx_mul_pr(c6s_S1, c6s_j_S);
    c6_S2    = gmx_mul_pr(c6s_S2, c6s_j_S);
    c6_S3    = gmx_mul_pr(c6s_S3, c6s_j_S);

    c12_S0   = gmx_mul_pr(c12s_S0, c12s_j_S);
    c12_S1   = gmx_mul_pr(c12s_S1, c12s_j_S);
    c12_S2   = gmx_mul_pr(c12s_S2, c12s_j_S);
    c12_S3   = gmx_mul_pr(c12s_S3, c12s_j_S);
#endif /* LJ_COMB_GEOM */
#ifdef LJ_COMB_LB
    hsig_j_S = gmx_load_pr(ljc+aj2+0);
    seps_j_S = gmx_load_pr(ljc+aj2+STRIDE);

    sig_S0   = gmx_add_pr(hsig_i_S0, hsig_j_S);
    sig_S1   = gmx_add_pr(hsig_i_S1, hsig_j_S);
    eps_S0   = gmx_mul_pr(seps_i_S0, seps_j_S);
    eps_S1   = gmx_mul_pr(seps_i_S1, seps_j_S);
    sig_S2   = gmx_add_pr(hsig_i_S2, hsig_j_S);
    sig_S3   = gmx_add_pr(hsig_i_S3, hsig_j_S);
    eps_S2   = gmx_mul_pr(seps_i_S2, seps_j_S);
    eps_S3   = gmx_mul_pr(seps_i_S3, seps_j_S);
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */
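/* The Lorentz-Berthelot combination rules behind the storage trick above:
 * storing sigma/2 turns sigma_ij = (sigma_i + sigma_j)/2 into one add, and
 * storing sqrt(epsilon) turns eps_ij = sqrt(eps_i*eps_j) into one multiply.
 * Scalar sketch only:
 */
#if 0
static void sketch_lb_rule(real hsig_i, real seps_i, /* sigma_i/2, sqrt(eps_i) */
                           real hsig_j, real seps_j, /* sigma_j/2, sqrt(eps_j) */
                           real *sig_ij, real *eps_ij)
{
    *sig_ij = hsig_i + hsig_j; /* = (sigma_i + sigma_j)/2 */
    *eps_ij = seps_i*seps_j;   /* = sqrt(eps_i*eps_j)     */
}
#endif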
    /* Mask 1/r for pairs beyond the cut-off */
#ifndef NBNXN_CUTOFF_USE_BLENDV
    rinv_S0 = gmx_blendzero_pr(rinv_S0, wco_S0);
    rinv_S1 = gmx_blendzero_pr(rinv_S1, wco_S1);
    rinv_S2 = gmx_blendzero_pr(rinv_S2, wco_S2);
    rinv_S3 = gmx_blendzero_pr(rinv_S3, wco_S3);
#else
    /* We only need to mask for the cut-off: blendv is faster */
    rinv_S0 = gmx_blendv_pr(rinv_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0));
    rinv_S1 = gmx_blendv_pr(rinv_S1, zero_S, gmx_sub_pr(rc2_S, rsq_S1));
    rinv_S2 = gmx_blendv_pr(rinv_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2));
    rinv_S3 = gmx_blendv_pr(rinv_S3, zero_S, gmx_sub_pr(rc2_S, rsq_S3));
#endif

    rinvsq_S0 = gmx_mul_pr(rinv_S0, rinv_S0);
    rinvsq_S1 = gmx_mul_pr(rinv_S1, rinv_S1);
    rinvsq_S2 = gmx_mul_pr(rinv_S2, rinv_S2);
    rinvsq_S3 = gmx_mul_pr(rinv_S3, rinv_S3);
#ifdef CALC_COULOMB
    /* Note that here we calculate force*r, not the usual force/r.
     * This allows us to avoid masking the reaction-field contribution,
     * as frcoul is later multiplied by rinvsq, which has been
     * masked with the cut-off check.
     */

#ifdef EXCL_FORCES
    /* Only add 1/r for non-excluded atom pairs */
    rinv_ex_S0 = gmx_blendzero_pr(rinv_S0, interact_S0);
    rinv_ex_S1 = gmx_blendzero_pr(rinv_S1, interact_S1);
    rinv_ex_S2 = gmx_blendzero_pr(rinv_S2, interact_S2);
    rinv_ex_S3 = gmx_blendzero_pr(rinv_S3, interact_S3);
#else
    /* No exclusion forces, we always need 1/r */
#define rinv_ex_S0 rinv_S0
#define rinv_ex_S1 rinv_S1
#define rinv_ex_S2 rinv_S2
#define rinv_ex_S3 rinv_S3
#endif

#ifdef CALC_COUL_RF
    /* Electrostatic interactions */
    frcoul_S0 = gmx_mul_pr(qq_S0, gmx_madd_pr(rsq_S0, mrc_3_S, rinv_ex_S0));
    frcoul_S1 = gmx_mul_pr(qq_S1, gmx_madd_pr(rsq_S1, mrc_3_S, rinv_ex_S1));
    frcoul_S2 = gmx_mul_pr(qq_S2, gmx_madd_pr(rsq_S2, mrc_3_S, rinv_ex_S2));
    frcoul_S3 = gmx_mul_pr(qq_S3, gmx_madd_pr(rsq_S3, mrc_3_S, rinv_ex_S3));

#ifdef CALC_ENERGIES
    vcoul_S0 = gmx_mul_pr(qq_S0, gmx_add_pr(rinv_ex_S0, gmx_add_pr(gmx_mul_pr(rsq_S0, hrc_3_S), moh_rc_S)));
    vcoul_S1 = gmx_mul_pr(qq_S1, gmx_add_pr(rinv_ex_S1, gmx_add_pr(gmx_mul_pr(rsq_S1, hrc_3_S), moh_rc_S)));
    vcoul_S2 = gmx_mul_pr(qq_S2, gmx_add_pr(rinv_ex_S2, gmx_add_pr(gmx_mul_pr(rsq_S2, hrc_3_S), moh_rc_S)));
    vcoul_S3 = gmx_mul_pr(qq_S3, gmx_add_pr(rinv_ex_S3, gmx_add_pr(gmx_mul_pr(rsq_S3, hrc_3_S), moh_rc_S)));
#endif
#endif /* CALC_COUL_RF */
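/* The reaction-field expressions the constants above implement, in scalar
 * form, with k_rf and c_rf as in the usual RF formulation; the names
 * suggest mrc_3_S = -2*k_rf, hrc_3_S = k_rf and moh_rc_S = -c_rf, with
 * k_rf = 1/(2*rc^3) and c_rf = 3/(2*rc) for an infinite RF dielectric.
 * Sketch only:
 */
#if 0
static void sketch_rf(real qq, real r, real rinv, real k_rf, real c_rf,
                      real *fr, real *v)
{
    *fr = qq*(rinv - 2*k_rf*r*r);      /* force*r,   cf. frcoul above */
    *v  = qq*(rinv + k_rf*r*r - c_rf); /* potential, cf. vcoul above  */
}
#endif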
#ifdef CALC_COUL_EWALD
    /* We need to mask (or limit) rsq for the cut-off,
     * as large distances can cause an overflow in gmx_pmecorrF/V.
     */
#ifndef NBNXN_CUTOFF_USE_BLENDV
    brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S0, wco_S0));
    brsq_S1 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S1, wco_S1));
    brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S2, wco_S2));
    brsq_S3 = gmx_mul_pr(beta2_S, gmx_blendzero_pr(rsq_S3, wco_S3));
#else
    /* Strangely, putting mul on a separate line is slower (icc 13) */
    brsq_S0 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S0, zero_S, gmx_sub_pr(rc2_S, rsq_S0)));
    brsq_S1 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S1, zero_S, gmx_sub_pr(rc2_S, rsq_S1)));
    brsq_S2 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S2, zero_S, gmx_sub_pr(rc2_S, rsq_S2)));
    brsq_S3 = gmx_mul_pr(beta2_S, gmx_blendv_pr(rsq_S3, zero_S, gmx_sub_pr(rc2_S, rsq_S3)));
#endif
    ewcorr_S0 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S0), beta_S);
    ewcorr_S1 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S1), beta_S);
    ewcorr_S2 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S2), beta_S);
    ewcorr_S3 = gmx_mul_pr(gmx_pmecorrF_pr(brsq_S3), beta_S);
    frcoul_S0 = gmx_mul_pr(qq_S0, gmx_madd_pr(ewcorr_S0, brsq_S0, rinv_ex_S0));
    frcoul_S1 = gmx_mul_pr(qq_S1, gmx_madd_pr(ewcorr_S1, brsq_S1, rinv_ex_S1));
    frcoul_S2 = gmx_mul_pr(qq_S2, gmx_madd_pr(ewcorr_S2, brsq_S2, rinv_ex_S2));
    frcoul_S3 = gmx_mul_pr(qq_S3, gmx_madd_pr(ewcorr_S3, brsq_S3, rinv_ex_S3));

#ifdef CALC_ENERGIES
    vc_sub_S0 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S0), beta_S);
    vc_sub_S1 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S1), beta_S);
    vc_sub_S2 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S2), beta_S);
    vc_sub_S3 = gmx_mul_pr(gmx_pmecorrV_pr(brsq_S3), beta_S);
#endif
#endif /* CALC_COUL_EWALD */
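/* The analytical real-space Ewald pair force that gmx_pmecorrF approximates
 * by a polynomial in (beta*r)^2: subtracting the mesh part from 1/r^2 gives
 * force*r = qq*(erfc(beta*r)/r + 2*beta/sqrt(pi)*exp(-beta^2*r^2)).
 * Scalar reference for comparison; sketch only:
 */
#if 0
#include <math.h>

static real sketch_ewald_fr(real qq, real r, real beta)
{
    return qq*(erfc(beta*r)/r + 2*beta/sqrt(M_PI)*exp(-beta*beta*r*r));
}
#endif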
#ifdef CALC_COUL_TAB
    /* Electrostatic interactions */
    r_S0 = gmx_mul_pr(rsq_S0, rinv_S0);
    r_S1 = gmx_mul_pr(rsq_S1, rinv_S1);
    r_S2 = gmx_mul_pr(rsq_S2, rinv_S2);
    r_S3 = gmx_mul_pr(rsq_S3, rinv_S3);
    /* Convert r to scaled table units */
    rs_S0 = gmx_mul_pr(r_S0, invtsp_S);
    rs_S1 = gmx_mul_pr(r_S1, invtsp_S);
    rs_S2 = gmx_mul_pr(r_S2, invtsp_S);
    rs_S3 = gmx_mul_pr(r_S3, invtsp_S);
    /* Truncate scaled r to an int */
    ti_S0 = gmx_cvttpr_epi32(rs_S0);
    ti_S1 = gmx_cvttpr_epi32(rs_S1);
    ti_S2 = gmx_cvttpr_epi32(rs_S2);
    ti_S3 = gmx_cvttpr_epi32(rs_S3);
#ifdef GMX_SIMD_HAVE_FLOOR
    /* SSE4.1 floor is faster than gmx_cvtepi32_ps int->float cast */
    rf_S0 = gmx_floor_pr(rs_S0);
    rf_S1 = gmx_floor_pr(rs_S1);
    rf_S2 = gmx_floor_pr(rs_S2);
    rf_S3 = gmx_floor_pr(rs_S3);
#else
    rf_S0 = gmx_cvtepi32_pr(ti_S0);
    rf_S1 = gmx_cvtepi32_pr(ti_S1);
    rf_S2 = gmx_cvtepi32_pr(ti_S2);
    rf_S3 = gmx_cvtepi32_pr(ti_S3);
#endif
    frac_S0 = gmx_sub_pr(rs_S0, rf_S0);
    frac_S1 = gmx_sub_pr(rs_S1, rf_S1);
    frac_S2 = gmx_sub_pr(rs_S2, rf_S2);
    frac_S3 = gmx_sub_pr(rs_S3, rf_S3);
    /* Load and interpolate table forces and possibly energies.
     * Force and energy can be combined in one table, stride 4: FDV0,
     * or in two separate tables with stride 1: F and V.
     * Currently single precision uses FDV0, double precision uses F and V.
     */
#ifndef CALC_ENERGIES
    load_table_f(tab_coul_F, ti_S0, ti0, &ctab0_S0, &ctab1_S0);
    load_table_f(tab_coul_F, ti_S1, ti1, &ctab0_S1, &ctab1_S1);
    load_table_f(tab_coul_F, ti_S2, ti2, &ctab0_S2, &ctab1_S2);
    load_table_f(tab_coul_F, ti_S3, ti3, &ctab0_S3, &ctab1_S3);
#else
#ifdef TAB_FDV0
    load_table_f_v(tab_coul_F, ti_S0, ti0, &ctab0_S0, &ctab1_S0, &ctabv_S0);
    load_table_f_v(tab_coul_F, ti_S1, ti1, &ctab0_S1, &ctab1_S1, &ctabv_S1);
    load_table_f_v(tab_coul_F, ti_S2, ti2, &ctab0_S2, &ctab1_S2, &ctabv_S2);
    load_table_f_v(tab_coul_F, ti_S3, ti3, &ctab0_S3, &ctab1_S3, &ctabv_S3);
#else
    load_table_f_v(tab_coul_F, tab_coul_V, ti_S0, ti0, &ctab0_S0, &ctab1_S0, &ctabv_S0);
    load_table_f_v(tab_coul_F, tab_coul_V, ti_S1, ti1, &ctab0_S1, &ctab1_S1, &ctabv_S1);
    load_table_f_v(tab_coul_F, tab_coul_V, ti_S2, ti2, &ctab0_S2, &ctab1_S2, &ctabv_S2);
    load_table_f_v(tab_coul_F, tab_coul_V, ti_S3, ti3, &ctab0_S3, &ctab1_S3, &ctabv_S3);
#endif
#endif
    fsub_S0   = gmx_add_pr(ctab0_S0, gmx_mul_pr(frac_S0, ctab1_S0));
    fsub_S1   = gmx_add_pr(ctab0_S1, gmx_mul_pr(frac_S1, ctab1_S1));
    fsub_S2   = gmx_add_pr(ctab0_S2, gmx_mul_pr(frac_S2, ctab1_S2));
    fsub_S3   = gmx_add_pr(ctab0_S3, gmx_mul_pr(frac_S3, ctab1_S3));
    frcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, gmx_mul_pr(fsub_S0, r_S0)));
    frcoul_S1 = gmx_mul_pr(qq_S1, gmx_sub_pr(rinv_ex_S1, gmx_mul_pr(fsub_S1, r_S1)));
    frcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, gmx_mul_pr(fsub_S2, r_S2)));
    frcoul_S3 = gmx_mul_pr(qq_S3, gmx_sub_pr(rinv_ex_S3, gmx_mul_pr(fsub_S3, r_S3)));

#ifdef CALC_ENERGIES
    vc_sub_S0 = gmx_add_pr(ctabv_S0, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S0), gmx_add_pr(ctab0_S0, fsub_S0)));
    vc_sub_S1 = gmx_add_pr(ctabv_S1, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S1), gmx_add_pr(ctab0_S1, fsub_S1)));
    vc_sub_S2 = gmx_add_pr(ctabv_S2, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S2), gmx_add_pr(ctab0_S2, fsub_S2)));
    vc_sub_S3 = gmx_add_pr(ctabv_S3, gmx_mul_pr(gmx_mul_pr(mhalfsp_S, frac_S3), gmx_add_pr(ctab0_S3, fsub_S3)));
#endif
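/* The interpolation above in scalar form: the force table is linear in the
 * fractional table coordinate, and the energy correction is the analytic
 * integral of that linear force, which is where the quadratic term and the
 * minus-half-spacing factor (mhalfsp_S) come from. F and V here stand for
 * the two stride-1 tables; sketch only:
 */
#if 0
static void sketch_table_lookup(const real *F, const real *V, real r,
                                real tabscale, real *fsub, real *vsub)
{
    real rs   = r*tabscale;      /* r in table units, cf. invtsp_S */
    int  ti   = (int)rs;         /* table index: rs truncated      */
    real frac = rs - ti;         /* fraction within the table bin  */
    real dF   = F[ti+1] - F[ti]; /* force difference, cf. ctab1    */

    *fsub = F[ti] + frac*dF;                           /* linear force */
    *vsub = V[ti] - 0.5/tabscale*frac*(F[ti] + *fsub); /* quadratic V  */
}
#endif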
#endif /* CALC_COUL_TAB */
#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
#ifndef NO_SHIFT_EWALD
    /* Add Ewald potential shift to vc_sub for convenience */
#ifdef CHECK_EXCLS
    vc_sub_S0 = gmx_add_pr(vc_sub_S0, gmx_blendzero_pr(sh_ewald_S, interact_S0));
    vc_sub_S1 = gmx_add_pr(vc_sub_S1, gmx_blendzero_pr(sh_ewald_S, interact_S1));
    vc_sub_S2 = gmx_add_pr(vc_sub_S2, gmx_blendzero_pr(sh_ewald_S, interact_S2));
    vc_sub_S3 = gmx_add_pr(vc_sub_S3, gmx_blendzero_pr(sh_ewald_S, interact_S3));
#else
    vc_sub_S0 = gmx_add_pr(vc_sub_S0, sh_ewald_S);
    vc_sub_S1 = gmx_add_pr(vc_sub_S1, sh_ewald_S);
    vc_sub_S2 = gmx_add_pr(vc_sub_S2, sh_ewald_S);
    vc_sub_S3 = gmx_add_pr(vc_sub_S3, sh_ewald_S);
#endif
#endif

    vcoul_S0 = gmx_mul_pr(qq_S0, gmx_sub_pr(rinv_ex_S0, vc_sub_S0));
    vcoul_S1 = gmx_mul_pr(qq_S1, gmx_sub_pr(rinv_ex_S1, vc_sub_S1));
    vcoul_S2 = gmx_mul_pr(qq_S2, gmx_sub_pr(rinv_ex_S2, vc_sub_S2));
    vcoul_S3 = gmx_mul_pr(qq_S3, gmx_sub_pr(rinv_ex_S3, vc_sub_S3));
#endif
#ifdef CALC_ENERGIES
    /* Mask energy for cut-off and diagonal */
    vcoul_S0 = gmx_blendzero_pr(vcoul_S0, wco_S0);
    vcoul_S1 = gmx_blendzero_pr(vcoul_S1, wco_S1);
    vcoul_S2 = gmx_blendzero_pr(vcoul_S2, wco_S2);
    vcoul_S3 = gmx_blendzero_pr(vcoul_S3, wco_S3);
#endif

#endif /* CALC_COULOMB */
#ifdef CALC_LJ
    /* Lennard-Jones interaction */

#ifdef VDW_CUTOFF_CHECK
    wco_vdw_S0 = gmx_cmplt_pr(rsq_S0, rcvdw2_S);
    wco_vdw_S1 = gmx_cmplt_pr(rsq_S1, rcvdw2_S);
    wco_vdw_S2 = gmx_cmplt_pr(rsq_S2, rcvdw2_S);
    wco_vdw_S3 = gmx_cmplt_pr(rsq_S3, rcvdw2_S);
#else
    /* Same cut-off for Coulomb and VdW, reuse the registers */
#define wco_vdw_S0 wco_S0
#define wco_vdw_S1 wco_S1
#define wco_vdw_S2 wco_S2
#define wco_vdw_S3 wco_S3
#endif
#ifndef LJ_COMB_LB
    rinvsix_S0 = gmx_mul_pr(rinvsq_S0, gmx_mul_pr(rinvsq_S0, rinvsq_S0));
    rinvsix_S1 = gmx_mul_pr(rinvsq_S1, gmx_mul_pr(rinvsq_S1, rinvsq_S1));
#ifdef EXCL_FORCES
    rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, interact_S0);
    rinvsix_S1 = gmx_blendzero_pr(rinvsix_S1, interact_S1);
#endif
    rinvsix_S2 = gmx_mul_pr(rinvsq_S2, gmx_mul_pr(rinvsq_S2, rinvsq_S2));
    rinvsix_S3 = gmx_mul_pr(rinvsq_S3, gmx_mul_pr(rinvsq_S3, rinvsq_S3));
#ifdef EXCL_FORCES
    rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, interact_S2);
    rinvsix_S3 = gmx_blendzero_pr(rinvsix_S3, interact_S3);
#endif
#ifdef VDW_CUTOFF_CHECK
    rinvsix_S0 = gmx_blendzero_pr(rinvsix_S0, wco_vdw_S0);
    rinvsix_S1 = gmx_blendzero_pr(rinvsix_S1, wco_vdw_S1);
    rinvsix_S2 = gmx_blendzero_pr(rinvsix_S2, wco_vdw_S2);
    rinvsix_S3 = gmx_blendzero_pr(rinvsix_S3, wco_vdw_S3);
#endif
    FrLJ6_S0  = gmx_mul_pr(c6_S0, rinvsix_S0);
    FrLJ6_S1  = gmx_mul_pr(c6_S1, rinvsix_S1);
    FrLJ6_S2  = gmx_mul_pr(c6_S2, rinvsix_S2);
    FrLJ6_S3  = gmx_mul_pr(c6_S3, rinvsix_S3);

    FrLJ12_S0 = gmx_mul_pr(c12_S0, gmx_mul_pr(rinvsix_S0, rinvsix_S0));
    FrLJ12_S1 = gmx_mul_pr(c12_S1, gmx_mul_pr(rinvsix_S1, rinvsix_S1));
    FrLJ12_S2 = gmx_mul_pr(c12_S2, gmx_mul_pr(rinvsix_S2, rinvsix_S2));
    FrLJ12_S3 = gmx_mul_pr(c12_S3, gmx_mul_pr(rinvsix_S3, rinvsix_S3));
#endif /* not LJ_COMB_LB */
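/* The Lennard-Jones terms in scalar form. Note that the C6 and C12 the
 * kernel loads carry the force factors 6 and 12 pre-multiplied, which is
 * why the energy code below divides by 6 and 12 (sixth_S, twelveth_S):
 * with c6 = 6*C6 and c12 = 12*C12, force*r is simply FrLJ12 - FrLJ6.
 * Sketch only:
 */
#if 0
static void sketch_lj(real c6, real c12, real rinvsq, /* c6 = 6*C6, c12 = 12*C12 */
                      real *frlj, real *vlj)
{
    real rinvsix = rinvsq*rinvsq*rinvsq;
    real FrLJ6   = c6*rinvsix;          /*  6*C6/r^6   */
    real FrLJ12  = c12*rinvsix*rinvsix; /* 12*C12/r^12 */

    *frlj = FrLJ12 - FrLJ6;      /* enters fscal after the *1/r^2          */
    *vlj  = FrLJ12/12 - FrLJ6/6; /* unshifted potential C12/r^12 - C6/r^6  */
}
#endif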
#ifdef LJ_COMB_LB
    sir_S0  = gmx_mul_pr(sig_S0, rinv_S0);
    sir_S1  = gmx_mul_pr(sig_S1, rinv_S1);
    sir_S2  = gmx_mul_pr(sig_S2, rinv_S2);
    sir_S3  = gmx_mul_pr(sig_S3, rinv_S3);

    sir2_S0 = gmx_mul_pr(sir_S0, sir_S0);
    sir2_S1 = gmx_mul_pr(sir_S1, sir_S1);
    sir2_S2 = gmx_mul_pr(sir_S2, sir_S2);
    sir2_S3 = gmx_mul_pr(sir_S3, sir_S3);

    sir6_S0 = gmx_mul_pr(sir2_S0, gmx_mul_pr(sir2_S0, sir2_S0));
    sir6_S1 = gmx_mul_pr(sir2_S1, gmx_mul_pr(sir2_S1, sir2_S1));
#ifdef EXCL_FORCES
    sir6_S0 = gmx_blendzero_pr(sir6_S0, interact_S0);
    sir6_S1 = gmx_blendzero_pr(sir6_S1, interact_S1);
#endif
    sir6_S2 = gmx_mul_pr(sir2_S2, gmx_mul_pr(sir2_S2, sir2_S2));
    sir6_S3 = gmx_mul_pr(sir2_S3, gmx_mul_pr(sir2_S3, sir2_S3));
#ifdef EXCL_FORCES
    sir6_S2 = gmx_blendzero_pr(sir6_S2, interact_S2);
    sir6_S3 = gmx_blendzero_pr(sir6_S3, interact_S3);
#endif
#ifdef VDW_CUTOFF_CHECK
    sir6_S0 = gmx_blendzero_pr(sir6_S0, wco_vdw_S0);
    sir6_S1 = gmx_blendzero_pr(sir6_S1, wco_vdw_S1);
    sir6_S2 = gmx_blendzero_pr(sir6_S2, wco_vdw_S2);
    sir6_S3 = gmx_blendzero_pr(sir6_S3, wco_vdw_S3);
#endif
    FrLJ6_S0  = gmx_mul_pr(eps_S0, sir6_S0);
    FrLJ6_S1  = gmx_mul_pr(eps_S1, sir6_S1);
    FrLJ6_S2  = gmx_mul_pr(eps_S2, sir6_S2);
    FrLJ6_S3  = gmx_mul_pr(eps_S3, sir6_S3);

    FrLJ12_S0 = gmx_mul_pr(FrLJ6_S0, sir6_S0);
    FrLJ12_S1 = gmx_mul_pr(FrLJ6_S1, sir6_S1);
    FrLJ12_S2 = gmx_mul_pr(FrLJ6_S2, sir6_S2);
    FrLJ12_S3 = gmx_mul_pr(FrLJ6_S3, sir6_S3);

#if defined CALC_ENERGIES
    /* We need C6 and C12 to calculate the LJ potential shift */
    sig2_S0 = gmx_mul_pr(sig_S0, sig_S0);
    sig2_S1 = gmx_mul_pr(sig_S1, sig_S1);
    sig2_S2 = gmx_mul_pr(sig_S2, sig_S2);
    sig2_S3 = gmx_mul_pr(sig_S3, sig_S3);

    sig6_S0 = gmx_mul_pr(sig2_S0, gmx_mul_pr(sig2_S0, sig2_S0));
    sig6_S1 = gmx_mul_pr(sig2_S1, gmx_mul_pr(sig2_S1, sig2_S1));
    sig6_S2 = gmx_mul_pr(sig2_S2, gmx_mul_pr(sig2_S2, sig2_S2));
    sig6_S3 = gmx_mul_pr(sig2_S3, gmx_mul_pr(sig2_S3, sig2_S3));

    c6_S0  = gmx_mul_pr(eps_S0, sig6_S0);
    c6_S1  = gmx_mul_pr(eps_S1, sig6_S1);
    c6_S2  = gmx_mul_pr(eps_S2, sig6_S2);
    c6_S3  = gmx_mul_pr(eps_S3, sig6_S3);

    c12_S0 = gmx_mul_pr(c6_S0, sig6_S0);
    c12_S1 = gmx_mul_pr(c6_S1, sig6_S1);
    c12_S2 = gmx_mul_pr(c6_S2, sig6_S2);
    c12_S3 = gmx_mul_pr(c6_S3, sig6_S3);
#endif
#endif /* LJ_COMB_LB */
#endif /* CALC_LJ */

#ifdef CALC_ENERGIES
#ifdef ENERGY_GROUPS
    /* Extract the group pair index per j pair.
     * Energy groups are stored per i-cluster, so things get
     * complicated when the i- and j-cluster size don't match.
     */
    {
        int egps_j;
#if UNROLLJ == 2
        egps_j    = nbat->energrp[cj>>1];
        egp_jj[0] = ((egps_j >> ((cj & 1)*egps_jshift)) & egps_jmask)*egps_jstride;
#else
        /* We assume UNROLLI <= UNROLLJ */
        int jdi;

        for (jdi = 0; jdi < UNROLLJ/UNROLLI; jdi++)
        {
            int jj;

            egps_j = nbat->energrp[cj*(UNROLLJ/UNROLLI)+jdi];
            for (jj = 0; jj < (UNROLLI/2); jj++)
            {
                egp_jj[jdi*(UNROLLI/2)+jj] = ((egps_j >> (jj*egps_jshift)) & egps_jmask)*egps_jstride;
            }
        }
#endif
    }
#endif /* ENERGY_GROUPS */
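/* How the packed energy-group indices are unpacked: each int in
 * nbat->energrp holds one group index per i-atom of a cluster, egps_jshift
 * bits each, and egp_jj[] ends up holding offsets into the energy bins.
 * Scalar sketch of one extraction (names as in the loop above):
 */
#if 0
static int sketch_unpack_egp(int egps_j, int atom_pair,
                             int egps_jshift, int egps_jmask, int egps_jstride)
{
    return ((egps_j >> (atom_pair*egps_jshift)) & egps_jmask)*egps_jstride;
}
#endif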
#ifdef CALC_COULOMB
#ifndef ENERGY_GROUPS
    vctot_S = gmx_add_pr(vctot_S, gmx_sum4_pr(vcoul_S0, vcoul_S1, vcoul_S2, vcoul_S3));
#else
    add_ener_grp(vcoul_S0, vctp[0], egp_jj);
    add_ener_grp(vcoul_S1, vctp[1], egp_jj);
    add_ener_grp(vcoul_S2, vctp[2], egp_jj);
    add_ener_grp(vcoul_S3, vctp[3], egp_jj);
#endif
#endif /* CALC_COULOMB */
#ifdef CALC_LJ
    /* Calculate the LJ energies */
    VLJ6_S0  = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S0, gmx_mul_pr(c6_S0, sh_invrc6_S)));
    VLJ6_S1  = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S1, gmx_mul_pr(c6_S1, sh_invrc6_S)));
    VLJ6_S2  = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S2, gmx_mul_pr(c6_S2, sh_invrc6_S)));
    VLJ6_S3  = gmx_mul_pr(sixth_S, gmx_sub_pr(FrLJ6_S3, gmx_mul_pr(c6_S3, sh_invrc6_S)));

    VLJ12_S0 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S0, gmx_mul_pr(c12_S0, sh_invrc12_S)));
    VLJ12_S1 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S1, gmx_mul_pr(c12_S1, sh_invrc12_S)));
    VLJ12_S2 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S2, gmx_mul_pr(c12_S2, sh_invrc12_S)));
    VLJ12_S3 = gmx_mul_pr(twelveth_S, gmx_sub_pr(FrLJ12_S3, gmx_mul_pr(c12_S3, sh_invrc12_S)));

    VLJ_S0 = gmx_sub_pr(VLJ12_S0, VLJ6_S0);
    VLJ_S1 = gmx_sub_pr(VLJ12_S1, VLJ6_S1);
    VLJ_S2 = gmx_sub_pr(VLJ12_S2, VLJ6_S2);
    VLJ_S3 = gmx_sub_pr(VLJ12_S3, VLJ6_S3);

#ifdef VDW_CUTOFF_CHECK
    /* The potential shift should be removed for pairs beyond cut-off */
    VLJ_S0 = gmx_blendzero_pr(VLJ_S0, wco_vdw_S0);
    VLJ_S1 = gmx_blendzero_pr(VLJ_S1, wco_vdw_S1);
    VLJ_S2 = gmx_blendzero_pr(VLJ_S2, wco_vdw_S2);
    VLJ_S3 = gmx_blendzero_pr(VLJ_S3, wco_vdw_S3);
#endif
#ifdef CHECK_EXCLS
    /* The potential shift should be removed for excluded pairs */
    VLJ_S0 = gmx_blendzero_pr(VLJ_S0, interact_S0);
    VLJ_S1 = gmx_blendzero_pr(VLJ_S1, interact_S1);
    VLJ_S2 = gmx_blendzero_pr(VLJ_S2, interact_S2);
    VLJ_S3 = gmx_blendzero_pr(VLJ_S3, interact_S3);
#endif
#ifndef ENERGY_GROUPS
#ifndef HALF_LJ
    Vvdwtot_S = gmx_add_pr(Vvdwtot_S,
                           gmx_sum4_pr(VLJ_S0, VLJ_S1, VLJ_S2, VLJ_S3));
#else
    Vvdwtot_S = gmx_add_pr(Vvdwtot_S,
                           gmx_add_pr(VLJ_S0, VLJ_S1));
#endif
#else
    add_ener_grp(VLJ_S0, vvdwtp[0], egp_jj);
    add_ener_grp(VLJ_S1, vvdwtp[1], egp_jj);
    add_ener_grp(VLJ_S2, vvdwtp[2], egp_jj);
    add_ener_grp(VLJ_S3, vvdwtp[3], egp_jj);
#endif
#endif /* CALC_LJ */
#endif /* CALC_ENERGIES */
#ifdef CALC_LJ
#ifdef CALC_COULOMB
    fscal_S0 = gmx_mul_pr(rinvsq_S0,
                          gmx_add_pr(frcoul_S0,
                                     gmx_sub_pr(FrLJ12_S0, FrLJ6_S0)));
#else
    fscal_S0 = gmx_mul_pr(rinvsq_S0,
                          gmx_sub_pr(FrLJ12_S0, FrLJ6_S0));
#endif
#ifdef CALC_COULOMB
    fscal_S1 = gmx_mul_pr(rinvsq_S1,
                          gmx_add_pr(frcoul_S1,
                                     gmx_sub_pr(FrLJ12_S1, FrLJ6_S1)));
#else
    fscal_S1 = gmx_mul_pr(rinvsq_S1,
                          gmx_sub_pr(FrLJ12_S1, FrLJ6_S1));
#endif
#else
    fscal_S0 = gmx_mul_pr(rinvsq_S0, frcoul_S0);
    fscal_S1 = gmx_mul_pr(rinvsq_S1, frcoul_S1);
#endif /* CALC_LJ */
#if defined CALC_LJ && !defined HALF_LJ
#ifdef CALC_COULOMB
    fscal_S2 = gmx_mul_pr(rinvsq_S2,
                          gmx_add_pr(frcoul_S2,
                                     gmx_sub_pr(FrLJ12_S2, FrLJ6_S2)));
#else
    fscal_S2 = gmx_mul_pr(rinvsq_S2,
                          gmx_sub_pr(FrLJ12_S2, FrLJ6_S2));
#endif
#ifdef CALC_COULOMB
    fscal_S3 = gmx_mul_pr(rinvsq_S3,
                          gmx_add_pr(frcoul_S3,
                                     gmx_sub_pr(FrLJ12_S3, FrLJ6_S3)));
#else
    fscal_S3 = gmx_mul_pr(rinvsq_S3,
                          gmx_sub_pr(FrLJ12_S3, FrLJ6_S3));
#endif
#else
    /* Atoms 2 and 3 don't have LJ, so only add Coulomb forces */
    fscal_S2 = gmx_mul_pr(rinvsq_S2, frcoul_S2);
    fscal_S3 = gmx_mul_pr(rinvsq_S3, frcoul_S3);
#endif
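/* Why fscal multiplies by rinvsq: the loop keeps force*r for every term
 * (frcoul and FrLJ12 - FrLJ6), so fscal = (force*r)/r^2 = F(r)/r, which is
 * exactly the factor that turns the distance vector into the force vector
 * in the tx/ty/tz lines below. Scalar sketch only:
 */
#if 0
static void sketch_force_vector(real fscal, const real dx[3], real fv[3])
{
    int d;

    for (d = 0; d < 3; d++)
    {
        fv[d] = fscal*dx[d]; /* f = F(r)/r * (xi - xj) */
    }
}
#endif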
    /* Calculate temporary vectorial force */
    tx_S0 = gmx_mul_pr(fscal_S0, dx_S0);
    tx_S1 = gmx_mul_pr(fscal_S1, dx_S1);
    tx_S2 = gmx_mul_pr(fscal_S2, dx_S2);
    tx_S3 = gmx_mul_pr(fscal_S3, dx_S3);
    ty_S0 = gmx_mul_pr(fscal_S0, dy_S0);
    ty_S1 = gmx_mul_pr(fscal_S1, dy_S1);
    ty_S2 = gmx_mul_pr(fscal_S2, dy_S2);
    ty_S3 = gmx_mul_pr(fscal_S3, dy_S3);
    tz_S0 = gmx_mul_pr(fscal_S0, dz_S0);
    tz_S1 = gmx_mul_pr(fscal_S1, dz_S1);
    tz_S2 = gmx_mul_pr(fscal_S2, dz_S2);
    tz_S3 = gmx_mul_pr(fscal_S3, dz_S3);

    /* Increment i atom force */
    fix_S0 = gmx_add_pr(fix_S0, tx_S0);
    fix_S1 = gmx_add_pr(fix_S1, tx_S1);
    fix_S2 = gmx_add_pr(fix_S2, tx_S2);
    fix_S3 = gmx_add_pr(fix_S3, tx_S3);
    fiy_S0 = gmx_add_pr(fiy_S0, ty_S0);
    fiy_S1 = gmx_add_pr(fiy_S1, ty_S1);
    fiy_S2 = gmx_add_pr(fiy_S2, ty_S2);
    fiy_S3 = gmx_add_pr(fiy_S3, ty_S3);
    fiz_S0 = gmx_add_pr(fiz_S0, tz_S0);
    fiz_S1 = gmx_add_pr(fiz_S1, tz_S1);
    fiz_S2 = gmx_add_pr(fiz_S2, tz_S2);
    fiz_S3 = gmx_add_pr(fiz_S3, tz_S3);
    /* Decrement j atom force */
    gmx_store_pr(f+ajx,
                 gmx_sub_pr( gmx_load_pr(f+ajx), gmx_sum4_pr(tx_S0, tx_S1, tx_S2, tx_S3) ));
    gmx_store_pr(f+ajy,
                 gmx_sub_pr( gmx_load_pr(f+ajy), gmx_sum4_pr(ty_S0, ty_S1, ty_S2, ty_S3) ));
    gmx_store_pr(f+ajz,
                 gmx_sub_pr( gmx_load_pr(f+ajz), gmx_sum4_pr(tz_S0, tz_S1, tz_S2, tz_S3) ));
}

#undef rinv_ex_S0
#undef rinv_ex_S1
#undef rinv_ex_S2
#undef rinv_ex_S3

#undef wco_vdw_S0
#undef wco_vdw_S1
#undef wco_vdw_S2
#undef wco_vdw_S3

#undef NBNXN_CUTOFF_USE_BLENDV