/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
    const nbnxn_ci_t *nbln;
    const nbnxn_cj_t *l_cj;
    const real       *nbfp0, *nbfp1, *nbfp2 = NULL, *nbfp3 = NULL;
    gmx_bool          do_LJ, half_LJ, do_coul;
    int               sci, scix, sciy, sciz, sci2;
    int               cjind0, cjind1, cjind;
    int               egps_ishift, egps_imask;
    int               egps_jshift, egps_jmask, egps_jstride;
    /* Per-i-atom pointers into the energy-group blocks of Vvdw and Vc */
    real             *vvdwtp[UNROLLI];
    real             *vctp[UNROLLI];
    gmx_mm_pr         ix_S0, iy_S0, iz_S0;
    gmx_mm_pr         ix_S1, iy_S1, iz_S1;
    gmx_mm_pr         ix_S2, iy_S2, iz_S2;
    gmx_mm_pr         ix_S3, iy_S3, iz_S3;
    gmx_mm_pr         fix_S0, fiy_S0, fiz_S0;
    gmx_mm_pr         fix_S1, fiy_S1, fiz_S1;
    gmx_mm_pr         fix_S2, fiy_S2, fiz_S2;
    gmx_mm_pr         fix_S3, fiy_S3, fiz_S3;
#if UNROLLJ >= 4
    /* We use an i-force SIMD register width of 4 */
    gmx_mm_pr4        fix_S, fiy_S, fiz_S;
#else
    /* We use an i-force SIMD register width of 2 */
    gmx_mm_pr         fix0_S, fiy0_S, fiz0_S;
    gmx_mm_pr         fix2_S, fiy2_S, fiz2_S;
#endif
    gmx_mm_pr         diagonal_jmi_S;
#if UNROLLI == UNROLLJ
    gmx_mm_pb         diagonal_mask_S0, diagonal_mask_S1, diagonal_mask_S2, diagonal_mask_S3;
#else
    gmx_mm_pb         diagonal_mask0_S0, diagonal_mask0_S1, diagonal_mask0_S2, diagonal_mask0_S3;
    gmx_mm_pb         diagonal_mask1_S0, diagonal_mask1_S1, diagonal_mask1_S2, diagonal_mask1_S3;
#endif

    unsigned         *exclusion_filter;
    gmx_exclfilter    filter_S0, filter_S1, filter_S2, filter_S3;
    gmx_mm_pr         zero_S = gmx_set1_pr(0.0);

    gmx_mm_pr         one_S  = gmx_set1_pr(1.0);
    gmx_mm_pr         iq_S0  = gmx_setzero_pr();
    gmx_mm_pr         iq_S1  = gmx_setzero_pr();
    gmx_mm_pr         iq_S2  = gmx_setzero_pr();
    gmx_mm_pr         iq_S3  = gmx_setzero_pr();
#ifdef CALC_COUL_RF
    gmx_mm_pr         mrc_3_S;
    gmx_mm_pr         hrc_3_S, moh_rc_S;
#endif

#ifdef CALC_COUL_TAB
    /* Coulomb table variables */
    gmx_mm_pr         invtsp_S, mhalfsp_S;
    const real       *tab_coul_F;
#ifndef TAB_FDV0
    const real       *tab_coul_V;
#endif
    /* Thread-local working buffers for force and potential lookups */
    int               ti0_array[2*GMX_SIMD_WIDTH_HERE], *ti0 = NULL;
    int               ti1_array[2*GMX_SIMD_WIDTH_HERE], *ti1 = NULL;
    int               ti2_array[2*GMX_SIMD_WIDTH_HERE], *ti2 = NULL;
    int               ti3_array[2*GMX_SIMD_WIDTH_HERE], *ti3 = NULL;
#endif /* CALC_COUL_TAB */
#ifdef CALC_COUL_EWALD
    gmx_mm_pr         beta2_S, beta_S;
#endif

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
    gmx_mm_pr         sh_ewald_S;
#endif
#ifdef LJ_COMB_LB
    /* i-cluster LJ parameters for the Lorentz-Berthelot combination rule */
    gmx_mm_pr         hsig_i_S0, seps_i_S0;
    gmx_mm_pr         hsig_i_S1, seps_i_S1;
    gmx_mm_pr         hsig_i_S2, seps_i_S2;
    gmx_mm_pr         hsig_i_S3, seps_i_S3;
#else
#ifdef FIX_LJ_C
    real              pvdw_array[2*UNROLLI*UNROLLJ+3];
    real             *pvdw_c6, *pvdw_c12;
    gmx_mm_pr         c6_S0, c12_S0;
    gmx_mm_pr         c6_S1, c12_S1;
    gmx_mm_pr         c6_S2, c12_S2;
    gmx_mm_pr         c6_S3, c12_S3;
#endif /* FIX_LJ_C */
#ifdef LJ_COMB_GEOM
    gmx_mm_pr         c6s_S0, c12s_S0;
    gmx_mm_pr         c6s_S1, c12s_S1;
    gmx_mm_pr         c6s_S2 = gmx_setzero_pr(), c12s_S2 = gmx_setzero_pr();
    gmx_mm_pr         c6s_S3 = gmx_setzero_pr(), c12s_S3 = gmx_setzero_pr();
#endif /* LJ_COMB_GEOM */
#endif /* LJ_COMB_LB */
#ifdef CALC_ENERGIES
    gmx_mm_pr         vctot_S, Vvdwtot_S;
    gmx_mm_pr         sixth_S, twelveth_S;
#endif

    gmx_mm_pr         avoid_sing_S;
    gmx_mm_pr         rc2_S;
#ifdef VDW_CUTOFF_CHECK
    gmx_mm_pr         rcvdw2_S;
#endif

#ifdef CALC_ENERGIES
    gmx_mm_pr         sh_invrc6_S, sh_invrc12_S;

    /* cppcheck-suppress unassignedVariable */
    real              tmpsum_array[15], *tmpsum;
#endif
#ifdef CALC_SHIFTFORCES
    /* cppcheck-suppress unassignedVariable */
    real              shf_array[15], *shf;
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
    ljc = nbat->lj_comb;
#else
    /* No combination rule used */
    nbfp_ptr = (4 == nbfp_stride) ? nbat->nbfp_s4 : nbat->nbfp;
#endif
    /* Load j-i for the first i */
    diagonal_jmi_S    = gmx_load_pr(nbat->simd_4xn_diagonal_j_minus_i);
    /* Generate all the diagonal masks as comparison results */
#if UNROLLI == UNROLLJ
    diagonal_mask_S0  = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask_S1  = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask_S2  = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask_S3  = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
#else
#if UNROLLI == 2*UNROLLJ || 2*UNROLLI == UNROLLJ
    diagonal_mask0_S0 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask0_S1 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask0_S2 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask0_S3 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);

#if UNROLLI == 2*UNROLLJ
    /* Load j-i for the second half of the j-cluster */
    diagonal_jmi_S    = gmx_load_pr(nbat->simd_4xn_diagonal_j_minus_i + UNROLLJ);
#endif

    diagonal_mask1_S0 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask1_S1 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask1_S2 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask1_S3 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
#endif
#endif
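
    /* Worked example (a sketch, assuming UNROLLI == UNROLLJ == 4):
     * simd_4xn_diagonal_j_minus_i then holds {0, 1, 2, 3}, i.e. j - i for
     * the first i. The mask for i-row k tests (0 < j - i - k), so:
     *   diagonal_mask_S0 = {F, T, T, T}
     *   diagonal_mask_S1 = {F, F, T, T}
     *   diagonal_mask_S2 = {F, F, F, T}
     *   diagonal_mask_S3 = {F, F, F, F}
     * which removes the j <= i half of the diagonal cluster pair, so each
     * in-cluster pair is counted exactly once.
     */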
    /* Load masks for topology exclusion masking. filter_stride is
       static const, so the conditional will be optimized away. */
    if (1 == filter_stride)
    {
        exclusion_filter = nbat->simd_exclusion_filter1;
    }
    else /* (2 == filter_stride) */
    {
        exclusion_filter = nbat->simd_exclusion_filter2;
    }

    /* Here we cast the exclusion filters from unsigned * to int * or real *.
     * Since we only check bits, the actual value they represent does not
     * matter, as long as both filter and mask data are treated the same way.
     */
    filter_S0 = gmx_load_exclusion_filter(exclusion_filter + 0*UNROLLJ*filter_stride);
    filter_S1 = gmx_load_exclusion_filter(exclusion_filter + 1*UNROLLJ*filter_stride);
    filter_S2 = gmx_load_exclusion_filter(exclusion_filter + 2*UNROLLJ*filter_stride);
    filter_S3 = gmx_load_exclusion_filter(exclusion_filter + 3*UNROLLJ*filter_stride);
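
    /* Illustration (a sketch, not kernel code): each filter_Sk carries one
     * bit per (i,j) pair of i-row k. The inner loop loads the j-entry's
     * exclusion bitfield and keeps a pair only when its filter bit is set,
     * conceptually:
     *
     *   interact_S0 = gmx_checkbitmask_pb(excl_bits_S, filter_S0);
     *
     * The helper name and signature here are illustrative; the actual bit
     * test lives in nbnxn_kernel_simd_4xn_inner.h.
     */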
#ifdef CALC_COUL_TAB
    /* Generate aligned table index pointers */
    ti0 = prepare_table_load_buffer(ti0_array);
    ti1 = prepare_table_load_buffer(ti1_array);
    ti2 = prepare_table_load_buffer(ti2_array);
    ti3 = prepare_table_load_buffer(ti3_array);

    invtsp_S  = gmx_set1_pr(ic->tabq_scale);
#ifdef CALC_ENERGIES
    mhalfsp_S = gmx_set1_pr(-0.5/ic->tabq_scale);
#endif

#ifdef TAB_FDV0
    tab_coul_F = ic->tabq_coul_FDV0;
#else
    tab_coul_F = ic->tabq_coul_F;
    tab_coul_V = ic->tabq_coul_V;
#endif
#endif /* CALC_COUL_TAB */
#ifdef CALC_COUL_EWALD
    beta2_S = gmx_set1_pr(ic->ewaldcoeff*ic->ewaldcoeff);
    beta_S  = gmx_set1_pr(ic->ewaldcoeff);
#endif

#if (defined CALC_COUL_TAB || defined CALC_COUL_EWALD) && defined CALC_ENERGIES
    sh_ewald_S = gmx_set1_pr(ic->sh_ewald);
#endif
    q        = nbat->q;
    type     = nbat->type;
    facel    = ic->epsfac;
    shiftvec = shift_vec[0];
    x        = nbat->x;

    avoid_sing_S = gmx_set1_pr(NBNXN_AVOID_SING_R2_INC);

    /* The kernel either supports rcoulomb = rvdw or rcoulomb >= rvdw */
    rc2_S    = gmx_set1_pr(ic->rcoulomb*ic->rcoulomb);
#ifdef VDW_CUTOFF_CHECK
    rcvdw2_S = gmx_set1_pr(ic->rvdw*ic->rvdw);
#endif

#ifdef CALC_ENERGIES
    sixth_S      = gmx_set1_pr(1.0/6.0);
    twelveth_S   = gmx_set1_pr(1.0/12.0);

    sh_invrc6_S  = gmx_set1_pr(ic->sh_invrc6);
    sh_invrc12_S = gmx_set1_pr(ic->sh_invrc6*ic->sh_invrc6);
#endif
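
    /* For reference (the usual nbnxn convention, stated here for clarity):
     * the stored LJ parameters are premultiplied by 6 and 12, so the pair
     * force needs no extra factors and the energy is recovered as
     *   Vvdw = twelveth*FrLJ12 - sixth*FrLJ6
     * with sh_invrc6 = 1/rc^6 (and its square for the r^-12 term) shifting
     * the potential to zero at the cut-off when CALC_ENERGIES is set.
     */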
#ifdef CALC_COUL_RF
    /* Reaction-field constants */
    mrc_3_S  = gmx_set1_pr(-2*ic->k_rf);
#ifdef CALC_ENERGIES
    hrc_3_S  = gmx_set1_pr(ic->k_rf);
    moh_rc_S = gmx_set1_pr(-ic->c_rf);
#endif
#endif
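
    /* For reference: with reaction-field electrostatics the pair terms are
     *   F(r)/r = qq*(1/r^3 - 2*k_rf)           -> mrc_3_S
     *   V(r)   = qq*(1/r + k_rf*r^2 - c_rf)    -> hrc_3_S, moh_rc_S
     * (standard reaction-field form; the registers are consumed in
     * nbnxn_kernel_simd_4xn_inner.h).
     */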
#ifdef CALC_ENERGIES
    tmpsum = gmx_simd_align_real(tmpsum_array);
#endif
#ifdef CALC_SHIFTFORCES
    shf = gmx_simd_align_real(shf_array);
#endif

#ifdef FIX_LJ_C
    pvdw_c6  = gmx_simd_align_real(pvdw_array+3);
    pvdw_c12 = pvdw_c6 + UNROLLI*UNROLLJ;
    for (jp = 0; jp < UNROLLJ; jp++)
    {
        pvdw_c6 [0*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [1*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [2*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [3*UNROLLJ+jp] = nbat->nbfp[0*2];

        pvdw_c12[0*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[1*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[2*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[3*UNROLLJ+jp] = nbat->nbfp[0*2+1];
    }

    c6_S0  = gmx_load_pr(pvdw_c6 +0*UNROLLJ);
    c6_S1  = gmx_load_pr(pvdw_c6 +1*UNROLLJ);
    c6_S2  = gmx_load_pr(pvdw_c6 +2*UNROLLJ);
    c6_S3  = gmx_load_pr(pvdw_c6 +3*UNROLLJ);

    c12_S0 = gmx_load_pr(pvdw_c12+0*UNROLLJ);
    c12_S1 = gmx_load_pr(pvdw_c12+1*UNROLLJ);
    c12_S2 = gmx_load_pr(pvdw_c12+2*UNROLLJ);
    c12_S3 = gmx_load_pr(pvdw_c12+3*UNROLLJ);
#endif /* FIX_LJ_C */
#ifdef ENERGY_GROUPS
    egps_ishift  = nbat->neg_2log;
    egps_imask   = (1<<egps_ishift) - 1;
    egps_jshift  = 2*nbat->neg_2log;
    egps_jmask   = (1<<egps_jshift) - 1;
    egps_jstride = (UNROLLJ>>1)*UNROLLJ;
    /* Major division is over i-particle energy groups, determine the stride */
    Vstride_i    = nbat->nenergrp*(1<<nbat->neg_2log)*egps_jstride;
#endif
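
    /* Worked example (a sketch): with 4 energy groups, neg_2log = 2 and
     * UNROLLJ = 4, so egps_jstride = (4>>1)*4 = 8 and
     * Vstride_i = 4*(1<<2)*8 = 128 reals separate consecutive i-group
     * blocks in the Vvdw/Vc buffers; vvdwtp[ia]/vctp[ia] later point
     * egp_ia such blocks into them.
     */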
    l_cj = nbl->cj;

    ninner = 0;
    for (n = 0; n < nbl->nci; n++)
    {
        nbln = &nbl->ci[n];

        ish    = (nbln->shift & NBNXN_CI_SHIFT);
        ish3   = ish*3;
        cjind0 = nbln->cj_ind_start;
        cjind1 = nbln->cj_ind_end;
        ci     = nbln->ci;
        ci_sh  = (ish == CENTRAL ? ci : -1);

        shX_S = gmx_load1_pr(shiftvec+ish3);
        shY_S = gmx_load1_pr(shiftvec+ish3+1);
        shZ_S = gmx_load1_pr(shiftvec+ish3+2);

#if UNROLLJ <= 4
        sci   = ci*STRIDE;
        scix  = sci*DIM;
        sci2  = sci*2;
#else
        sci   = (ci>>1)*STRIDE;
        scix  = sci*DIM + (ci & 1)*(STRIDE>>1);
        sci2  = sci*2 + (ci & 1)*(STRIDE>>1);
        sci  += (ci & 1)*(STRIDE>>1);
#endif
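
        /* Worked example (a sketch, assuming UNROLLJ == 8 and STRIDE == 8):
         * i-cluster ci = 5 shares the 8-wide coordinate stride with ci = 4,
         * so sci = (5>>1)*8 = 16 selects the shared block and the
         * (ci & 1)*(STRIDE>>1) = 4 offset selects its upper half; scix,
         * sciy and sciz then index the x/y/z rows of that block in the
         * xxxxyyyyzzzz coordinate layout.
         */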
        /* We have 5 LJ/C combinations, but use only three inner loops,
         * as the other combinations are unlikely and/or not much faster:
         * inner half-LJ + C for half-LJ + C / no-LJ + C
         * inner LJ + C    for full-LJ + C
         * inner LJ        for full-LJ + no-C / half-LJ + no-C
         */
        do_LJ   = (nbln->shift & NBNXN_CI_DO_LJ(0));
        do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
        half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;
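
        /* For clarity (a summary of the dispatch in the branches below):
         * half_LJ is forced on when do_coul is set but do_LJ is not, which
         * folds the no-LJ + C case into the half-LJ + C inner loop; the
         * full-LJ and half-LJ no-C cases both take the LJ-only inner loop.
         */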
#ifdef ENERGY_GROUPS
        egps_i = nbat->energrp[ci];
        {
            int ia, egp_ia;

            for (ia = 0; ia < UNROLLI; ia++)
            {
                egp_ia     = (egps_i >> (ia*egps_ishift)) & egps_imask;
                vvdwtp[ia] = Vvdw + egp_ia*Vstride_i;
                vctp[ia]   = Vc   + egp_ia*Vstride_i;
            }
        }
#endif
#if defined CALC_ENERGIES
        /* Diagonal cluster pair: correct for the Coulomb self-interaction */
#if UNROLLJ == 4
        if (do_coul && l_cj[nbln->cj_ind_start].cj == ci_sh)
#endif
#if UNROLLJ == 2
        if (do_coul && l_cj[nbln->cj_ind_start].cj == (ci_sh<<1))
#endif
#if UNROLLJ == 8
        if (do_coul && l_cj[nbln->cj_ind_start].cj == (ci_sh>>1))
#endif
        {
            int  ia;
            real Vc_sub_self;

#ifdef CALC_COUL_RF
            Vc_sub_self = 0.5*ic->c_rf;
#endif
#ifdef CALC_COUL_TAB
#ifdef TAB_FDV0
            Vc_sub_self = 0.5*tab_coul_F[2];
#else
            Vc_sub_self = 0.5*tab_coul_V[0];
#endif
#endif
#ifdef CALC_COUL_EWALD
            /* beta/sqrt(pi) */
            Vc_sub_self = 0.5*ic->ewaldcoeff*M_2_SQRTPI;
#endif

            for (ia = 0; ia < UNROLLI; ia++)
            {
                real qi;

                qi = q[sci+ia];
#ifdef ENERGY_GROUPS
                vctp[ia][((egps_i>>(ia*egps_ishift)) & egps_imask)*egps_jstride]
#else
                Vc[0]
#endif
                    -= facel*qi*qi*Vc_sub_self;
            }
        }
#endif
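
        /* Why this correction (for reference): the diagonal masks remove the
         * i == j self "pair" from the inner loop, but RF, tabulated and
         * Ewald electrostatics all have a finite potential contribution at
         * r = 0 (c_rf for RF, the table value at zero, and
         * erf(beta*r)/r -> 2*beta/sqrt(pi) for Ewald). That per-atom self
         * energy, 0.5*facel*qi*qi times the r = 0 value, is applied here
         * once per diagonal cluster pair instead.
         */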
        /* Load i atom data */
        sciy  = scix + STRIDE;
        sciz  = sciy + STRIDE;
        ix_S0 = gmx_add_pr(gmx_load1_pr(x+scix), shX_S);
        ix_S1 = gmx_add_pr(gmx_load1_pr(x+scix+1), shX_S);
        ix_S2 = gmx_add_pr(gmx_load1_pr(x+scix+2), shX_S);
        ix_S3 = gmx_add_pr(gmx_load1_pr(x+scix+3), shX_S);
        iy_S0 = gmx_add_pr(gmx_load1_pr(x+sciy), shY_S);
        iy_S1 = gmx_add_pr(gmx_load1_pr(x+sciy+1), shY_S);
        iy_S2 = gmx_add_pr(gmx_load1_pr(x+sciy+2), shY_S);
        iy_S3 = gmx_add_pr(gmx_load1_pr(x+sciy+3), shY_S);
        iz_S0 = gmx_add_pr(gmx_load1_pr(x+sciz), shZ_S);
        iz_S1 = gmx_add_pr(gmx_load1_pr(x+sciz+1), shZ_S);
        iz_S2 = gmx_add_pr(gmx_load1_pr(x+sciz+2), shZ_S);
        iz_S3 = gmx_add_pr(gmx_load1_pr(x+sciz+3), shZ_S);

        /* Load i atom charges, premultiplied by the Coulomb prefactor */
        iq_S0 = gmx_set1_pr(facel*q[sci]);
        iq_S1 = gmx_set1_pr(facel*q[sci+1]);
        iq_S2 = gmx_set1_pr(facel*q[sci+2]);
        iq_S3 = gmx_set1_pr(facel*q[sci+3]);
#ifdef LJ_COMB_LB
        hsig_i_S0 = gmx_load1_pr(ljc+sci2+0);
        hsig_i_S1 = gmx_load1_pr(ljc+sci2+1);
        hsig_i_S2 = gmx_load1_pr(ljc+sci2+2);
        hsig_i_S3 = gmx_load1_pr(ljc+sci2+3);
        seps_i_S0 = gmx_load1_pr(ljc+sci2+STRIDE+0);
        seps_i_S1 = gmx_load1_pr(ljc+sci2+STRIDE+1);
        seps_i_S2 = gmx_load1_pr(ljc+sci2+STRIDE+2);
        seps_i_S3 = gmx_load1_pr(ljc+sci2+STRIDE+3);
#else
#ifdef LJ_COMB_GEOM
        c6s_S0 = gmx_load1_pr(ljc+sci2+0);
        c6s_S1 = gmx_load1_pr(ljc+sci2+1);
        if (!half_LJ)
        {
            c6s_S2 = gmx_load1_pr(ljc+sci2+2);
            c6s_S3 = gmx_load1_pr(ljc+sci2+3);
        }
        c12s_S0 = gmx_load1_pr(ljc+sci2+STRIDE+0);
        c12s_S1 = gmx_load1_pr(ljc+sci2+STRIDE+1);
        if (!half_LJ)
        {
            c12s_S2 = gmx_load1_pr(ljc+sci2+STRIDE+2);
            c12s_S3 = gmx_load1_pr(ljc+sci2+STRIDE+3);
        }
#else
        nbfp0 = nbfp_ptr + type[sci  ]*nbat->ntype*nbfp_stride;
        nbfp1 = nbfp_ptr + type[sci+1]*nbat->ntype*nbfp_stride;
        if (!half_LJ)
        {
            nbfp2 = nbfp_ptr + type[sci+2]*nbat->ntype*nbfp_stride;
            nbfp3 = nbfp_ptr + type[sci+3]*nbat->ntype*nbfp_stride;
        }
#endif /* LJ_COMB_GEOM */
#endif /* LJ_COMB_LB */
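
        /* Indexing note (a sketch): without combination rules nbfp_ptr is a
         * flat ntype*ntype table with nbfp_stride reals per (i,j) type
         * pair, so after the per-i row offsets above the j-type lookup in
         * the inner loop is conceptually
         *
         *   c6  = nbfp0[type[j]*nbfp_stride];
         *   c12 = nbfp0[type[j]*nbfp_stride+1];
         *
         * (scalar form for illustration; the kernel gathers these in SIMD).
         */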
#ifdef CALC_ENERGIES
        /* Zero the potential energy for this list */
        Vvdwtot_S = gmx_setzero_pr();
        vctot_S   = gmx_setzero_pr();
#endif

        /* Clear i atom forces */
        fix_S0 = gmx_setzero_pr();
        fix_S1 = gmx_setzero_pr();
        fix_S2 = gmx_setzero_pr();
        fix_S3 = gmx_setzero_pr();
        fiy_S0 = gmx_setzero_pr();
        fiy_S1 = gmx_setzero_pr();
        fiy_S2 = gmx_setzero_pr();
        fiy_S3 = gmx_setzero_pr();
        fiz_S0 = gmx_setzero_pr();
        fiz_S1 = gmx_setzero_pr();
        fiz_S2 = gmx_setzero_pr();
        fiz_S3 = gmx_setzero_pr();
        cjind = cjind0;

        /* Currently all kernels use (at least half) LJ */
#define CALC_LJ
        if (half_LJ)
        {
#define CALC_COULOMB
#define HALF_LJ
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
            }
#undef HALF_LJ
#undef CALC_COULOMB
        }
        else if (do_coul)
        {
#define CALC_COULOMB
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
            }
#undef CALC_COULOMB
        }
        else
        {
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
            }
        }
#undef CALC_LJ
        ninner += cjind1 - cjind0;
        /* Add accumulated i-forces to the force array */
#if UNROLLJ >= 4
        fix_S = gmx_mm_transpose_sum4_pr(fix_S0, fix_S1, fix_S2, fix_S3);
        gmx_store_pr4(f+scix, gmx_add_pr4(fix_S, gmx_load_pr4(f+scix)));

        fiy_S = gmx_mm_transpose_sum4_pr(fiy_S0, fiy_S1, fiy_S2, fiy_S3);
        gmx_store_pr4(f+sciy, gmx_add_pr4(fiy_S, gmx_load_pr4(f+sciy)));

        fiz_S = gmx_mm_transpose_sum4_pr(fiz_S0, fiz_S1, fiz_S2, fiz_S3);
        gmx_store_pr4(f+sciz, gmx_add_pr4(fiz_S, gmx_load_pr4(f+sciz)));
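
        /* For reference: each fix_Sk holds the per-j-lane partial forces on
         * i-atom k. gmx_mm_transpose_sum4_pr transposes the four registers
         * and adds them, so lane k of the 4-wide result is the total force
         * on i-atom k, ready for one aligned read-modify-write of
         * f[scix..scix+3].
         */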
#ifdef CALC_SHIFTFORCES
        fshift[ish3+0] += gmx_sum_simd4(fix_S, shf);
        fshift[ish3+1] += gmx_sum_simd4(fiy_S, shf);
        fshift[ish3+2] += gmx_sum_simd4(fiz_S, shf);
#endif
#else
        fix0_S = gmx_mm_transpose_sum2_pr(fix_S0, fix_S1);
        gmx_store_pr(f+scix, gmx_add_pr(fix0_S, gmx_load_pr(f+scix)));
        fix2_S = gmx_mm_transpose_sum2_pr(fix_S2, fix_S3);
        gmx_store_pr(f+scix+2, gmx_add_pr(fix2_S, gmx_load_pr(f+scix+2)));

        fiy0_S = gmx_mm_transpose_sum2_pr(fiy_S0, fiy_S1);
        gmx_store_pr(f+sciy, gmx_add_pr(fiy0_S, gmx_load_pr(f+sciy)));
        fiy2_S = gmx_mm_transpose_sum2_pr(fiy_S2, fiy_S3);
        gmx_store_pr(f+sciy+2, gmx_add_pr(fiy2_S, gmx_load_pr(f+sciy+2)));

        fiz0_S = gmx_mm_transpose_sum2_pr(fiz_S0, fiz_S1);
        gmx_store_pr(f+sciz, gmx_add_pr(fiz0_S, gmx_load_pr(f+sciz)));
        fiz2_S = gmx_mm_transpose_sum2_pr(fiz_S2, fiz_S3);
        gmx_store_pr(f+sciz+2, gmx_add_pr(fiz2_S, gmx_load_pr(f+sciz+2)));

#ifdef CALC_SHIFTFORCES
        fshift[ish3+0] += gmx_sum_simd2(gmx_add_pr(fix0_S, fix2_S), shf);
        fshift[ish3+1] += gmx_sum_simd2(gmx_add_pr(fiy0_S, fiy2_S), shf);
        fshift[ish3+2] += gmx_sum_simd2(gmx_add_pr(fiz0_S, fiz2_S), shf);
#endif
#endif /* UNROLLJ >= 4 */
#ifdef CALC_ENERGIES
        if (do_coul)
        {
            *Vc += gmx_sum_simd(vctot_S, tmpsum);
        }

        *Vvdw += gmx_sum_simd(Vvdwtot_S, tmpsum);
#endif
    }

    /* Outer loop uses 6 flops/iteration */

#ifdef COUNT_PAIRS
    printf("atom pairs %d\n", npair);
#endif