/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
 * Copyright (c) 2001-2009, The GROMACS Development Team
 * Copyright (c) 2012,2013, by the GROMACS development team, led by
 * David van der Spoel, Berk Hess, Erik Lindahl, and including many
 * others, as listed in the AUTHORS file in the top-level source
 * directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
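
/* This is the outer-loop part of the 4xN SIMD non-bonded kernel. It is
 * not compiled stand-alone: the including kernel source file first sets
 * the preprocessor defines (CALC_COUL_RF/TAB/EWALD, LJ_COMB_GEOM/LB,
 * CALC_ENERGIES, ENERGY_GROUPS, ...) that select the kernel flavor.
 */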
    const nbnxn_ci_t   *nbln;
    const nbnxn_cj_t   *l_cj;
    const int          *type;
    const real         *q;
    const real         *shiftvec, *x;
    const real         *nbfp0, *nbfp1, *nbfp2 = NULL, *nbfp3 = NULL;
    real                facel;
    real               *nbfp_ptr;
    int                 n, ci, ci_sh;
    int                 ish, ish3;
    gmx_bool            do_LJ, half_LJ, do_coul;
    int                 sci, scix, sciy, sciz, sci2;
    int                 cjind0, cjind1, cjind;
    int                 jp;

#ifdef ENERGY_GROUPS
    int                 Vstride_i;
    int                 egps_ishift, egps_imask;
    int                 egps_jshift, egps_jmask, egps_jstride;
    int                 egps_i;
    real               *vvdwtp[UNROLLI];
    real               *vctp[UNROLLI];
#endif

    gmx_mm_pr           shX_S;
    gmx_mm_pr           shY_S;
    gmx_mm_pr           shZ_S;
    gmx_mm_pr  ix_S0, iy_S0, iz_S0;
    gmx_mm_pr  ix_S1, iy_S1, iz_S1;
    gmx_mm_pr  ix_S2, iy_S2, iz_S2;
    gmx_mm_pr  ix_S3, iy_S3, iz_S3;
    gmx_mm_pr  fix_S0, fiy_S0, fiz_S0;
    gmx_mm_pr  fix_S1, fiy_S1, fiz_S1;
    gmx_mm_pr  fix_S2, fiy_S2, fiz_S2;
    gmx_mm_pr  fix_S3, fiy_S3, fiz_S3;
#if UNROLLJ >= 4
    /* We use an i-force SIMD register width of 4 */
#if UNROLLJ == 4
#define gmx_mm_pr4    gmx_mm_pr
#define gmx_load_pr4  gmx_load_pr
#define gmx_store_pr4 gmx_store_pr
#define gmx_add_pr4   gmx_add_pr
#else
    /* The pr4 stuff is defined in nbnxn_kernel_simd_utils.h */
#endif
    gmx_mm_pr4 fix_S, fiy_S, fiz_S;
#else
    /* We use an i-force SIMD register width of 2 */
    gmx_mm_pr  fix0_S, fiy0_S, fiz0_S;
    gmx_mm_pr  fix2_S, fiy2_S, fiz2_S;
#endif
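
/* Note: with a register width of 4 (or more) the three force components of
 * the four i atoms can be reduced with a single 4x4 transpose-sum each;
 * with a width of 2 the reduction at the end of the outer loop instead
 * pairs the accumulation registers.
 */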
    gmx_mm_pr      diagonal_jmi_S;
#if UNROLLI == UNROLLJ
    gmx_mm_pb      diagonal_mask_S0, diagonal_mask_S1, diagonal_mask_S2, diagonal_mask_S3;
#else
    gmx_mm_pb      diagonal_mask0_S0, diagonal_mask0_S1, diagonal_mask0_S2, diagonal_mask0_S3;
    gmx_mm_pb      diagonal_mask1_S0, diagonal_mask1_S1, diagonal_mask1_S2, diagonal_mask1_S3;
#endif

    unsigned      *exclusion_filter;
    gmx_exclfilter filter_S0, filter_S1, filter_S2, filter_S3;
    gmx_mm_pr      zero_S = gmx_set1_pr(0.0);

    gmx_mm_pr      one_S  = gmx_set1_pr(1.0);
    gmx_mm_pr      iq_S0  = gmx_setzero_pr();
    gmx_mm_pr      iq_S1  = gmx_setzero_pr();
    gmx_mm_pr      iq_S2  = gmx_setzero_pr();
    gmx_mm_pr      iq_S3  = gmx_setzero_pr();
    gmx_mm_pr      mrc_3_S;
#ifdef CALC_ENERGIES
    gmx_mm_pr      hrc_3_S, moh_rc_S;
#endif

#ifdef CALC_COUL_TAB
    /* Coulomb table variables */
    gmx_mm_pr      invtsp_S;
    const real    *tab_coul_F;
#ifndef TAB_FDV0
    const real    *tab_coul_V;
#endif

    /* Thread-local working buffers for force and potential lookups */
    int            ti0_array[2*GMX_SIMD_WIDTH_HERE-1], *ti0 = NULL;
    int            ti1_array[2*GMX_SIMD_WIDTH_HERE-1], *ti1 = NULL;
    int            ti2_array[2*GMX_SIMD_WIDTH_HERE-1], *ti2 = NULL;
    int            ti3_array[2*GMX_SIMD_WIDTH_HERE-1], *ti3 = NULL;
#ifdef CALC_ENERGIES
    gmx_mm_pr      mhalfsp_S;
#endif
#endif /* CALC_COUL_TAB */
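
/* The ti*_array buffers above are over-sized by almost one SIMD width so
 * that prepare_table_load_buffer() can presumably return a SIMD-aligned
 * pointer inside them for the table indices.
 */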
#ifdef CALC_COUL_EWALD
    gmx_mm_pr  beta2_S, beta_S;
#endif

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
    gmx_mm_pr  sh_ewald_S;
#endif
#ifdef LJ_COMB_LB
    const real *ljc;

    gmx_mm_pr   hsig_i_S0, seps_i_S0;
    gmx_mm_pr   hsig_i_S1, seps_i_S1;
    gmx_mm_pr   hsig_i_S2, seps_i_S2;
    gmx_mm_pr   hsig_i_S3, seps_i_S3;
#else
#ifdef FIX_LJ_C
    real        pvdw_array[2*UNROLLI*UNROLLJ+3];
    real       *pvdw_c6, *pvdw_c12;
    gmx_mm_pr   c6_S0, c12_S0;
    gmx_mm_pr   c6_S1, c12_S1;
    gmx_mm_pr   c6_S2, c12_S2;
    gmx_mm_pr   c6_S3, c12_S3;
#endif

#ifdef LJ_COMB_GEOM
    const real *ljc;

    gmx_mm_pr   c6s_S0, c12s_S0;
    gmx_mm_pr   c6s_S1, c12s_S1;
    gmx_mm_pr   c6s_S2 = gmx_setzero_pr(), c12s_S2 = gmx_setzero_pr();
    gmx_mm_pr   c6s_S3 = gmx_setzero_pr(), c12s_S3 = gmx_setzero_pr();
#endif
#endif /* LJ_COMB_LB */
    gmx_mm_pr  vctot_S, Vvdwtot_S;
    gmx_mm_pr  sixth_S, twelveth_S;

    gmx_mm_pr  avoid_sing_S;
    gmx_mm_pr  rc2_S;
#ifdef VDW_CUTOFF_CHECK
    gmx_mm_pr  rcvdw2_S;
#endif

#ifdef CALC_ENERGIES
    gmx_mm_pr  sh_invrc6_S, sh_invrc12_S;

    /* cppcheck-suppress unassignedVariable */
    real       tmpsum_array[15], *tmpsum;
#endif
#ifdef CALC_SHIFTFORCES
    /* cppcheck-suppress unassignedVariable */
    real       shf_array[15], *shf;
#endif

    int        ninner;

#ifdef COUNT_PAIRS
    int        npair = 0;
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
    ljc = nbat->lj_comb;
#else
    /* No combination rule used */
    nbfp_ptr = (4 == nbfp_stride) ? nbat->nbfp_s4 : nbat->nbfp;
#endif
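
/* With a combination rule, per-atom parameters in nbat->lj_comb are
 * combined into pair parameters on the fly in the inner loop (geometric
 * mean for LJ_COMB_GEOM, Lorentz-Berthelot for LJ_COMB_LB); without one,
 * full per-type-pair c6/c12 tables are looked up through nbfp_ptr.
 */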
    /* Load j-i for the first i */
    diagonal_jmi_S    = gmx_load_pr(nbat->simd_4xn_diagonal_j_minus_i);
    /* Generate all the diagonal masks as comparison results */
#if UNROLLI == UNROLLJ
    diagonal_mask_S0  = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask_S1  = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask_S2  = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask_S3  = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
#else
#if UNROLLI == 2*UNROLLJ || 2*UNROLLI == UNROLLJ
    diagonal_mask0_S0 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask0_S1 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask0_S2 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask0_S3 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);

#if UNROLLI == 2*UNROLLJ
    /* Load j-i for the second half of the j-cluster */
    diagonal_jmi_S    = gmx_load_pr(nbat->simd_4xn_diagonal_j_minus_i + UNROLLJ);
#endif

    diagonal_mask1_S0 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask1_S1 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask1_S2 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_sub_pr(diagonal_jmi_S, one_S);
    diagonal_mask1_S3 = gmx_cmplt_pr(zero_S, diagonal_jmi_S);
#endif
#endif
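
/* Each mask generated above is TRUE exactly where j - i > 0 (one is
 * subtracted from the j-i vector per i atom), so on the diagonal
 * cluster pair every interaction is counted once and i==j self pairs
 * are masked out.
 */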
    /* Load masks for topology exclusion masking. filter_stride is
       static const, so the conditional will be optimized away. */
    if (1 == filter_stride)
    {
        exclusion_filter = nbat->simd_exclusion_filter1;
    }
    else /* (2 == filter_stride) */
    {
        exclusion_filter = nbat->simd_exclusion_filter2;
    }

    /* Here we cast the exclusion filters from unsigned * to int * or real *.
     * Since we only check bits, the actual value they represent does not
     * matter, as long as both filter and mask data are treated the same way.
     */
    filter_S0 = gmx_load_exclusion_filter(exclusion_filter + 0*UNROLLJ*filter_stride);
    filter_S1 = gmx_load_exclusion_filter(exclusion_filter + 1*UNROLLJ*filter_stride);
    filter_S2 = gmx_load_exclusion_filter(exclusion_filter + 2*UNROLLJ*filter_stride);
    filter_S3 = gmx_load_exclusion_filter(exclusion_filter + 3*UNROLLJ*filter_stride);
#ifdef CALC_COUL_TAB
    /* Generate aligned table index pointers */
    ti0 = prepare_table_load_buffer(ti0_array);
    ti1 = prepare_table_load_buffer(ti1_array);
    ti2 = prepare_table_load_buffer(ti2_array);
    ti3 = prepare_table_load_buffer(ti3_array);

    invtsp_S  = gmx_set1_pr(ic->tabq_scale);
#ifdef CALC_ENERGIES
    mhalfsp_S = gmx_set1_pr(-0.5/ic->tabq_scale);
#endif

#ifdef TAB_FDV0
    tab_coul_F = ic->tabq_coul_FDV0;
#else
    tab_coul_F = ic->tabq_coul_F;
    tab_coul_V = ic->tabq_coul_V;
#endif
#endif /* CALC_COUL_TAB */
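
/* The FDV0 table packs each point as an aligned quadruplet of force,
 * force difference to the next point, potential and zero padding, so one
 * stride-4 load fetches everything needed; this is why the self-energy
 * correction below reads the r=0 potential as tab_coul_F[2].
 */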
#ifdef CALC_COUL_EWALD
    beta2_S = gmx_set1_pr(ic->ewaldcoeff*ic->ewaldcoeff);
    beta_S  = gmx_set1_pr(ic->ewaldcoeff);
#endif

#if (defined CALC_COUL_TAB || defined CALC_COUL_EWALD) && defined CALC_ENERGIES
    sh_ewald_S = gmx_set1_pr(ic->sh_ewald);
#endif

    q        = nbat->q;
    type     = nbat->type;
    facel    = ic->epsfac;
    shiftvec = shift_vec[0];
    x        = nbat->x;
    avoid_sing_S = gmx_set1_pr(NBNXN_AVOID_SING_R2_INC);

    /* The kernel supports either rcoulomb = rvdw or rcoulomb >= rvdw */
    rc2_S    = gmx_set1_pr(ic->rcoulomb*ic->rcoulomb);
#ifdef VDW_CUTOFF_CHECK
    rcvdw2_S = gmx_set1_pr(ic->rvdw*ic->rvdw);
#endif

#ifdef CALC_ENERGIES
    sixth_S      = gmx_set1_pr(1.0/6.0);
    twelveth_S   = gmx_set1_pr(1.0/12.0);

    sh_invrc6_S  = gmx_set1_pr(ic->sh_invrc6);
    sh_invrc12_S = gmx_set1_pr(ic->sh_invrc6*ic->sh_invrc6);
#endif

    mrc_3_S  = gmx_set1_pr(-2*ic->k_rf);

#ifdef CALC_ENERGIES
    hrc_3_S  = gmx_set1_pr(ic->k_rf);

    moh_rc_S = gmx_set1_pr(-ic->c_rf);
#endif
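
/* Reaction-field constants: the inner loop computes a Coulomb interaction
 * of roughly the form V ~ qq*(1/r + k_rf*r^2 - c_rf), with force factor
 * qq*(1/r^3 - 2*k_rf), which is why -2*k_rf, k_rf and -c_rf are
 * broadcast into SIMD registers here.
 */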
#ifdef CALC_ENERGIES
    tmpsum = gmx_simd_align_real(tmpsum_array);
#endif
#ifdef CALC_SHIFTFORCES
    shf = gmx_simd_align_real(shf_array);
#endif
#ifdef FIX_LJ_C
    pvdw_c6  = gmx_simd_align_real(pvdw_array+3);
    pvdw_c12 = pvdw_c6 + UNROLLI*UNROLLJ;

    for (jp = 0; jp < UNROLLJ; jp++)
    {
        pvdw_c6 [0*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [1*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [2*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [3*UNROLLJ+jp] = nbat->nbfp[0*2];

        pvdw_c12[0*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[1*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[2*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[3*UNROLLJ+jp] = nbat->nbfp[0*2+1];
    }
    c6_S0  = gmx_load_pr(pvdw_c6 +0*UNROLLJ);
    c6_S1  = gmx_load_pr(pvdw_c6 +1*UNROLLJ);
    c6_S2  = gmx_load_pr(pvdw_c6 +2*UNROLLJ);
    c6_S3  = gmx_load_pr(pvdw_c6 +3*UNROLLJ);

    c12_S0 = gmx_load_pr(pvdw_c12+0*UNROLLJ);
    c12_S1 = gmx_load_pr(pvdw_c12+1*UNROLLJ);
    c12_S2 = gmx_load_pr(pvdw_c12+2*UNROLLJ);
    c12_S3 = gmx_load_pr(pvdw_c12+3*UNROLLJ);
#endif /* FIX_LJ_C */
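
/* FIX_LJ_C appears to be a testing option: every i,j pair gets the C6/C12
 * of atom-type pair (0,0) from nbat->nbfp, broadcast across the unroll,
 * instead of the real per-type parameters.
 */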
#ifdef ENERGY_GROUPS
    egps_ishift  = nbat->neg_2log;
    egps_imask   = (1<<egps_ishift) - 1;
    egps_jshift  = 2*nbat->neg_2log;
    egps_jmask   = (1<<egps_jshift) - 1;
    egps_jstride = (UNROLLJ>>1)*UNROLLJ;
    /* Major division is over i-particle energy groups, determine the stride */
    Vstride_i    = nbat->nenergrp*(1<<nbat->neg_2log)*egps_jstride;
#endif
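
/* The energy-group indices of the cluster atoms are bit-packed in
 * nbat->energrp, neg_2log bits per atom; the shifts and masks above
 * unpack them, and Vstride_i is the size of one i-group block in the
 * group-pair energy accumulation buffers.
 */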
    l_cj = nbl->cj;

    ninner = 0;
    for (n = 0; n < nbl->nci; n++)
    {
        nbln = &nbl->ci[n];

        ish              = (nbln->shift & NBNXN_CI_SHIFT);
        ish3             = ish*3;
        cjind0           = nbln->cj_ind_start;
        cjind1           = nbln->cj_ind_end;
        ci               = nbln->ci;
        ci_sh            = (ish == CENTRAL ? ci : -1);

        shX_S = gmx_load1_pr(shiftvec+ish3);
        shY_S = gmx_load1_pr(shiftvec+ish3+1);
        shZ_S = gmx_load1_pr(shiftvec+ish3+2);

#if UNROLLJ <= 4
        sci              = ci*STRIDE;
        scix             = sci*DIM;
        sci2             = sci*2;
#else
        sci              = (ci>>1)*STRIDE;
        scix             = sci*DIM + (ci & 1)*(STRIDE>>1);
        sci2             = sci*2 + (ci & 1)*(STRIDE>>1);
        sci             += (ci & 1)*(STRIDE>>1);
#endif
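
        /* With UNROLLJ > 4, x stores two 4-atom i-clusters packed into one
         * SIMD-width stride; the (ci & 1) terms select the lower or upper
         * half of the stride for even or odd cluster indices.
         */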
        /* We have 5 LJ/C combinations, but use only three inner loops,
         * as the other combinations are unlikely and/or not much faster:
         * inner half-LJ + C for half-LJ + C / no-LJ + C
         * inner LJ + C      for full-LJ + C
         * inner LJ          for full-LJ + no-C / half-LJ + no-C
         */
        do_LJ   = (nbln->shift & NBNXN_CI_DO_LJ(0));
        do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
        half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;
#ifdef ENERGY_GROUPS
        egps_i = nbat->energrp[ci];
        {
            int ia, egp_ia;

            for (ia = 0; ia < UNROLLI; ia++)
            {
                egp_ia     = (egps_i >> (ia*egps_ishift)) & egps_imask;
                vvdwtp[ia] = Vvdw + egp_ia*Vstride_i;
                vctp[ia]   = Vc   + egp_ia*Vstride_i;
            }
        }
#endif
#if defined CALC_ENERGIES
#if UNROLLJ == 4
        if (do_coul && l_cj[nbln->cj_ind_start].cj == ci_sh)
#endif
#if UNROLLJ == 2
        if (do_coul && l_cj[nbln->cj_ind_start].cj == (ci_sh<<1))
#endif
#if UNROLLJ == 8
        if (do_coul && l_cj[nbln->cj_ind_start].cj == (ci_sh>>1))
#endif
        {
            int  ia;
            real Vc_sub_self;

#ifdef CALC_COUL_RF
            Vc_sub_self = 0.5*ic->c_rf;
#endif
#ifdef CALC_COUL_TAB
#ifdef TAB_FDV0
            Vc_sub_self = 0.5*tab_coul_F[2];
#else
            Vc_sub_self = 0.5*tab_coul_V[0];
#endif
#endif
#ifdef CALC_COUL_EWALD
            /* beta/sqrt(pi) */
            Vc_sub_self = 0.5*ic->ewaldcoeff*M_2_SQRTPI;
#endif

            for (ia = 0; ia < UNROLLI; ia++)
            {
                real qi;

                qi = q[sci+ia];
#ifdef ENERGY_GROUPS
                vctp[ia][((egps_i>>(ia*egps_ishift)) & egps_imask)*egps_jstride]
#else
                *Vc
#endif
                    -= facel*qi*qi*Vc_sub_self;
            }
        }
#endif
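
        /* With RF, tables or Ewald, excluded pairs (including the i==i
         * self pairs on the diagonal cluster pair) still contribute the
         * regularized part of the potential, which at r=0 is a finite
         * constant (RF: c_rf, table: the value at bin 0, Ewald:
         * erf(beta*r)/r -> 2*beta/sqrt(pi)). The spurious self energy
         * facel*qi*qi*Vc_sub_self is therefore subtracted once per i atom.
         */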
        /* Load i atom data */
        sciy  = scix + STRIDE;
        sciz  = sciy + STRIDE;
        ix_S0 = gmx_add_pr(gmx_load1_pr(x+scix), shX_S);
        ix_S1 = gmx_add_pr(gmx_load1_pr(x+scix+1), shX_S);
        ix_S2 = gmx_add_pr(gmx_load1_pr(x+scix+2), shX_S);
        ix_S3 = gmx_add_pr(gmx_load1_pr(x+scix+3), shX_S);
        iy_S0 = gmx_add_pr(gmx_load1_pr(x+sciy), shY_S);
        iy_S1 = gmx_add_pr(gmx_load1_pr(x+sciy+1), shY_S);
        iy_S2 = gmx_add_pr(gmx_load1_pr(x+sciy+2), shY_S);
        iy_S3 = gmx_add_pr(gmx_load1_pr(x+sciy+3), shY_S);
        iz_S0 = gmx_add_pr(gmx_load1_pr(x+sciz), shZ_S);
        iz_S1 = gmx_add_pr(gmx_load1_pr(x+sciz+1), shZ_S);
        iz_S2 = gmx_add_pr(gmx_load1_pr(x+sciz+2), shZ_S);
        iz_S3 = gmx_add_pr(gmx_load1_pr(x+sciz+3), shZ_S);
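
        /* gmx_load1_pr broadcasts one i-atom coordinate to all SIMD lanes;
         * the j coordinates loaded in the inner loop differ per lane, which
         * is what makes this a 4xN kernel. The periodic shift is applied on
         * the i side only.
         */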
        if (do_coul)
        {
            iq_S0 = gmx_set1_pr(facel*q[sci]);
            iq_S1 = gmx_set1_pr(facel*q[sci+1]);
            iq_S2 = gmx_set1_pr(facel*q[sci+2]);
            iq_S3 = gmx_set1_pr(facel*q[sci+3]);
        }
#ifdef LJ_COMB_LB
        hsig_i_S0 = gmx_load1_pr(ljc+sci2+0);
        hsig_i_S1 = gmx_load1_pr(ljc+sci2+1);
        hsig_i_S2 = gmx_load1_pr(ljc+sci2+2);
        hsig_i_S3 = gmx_load1_pr(ljc+sci2+3);
        seps_i_S0 = gmx_load1_pr(ljc+sci2+STRIDE+0);
        seps_i_S1 = gmx_load1_pr(ljc+sci2+STRIDE+1);
        seps_i_S2 = gmx_load1_pr(ljc+sci2+STRIDE+2);
        seps_i_S3 = gmx_load1_pr(ljc+sci2+STRIDE+3);
#else
#ifdef LJ_COMB_GEOM
        c6s_S0  = gmx_load1_pr(ljc+sci2+0);
        c6s_S1  = gmx_load1_pr(ljc+sci2+1);
        if (!half_LJ)
        {
            c6s_S2  = gmx_load1_pr(ljc+sci2+2);
            c6s_S3  = gmx_load1_pr(ljc+sci2+3);
        }
        c12s_S0 = gmx_load1_pr(ljc+sci2+STRIDE+0);
        c12s_S1 = gmx_load1_pr(ljc+sci2+STRIDE+1);
        if (!half_LJ)
        {
            c12s_S2 = gmx_load1_pr(ljc+sci2+STRIDE+2);
            c12s_S3 = gmx_load1_pr(ljc+sci2+STRIDE+3);
        }
#else
        nbfp0 = nbfp_ptr + type[sci  ]*nbat->ntype*nbfp_stride;
        nbfp1 = nbfp_ptr + type[sci+1]*nbat->ntype*nbfp_stride;
        if (!half_LJ)
        {
            nbfp2 = nbfp_ptr + type[sci+2]*nbat->ntype*nbfp_stride;
            nbfp3 = nbfp_ptr + type[sci+3]*nbat->ntype*nbfp_stride;
        }
#endif
#endif
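
        /* With half-LJ only the first two i atoms of the cluster have LJ
         * interactions (atoms without LJ, typically hydrogens or virtual
         * sites, are sorted to the end of the cluster), so the parameters
         * for atoms 2 and 3 are skipped or left at zero.
         */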
        /* Zero the potential energy for this list */
        Vvdwtot_S = gmx_setzero_pr();
        vctot_S   = gmx_setzero_pr();

        /* Clear i atom forces */
        fix_S0 = gmx_setzero_pr();
        fix_S1 = gmx_setzero_pr();
        fix_S2 = gmx_setzero_pr();
        fix_S3 = gmx_setzero_pr();
        fiy_S0 = gmx_setzero_pr();
        fiy_S1 = gmx_setzero_pr();
        fiy_S2 = gmx_setzero_pr();
        fiy_S3 = gmx_setzero_pr();
        fiz_S0 = gmx_setzero_pr();
        fiz_S1 = gmx_setzero_pr();
        fiz_S2 = gmx_setzero_pr();
        fiz_S3 = gmx_setzero_pr();
        cjind = cjind0;

        /* Currently all kernels use (at least half) LJ */
#define CALC_LJ
        if (half_LJ)
        {
#define CALC_COULOMB
#define HALF_LJ
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
            }
#undef HALF_LJ
#undef CALC_COULOMB
        }
        else if (do_coul)
        {
#define CALC_COULOMB
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
            }
#undef CALC_COULOMB
        }
        else
        {
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
            }
        }
#undef CALC_LJ
        ninner += cjind1 - cjind0;
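
        /* The pair list is constructed so that j clusters carrying
         * exclusions come before the fully interacting ones; once an entry
         * with a full interaction mask is seen, the remaining entries are
         * processed by the cheaper loop without CHECK_EXCLS.
         */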
        /* Add accumulated i-forces to the force array */
#if UNROLLJ >= 4
        fix_S = gmx_mm_transpose_sum4_pr(fix_S0, fix_S1, fix_S2, fix_S3);
        gmx_store_pr4(f+scix, gmx_add_pr4(fix_S, gmx_load_pr4(f+scix)));

        fiy_S = gmx_mm_transpose_sum4_pr(fiy_S0, fiy_S1, fiy_S2, fiy_S3);
        gmx_store_pr4(f+sciy, gmx_add_pr4(fiy_S, gmx_load_pr4(f+sciy)));

        fiz_S = gmx_mm_transpose_sum4_pr(fiz_S0, fiz_S1, fiz_S2, fiz_S3);
        gmx_store_pr4(f+sciz, gmx_add_pr4(fiz_S, gmx_load_pr4(f+sciz)));

#ifdef CALC_SHIFTFORCES
        gmx_store_pr4(shf, fix_S);
        fshift[ish3+0] += SUM_SIMD4(shf);
        gmx_store_pr4(shf, fiy_S);
        fshift[ish3+1] += SUM_SIMD4(shf);
        gmx_store_pr4(shf, fiz_S);
        fshift[ish3+2] += SUM_SIMD4(shf);
#endif
#else
        fix0_S = gmx_mm_transpose_sum2_pr(fix_S0, fix_S1);
        gmx_store_pr(f+scix, gmx_add_pr(fix0_S, gmx_load_pr(f+scix)));
        fix2_S = gmx_mm_transpose_sum2_pr(fix_S2, fix_S3);
        gmx_store_pr(f+scix+2, gmx_add_pr(fix2_S, gmx_load_pr(f+scix+2)));

        fiy0_S = gmx_mm_transpose_sum2_pr(fiy_S0, fiy_S1);
        gmx_store_pr(f+sciy, gmx_add_pr(fiy0_S, gmx_load_pr(f+sciy)));
        fiy2_S = gmx_mm_transpose_sum2_pr(fiy_S2, fiy_S3);
        gmx_store_pr(f+sciy+2, gmx_add_pr(fiy2_S, gmx_load_pr(f+sciy+2)));

        fiz0_S = gmx_mm_transpose_sum2_pr(fiz_S0, fiz_S1);
        gmx_store_pr(f+sciz, gmx_add_pr(fiz0_S, gmx_load_pr(f+sciz)));
        fiz2_S = gmx_mm_transpose_sum2_pr(fiz_S2, fiz_S3);
        gmx_store_pr(f+sciz+2, gmx_add_pr(fiz2_S, gmx_load_pr(f+sciz+2)));

#ifdef CALC_SHIFTFORCES
        gmx_store_pr(shf, gmx_add_pr(fix0_S, fix2_S));
        fshift[ish3+0] += shf[0] + shf[1];
        gmx_store_pr(shf, gmx_add_pr(fiy0_S, fiy2_S));
        fshift[ish3+1] += shf[0] + shf[1];
        gmx_store_pr(shf, gmx_add_pr(fiz0_S, fiz2_S));
        fshift[ish3+2] += shf[0] + shf[1];
#endif
#endif
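
        /* Each fix_Sx register holds per-lane partial sums for one i atom;
         * the transpose-sum turns four (or two) such registers into one
         * register holding the per-atom totals, which is then added to f[].
         * The per-shift totals accumulated in fshift[] are needed for the
         * virial.
         */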
#ifdef CALC_ENERGIES
        if (do_coul)
        {
            gmx_store_pr(tmpsum, vctot_S);
            *Vc += SUM_SIMD(tmpsum);
        }

        gmx_store_pr(tmpsum, Vvdwtot_S);
        *Vvdw += SUM_SIMD(tmpsum);
#endif

        /* Outer loop uses 6 flops/iteration */
    }

#ifdef COUNT_PAIRS
    printf("atom pairs %d\n", npair);
#endif