/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
    const nbnxn_ci_t   *nbln;
    const nbnxn_cj_t   *l_cj;
    const real         *nbfp0, *nbfp1, *nbfp2 = NULL, *nbfp3 = NULL;
    gmx_bool            do_LJ, half_LJ, do_coul;
    int                 sci, scix, sciy, sciz, sci2;
    int                 cjind0, cjind1, cjind;
    int                 egps_ishift, egps_imask;
    int                 egps_jshift, egps_jmask, egps_jstride;
    real               *vvdwtp[UNROLLI];
    real               *vctp[UNROLLI];
    gmx_simd_real_t     shX_S;
    gmx_simd_real_t     shY_S;
    gmx_simd_real_t     shZ_S;
    gmx_simd_real_t     ix_S0, iy_S0, iz_S0;
    gmx_simd_real_t     ix_S1, iy_S1, iz_S1;
    gmx_simd_real_t     ix_S2, iy_S2, iz_S2;
    gmx_simd_real_t     ix_S3, iy_S3, iz_S3;
    gmx_simd_real_t     fix_S0, fiy_S0, fiz_S0;
    gmx_simd_real_t     fix_S1, fiy_S1, fiz_S1;
    gmx_simd_real_t     fix_S2, fiy_S2, fiz_S2;
    gmx_simd_real_t     fix_S3, fiy_S3, fiz_S3;
#if UNROLLJ >= 4
    /* We use an i-force SIMD register width of 4 */
    gmx_mm_pr4          fix_S, fiy_S, fiz_S;
#else
    /* We use an i-force SIMD register width of 2 */
    gmx_simd_real_t     fix0_S, fiy0_S, fiz0_S;
    gmx_simd_real_t     fix2_S, fiy2_S, fiz2_S;
#endif
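
    /* Note: the per-i-atom accumulators fix_S0-3, fiy_S0-3 and fiz_S0-3 are
     * reduced into one of the register sets above after the inner loop, at
     * SIMD width 4 or 2 depending on what the SIMD layer provides. */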
    gmx_simd_real_t     diagonal_jmi_S;
#if UNROLLI == UNROLLJ
    gmx_simd_bool_t     diagonal_mask_S0, diagonal_mask_S1, diagonal_mask_S2, diagonal_mask_S3;
#else
    gmx_simd_bool_t     diagonal_mask0_S0, diagonal_mask0_S1, diagonal_mask0_S2, diagonal_mask0_S3;
    gmx_simd_bool_t     diagonal_mask1_S0, diagonal_mask1_S1, diagonal_mask1_S2, diagonal_mask1_S3;
#endif
    unsigned           *exclusion_filter;
    gmx_exclfilter      filter_S0, filter_S1, filter_S2, filter_S3;

    gmx_simd_real_t     zero_S = gmx_simd_set1_r(0.0);
    gmx_simd_real_t     one_S  = gmx_simd_set1_r(1.0);
    gmx_simd_real_t     iq_S0  = gmx_simd_setzero_r();
    gmx_simd_real_t     iq_S1  = gmx_simd_setzero_r();
    gmx_simd_real_t     iq_S2  = gmx_simd_setzero_r();
    gmx_simd_real_t     iq_S3  = gmx_simd_setzero_r();
    gmx_simd_real_t     mrc_3_S;
#ifdef CALC_ENERGIES
    gmx_simd_real_t     hrc_3_S, moh_rc_S;
#endif
#ifdef CALC_COUL_TAB
    /* Coulomb table variables */
    gmx_simd_real_t     invtsp_S;
    const real         *tab_coul_F;
    const real         *tab_coul_V;

    /* Thread-local working buffers for force and potential lookups */
    int                 ti0_array[2*GMX_SIMD_REAL_WIDTH], *ti0 = NULL;
    int                 ti1_array[2*GMX_SIMD_REAL_WIDTH], *ti1 = NULL;
    int                 ti2_array[2*GMX_SIMD_REAL_WIDTH], *ti2 = NULL;
    int                 ti3_array[2*GMX_SIMD_REAL_WIDTH], *ti3 = NULL;
    gmx_simd_real_t     mhalfsp_S;
#endif
#ifdef CALC_COUL_EWALD
    gmx_simd_real_t     beta2_S, beta_S;
#endif

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
    gmx_simd_real_t     sh_ewald_S;
#endif
#ifdef LJ_COMB_LB
    gmx_simd_real_t     hsig_i_S0, seps_i_S0;
    gmx_simd_real_t     hsig_i_S1, seps_i_S1;
    gmx_simd_real_t     hsig_i_S2, seps_i_S2;
    gmx_simd_real_t     hsig_i_S3, seps_i_S3;
#else
    real                pvdw_array[2*UNROLLI*UNROLLJ+3];
    real               *pvdw_c6, *pvdw_c12;
    gmx_simd_real_t     c6_S0, c12_S0;
    gmx_simd_real_t     c6_S1, c12_S1;
    gmx_simd_real_t     c6_S2, c12_S2;
    gmx_simd_real_t     c6_S3, c12_S3;

#ifdef LJ_COMB_GEOM
    gmx_simd_real_t     c6s_S0, c12s_S0;
    gmx_simd_real_t     c6s_S1, c12s_S1;
    gmx_simd_real_t     c6s_S2  = gmx_simd_setzero_r();
    gmx_simd_real_t     c12s_S2 = gmx_simd_setzero_r();
    gmx_simd_real_t     c6s_S3  = gmx_simd_setzero_r();
    gmx_simd_real_t     c12s_S3 = gmx_simd_setzero_r();
#endif
#endif /* LJ_COMB_LB */
    gmx_simd_real_t     vctot_S, Vvdwtot_S;
    gmx_simd_real_t     sixth_S, twelveth_S;

    gmx_simd_real_t     avoid_sing_S;
    gmx_simd_real_t     rc2_S;
#ifdef VDW_CUTOFF_CHECK
    gmx_simd_real_t     rcvdw2_S;
#endif

#ifdef CALC_ENERGIES
    gmx_simd_real_t     sh_invrc6_S, sh_invrc12_S;

    /* cppcheck-suppress unassignedVariable */
    real                tmpsum_array[GMX_SIMD_REAL_WIDTH*2], *tmpsum;
#ifdef CALC_SHIFTFORCES
    /* cppcheck-suppress unassignedVariable */
    real                shf_array[GMX_SIMD_REAL_WIDTH*2], *shf;
#endif
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB
    ljc = nbat->lj_comb;
#else
    /* No combination rule used */
    nbfp_ptr = (4 == nbfp_stride) ? nbat->nbfp_s4 : nbat->nbfp;
#endif
    /* Load j-i for the first i */
    diagonal_jmi_S = gmx_simd_load_r(nbat->simd_4xn_diagonal_j_minus_i);
    /* Generate all the diagonal masks as comparison results */
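    /* Example: with UNROLLI == UNROLLJ == 4 the j-i load gives {0,1,2,3},
     * so for i-atom 0 the test 0 < j-i yields {F,T,T,T}, i.e. only j > i
     * interacts; each subtraction of 1 shifts the mask by one lane for the
     * next i-atom. */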
#if UNROLLI == UNROLLJ
    diagonal_mask_S0  = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask_S1  = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask_S2  = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask_S3  = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
#else
#if UNROLLI == 2*UNROLLJ || 2*UNROLLI == UNROLLJ
    diagonal_mask0_S0 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask0_S1 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask0_S2 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask0_S3 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);

#if UNROLLI == 2*UNROLLJ
    /* Load j-i for the second half of the j-cluster */
    diagonal_jmi_S    = gmx_simd_load_r(nbat->simd_4xn_diagonal_j_minus_i + UNROLLJ);
#endif

    diagonal_mask1_S0 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask1_S1 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask1_S2 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask1_S3 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
#endif
#endif
    /* Load masks for topology exclusion masking. filter_stride is
       static const, so the conditional will be optimized away. */
    if (1 == filter_stride)
    {
        exclusion_filter = nbat->simd_exclusion_filter1;
    }
    else /* (2 == filter_stride) */
    {
        exclusion_filter = nbat->simd_exclusion_filter2;
    }
    /* Here we cast the exclusion filters from unsigned * to int * or real *.
     * Since we only check bits, the actual value they represent does not
     * matter, as long as both filter and mask data are treated the same way.
     */
    filter_S0 = gmx_load_exclusion_filter(exclusion_filter + 0*UNROLLJ*filter_stride);
    filter_S1 = gmx_load_exclusion_filter(exclusion_filter + 1*UNROLLJ*filter_stride);
    filter_S2 = gmx_load_exclusion_filter(exclusion_filter + 2*UNROLLJ*filter_stride);
    filter_S3 = gmx_load_exclusion_filter(exclusion_filter + 3*UNROLLJ*filter_stride);
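
    /* Each filter_S* holds the exclusion-filter bits for one i-atom; the
     * inner kernel tests them against the pair-list interaction masks to
     * zero the contributions of excluded i-j pairs. */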
#ifdef CALC_COUL_TAB
    /* Generate aligned table index pointers */
    ti0 = prepare_table_load_buffer(ti0_array);
    ti1 = prepare_table_load_buffer(ti1_array);
    ti2 = prepare_table_load_buffer(ti2_array);
    ti3 = prepare_table_load_buffer(ti3_array);

    invtsp_S  = gmx_simd_set1_r(ic->tabq_scale);
    mhalfsp_S = gmx_simd_set1_r(-0.5/ic->tabq_scale);
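
    /* tabq_scale is the inverse table spacing; mhalfsp_S is only needed
     * when energies are calculated, where the potential is reconstructed
     * from the force table. */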
#ifdef TAB_FDV0
    tab_coul_F = ic->tabq_coul_FDV0;
#else
    tab_coul_F = ic->tabq_coul_F;
    tab_coul_V = ic->tabq_coul_V;
#endif
#endif /* CALC_COUL_TAB */
#ifdef CALC_COUL_EWALD
    beta2_S = gmx_simd_set1_r(ic->ewaldcoeff_q*ic->ewaldcoeff_q);
    beta_S  = gmx_simd_set1_r(ic->ewaldcoeff_q);
#endif

#if (defined CALC_COUL_TAB || defined CALC_COUL_EWALD) && defined CALC_ENERGIES
    sh_ewald_S = gmx_simd_set1_r(ic->sh_ewald);
#endif
    shiftvec = shift_vec[0];

    avoid_sing_S = gmx_simd_set1_r(NBNXN_AVOID_SING_R2_INC);

    /* The kernel either supports rcoulomb = rvdw or rcoulomb >= rvdw */
    rc2_S = gmx_simd_set1_r(ic->rcoulomb*ic->rcoulomb);
#ifdef VDW_CUTOFF_CHECK
    rcvdw2_S = gmx_simd_set1_r(ic->rvdw*ic->rvdw);
#endif
#ifdef CALC_ENERGIES
    sixth_S      = gmx_simd_set1_r(1.0/6.0);
    twelveth_S   = gmx_simd_set1_r(1.0/12.0);

    sh_invrc6_S  = gmx_simd_set1_r(ic->sh_invrc6);
    sh_invrc12_S = gmx_simd_set1_r(ic->sh_invrc6*ic->sh_invrc6);
#endif
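
    /* LJ potential-shift constants: since r^-12 = (r^-6)^2, the repulsion
     * shift is the square of the dispersion shift. */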
    mrc_3_S = gmx_simd_set1_r(-2*ic->k_rf);

#ifdef CALC_ENERGIES
    hrc_3_S  = gmx_simd_set1_r(ic->k_rf);

    moh_rc_S = gmx_simd_set1_r(-ic->c_rf);
#endif
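
    /* Reaction-field: V(r) = qi*qj*(1/r + k_rf*r^2 - c_rf), so the force
     * carries a -2*k_rf term (mrc_3_S), while k_rf (hrc_3_S) and -c_rf
     * (moh_rc_S) enter the energy only. */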
#ifdef CALC_ENERGIES
    tmpsum = gmx_simd_align_r(tmpsum_array);
#endif
#ifdef CALC_SHIFTFORCES
    shf = gmx_simd_align_r(shf_array);
#endif
#ifdef FIX_LJ_C
    pvdw_c6  = gmx_simd_align_r(pvdw_array+3);
    pvdw_c12 = pvdw_c6 + UNROLLI*UNROLLJ;
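
    /* FIX_LJ_C fixes the LJ parameters: every slot below is filled with
     * the C6/C12 pair of the first entry of the parameter table, which is
     * mainly useful for testing. */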
    for (jp = 0; jp < UNROLLJ; jp++)
    {
        pvdw_c6 [0*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [1*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [2*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [3*UNROLLJ+jp] = nbat->nbfp[0*2];

        pvdw_c12[0*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[1*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[2*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[3*UNROLLJ+jp] = nbat->nbfp[0*2+1];
    }
    c6_S0  = gmx_simd_load_r(pvdw_c6 +0*UNROLLJ);
    c6_S1  = gmx_simd_load_r(pvdw_c6 +1*UNROLLJ);
    c6_S2  = gmx_simd_load_r(pvdw_c6 +2*UNROLLJ);
    c6_S3  = gmx_simd_load_r(pvdw_c6 +3*UNROLLJ);

    c12_S0 = gmx_simd_load_r(pvdw_c12+0*UNROLLJ);
    c12_S1 = gmx_simd_load_r(pvdw_c12+1*UNROLLJ);
    c12_S2 = gmx_simd_load_r(pvdw_c12+2*UNROLLJ);
    c12_S3 = gmx_simd_load_r(pvdw_c12+3*UNROLLJ);
#endif /* FIX_LJ_C */
#ifdef ENERGY_GROUPS
    egps_ishift  = nbat->neg_2log;
    egps_imask   = (1<<egps_ishift) - 1;
    egps_jshift  = 2*nbat->neg_2log;
    egps_jmask   = (1<<egps_jshift) - 1;
    egps_jstride = (UNROLLJ>>1)*UNROLLJ;
    /* Major division is over i-particle energy groups, determine the stride */
    Vstride_i    = nbat->nenergrp*(1<<nbat->neg_2log)*egps_jstride;
#endif
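
    /* Example: with nenergrp = 2 (neg_2log = 1) and UNROLLJ = 4 we get
     * egps_jstride = 8 and Vstride_i = 2*2*8 = 32 reals between
     * consecutive i-energy-groups in the Vvdw/Vc buffers. */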
    l_cj = nbl->cj;

    ninner = 0;
    for (n = 0; n < nbl->nci; n++)
    {
        nbln = &nbl->ci[n];

        ish    = (nbln->shift & NBNXN_CI_SHIFT);
        ish3   = ish*3;
        cjind0 = nbln->cj_ind_start;
        cjind1 = nbln->cj_ind_end;
        ci     = nbln->ci;
        ci_sh  = (ish == CENTRAL ? ci : -1);

        shX_S = gmx_simd_load1_r(shiftvec+ish3);
        shY_S = gmx_simd_load1_r(shiftvec+ish3+1);
        shZ_S = gmx_simd_load1_r(shiftvec+ish3+2);
#if UNROLLJ <= 4
        sci  = ci*STRIDE;
        scix = sci*DIM;
        sci2 = sci*2;
#else
        sci  = (ci>>1)*STRIDE;
        scix = sci*DIM + (ci & 1)*(STRIDE>>1);
        sci2 = sci*2 + (ci & 1)*(STRIDE>>1);
        sci += (ci & 1)*(STRIDE>>1);
#endif
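
        /* In the UNROLLJ > 4 branch two i-clusters share one coordinate
         * stride: (ci>>1) selects the cluster pair and the (ci & 1) terms
         * select which half. */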
        /* We have 5 LJ/C combinations, but use only three inner loops,
         * as the other combinations are unlikely and/or not much faster:
         * inner half-LJ + C for half-LJ + C / no-LJ + C
         * inner LJ + C      for full-LJ + C
         * inner LJ          for full-LJ + no-C / half-LJ + no-C
         */
        do_LJ   = (nbln->shift & NBNXN_CI_DO_LJ(0));
        do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
        half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;
#ifdef ENERGY_GROUPS
        egps_i = nbat->energrp[ci];
        {
            int ia, egp_ia;

            for (ia = 0; ia < UNROLLI; ia++)
            {
                egp_ia     = (egps_i >> (ia*egps_ishift)) & egps_imask;
                vvdwtp[ia] = Vvdw + egp_ia*Vstride_i;
                vctp[ia]   = Vc   + egp_ia*Vstride_i;
            }
        }
#endif
#if defined CALC_ENERGIES
#if UNROLLJ == 4
        if (do_coul && l_cj[nbln->cj_ind_start].cj == ci_sh)
#endif
#if UNROLLJ == 2
        if (do_coul && l_cj[nbln->cj_ind_start].cj == (ci_sh<<1))
#endif
#if UNROLLJ == 8
        if (do_coul && l_cj[nbln->cj_ind_start].cj == (ci_sh>>1))
#endif
        {
            int  ia;
            real Vc_sub_self;

#ifdef CALC_COUL_RF
            Vc_sub_self = 0.5*ic->c_rf;
#endif
#ifdef CALC_COUL_TAB
#ifdef TAB_FDV0
            Vc_sub_self = 0.5*tab_coul_F[2];
#else
            Vc_sub_self = 0.5*tab_coul_V[0];
#endif
#endif
#ifdef CALC_COUL_EWALD
            /* beta/sqrt(pi) */
            Vc_sub_self = 0.5*ic->ewaldcoeff_q*M_2_SQRTPI;
#endif

            for (ia = 0; ia < UNROLLI; ia++)
            {
                real qi;

                qi = q[sci+ia];
#ifdef ENERGY_GROUPS
                vctp[ia][((egps_i>>(ia*egps_ishift)) & egps_imask)*egps_jstride]
#else
                Vc[0]
#endif
                    -= facel*qi*qi*Vc_sub_self;
            }
        }
#endif
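
        /* When the first j-cluster coincides with the i-cluster, each
         * i-atom sees its own charge at r=0, which for RF/table/Ewald
         * electrostatics leaves a constant self term (for Ewald the
         * erf(beta*r)/r limit at r=0 is 2*beta/sqrt(pi)); the loop above
         * subtracts that spurious energy again. */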
        /* Load i atom data */
        sciy = scix + STRIDE;
        sciz = sciy + STRIDE;
        ix_S0 = gmx_simd_add_r(gmx_simd_load1_r(x+scix), shX_S);
        ix_S1 = gmx_simd_add_r(gmx_simd_load1_r(x+scix+1), shX_S);
        ix_S2 = gmx_simd_add_r(gmx_simd_load1_r(x+scix+2), shX_S);
        ix_S3 = gmx_simd_add_r(gmx_simd_load1_r(x+scix+3), shX_S);
        iy_S0 = gmx_simd_add_r(gmx_simd_load1_r(x+sciy), shY_S);
        iy_S1 = gmx_simd_add_r(gmx_simd_load1_r(x+sciy+1), shY_S);
        iy_S2 = gmx_simd_add_r(gmx_simd_load1_r(x+sciy+2), shY_S);
        iy_S3 = gmx_simd_add_r(gmx_simd_load1_r(x+sciy+3), shY_S);
        iz_S0 = gmx_simd_add_r(gmx_simd_load1_r(x+sciz), shZ_S);
        iz_S1 = gmx_simd_add_r(gmx_simd_load1_r(x+sciz+1), shZ_S);
        iz_S2 = gmx_simd_add_r(gmx_simd_load1_r(x+sciz+2), shZ_S);
        iz_S3 = gmx_simd_add_r(gmx_simd_load1_r(x+sciz+3), shZ_S);
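
        /* Each i-coordinate is broadcast to all SIMD lanes, with the PBC
         * shift for this list entry added once here instead of per pair in
         * the inner loop. */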
        if (do_coul)
        {
            iq_S0 = gmx_simd_set1_r(facel*q[sci]);
            iq_S1 = gmx_simd_set1_r(facel*q[sci+1]);
            iq_S2 = gmx_simd_set1_r(facel*q[sci+2]);
            iq_S3 = gmx_simd_set1_r(facel*q[sci+3]);
        }
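
        /* The i-charges are pre-scaled by facel, so the inner loop forms
         * the qi*qj Coulomb prefactor with a single multiply. */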
#ifdef LJ_COMB_LB
        hsig_i_S0 = gmx_simd_load1_r(ljc+sci2+0);
        hsig_i_S1 = gmx_simd_load1_r(ljc+sci2+1);
        hsig_i_S2 = gmx_simd_load1_r(ljc+sci2+2);
        hsig_i_S3 = gmx_simd_load1_r(ljc+sci2+3);
        seps_i_S0 = gmx_simd_load1_r(ljc+sci2+STRIDE+0);
        seps_i_S1 = gmx_simd_load1_r(ljc+sci2+STRIDE+1);
        seps_i_S2 = gmx_simd_load1_r(ljc+sci2+STRIDE+2);
        seps_i_S3 = gmx_simd_load1_r(ljc+sci2+STRIDE+3);
#else
#ifdef LJ_COMB_GEOM
        c6s_S0 = gmx_simd_load1_r(ljc+sci2+0);
        c6s_S1 = gmx_simd_load1_r(ljc+sci2+1);
        if (!half_LJ)
        {
            c6s_S2 = gmx_simd_load1_r(ljc+sci2+2);
            c6s_S3 = gmx_simd_load1_r(ljc+sci2+3);
        }
        c12s_S0 = gmx_simd_load1_r(ljc+sci2+STRIDE+0);
        c12s_S1 = gmx_simd_load1_r(ljc+sci2+STRIDE+1);
        if (!half_LJ)
        {
            c12s_S2 = gmx_simd_load1_r(ljc+sci2+STRIDE+2);
            c12s_S3 = gmx_simd_load1_r(ljc+sci2+STRIDE+3);
        }
#else
        nbfp0 = nbfp_ptr + type[sci  ]*nbat->ntype*nbfp_stride;
        nbfp1 = nbfp_ptr + type[sci+1]*nbat->ntype*nbfp_stride;
        if (!half_LJ)
        {
            nbfp2 = nbfp_ptr + type[sci+2]*nbat->ntype*nbfp_stride;
            nbfp3 = nbfp_ptr + type[sci+3]*nbat->ntype*nbfp_stride;
        }
#endif /* LJ_COMB_GEOM */
#endif /* LJ_COMB_LB */
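
        /* Without a combination rule each i-atom needs a row pointer into
         * the ntype x ntype C6/C12 table; with half-LJ only the first two
         * i-atoms have LJ interactions, so only their parameters are
         * loaded. */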
        /* Zero the potential energy for this list */
        Vvdwtot_S = gmx_simd_setzero_r();
        vctot_S   = gmx_simd_setzero_r();

        /* Clear i atom forces */
        fix_S0 = gmx_simd_setzero_r();
        fix_S1 = gmx_simd_setzero_r();
        fix_S2 = gmx_simd_setzero_r();
        fix_S3 = gmx_simd_setzero_r();
        fiy_S0 = gmx_simd_setzero_r();
        fiy_S1 = gmx_simd_setzero_r();
        fiy_S2 = gmx_simd_setzero_r();
        fiy_S3 = gmx_simd_setzero_r();
        fiz_S0 = gmx_simd_setzero_r();
        fiz_S1 = gmx_simd_setzero_r();
        fiz_S2 = gmx_simd_setzero_r();
        fiz_S3 = gmx_simd_setzero_r();
        cjind = cjind0;

        /* Currently all kernels use (at least half) LJ */
#define CALC_LJ
        if (half_LJ)
        {
            /* Coulomb: all i-atoms, LJ: first half i-atoms */
#define CALC_COULOMB
#define HALF_LJ
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
            }
#undef HALF_LJ
#undef CALC_COULOMB
        }
        else if (do_coul)
        {
            /* Coulomb: all i-atoms, LJ: all i-atoms */
#define CALC_COULOMB
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
            }
#undef CALC_COULOMB
        }
        else
        {
            /* Coulomb: none, LJ: all i-atoms */
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "nbnxn_kernel_simd_4xn_inner.h"
            }
        }
#undef CALC_LJ
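
        /* Each branch above runs the inner loop twice: first with
         * CHECK_EXCLS defined for the leading pair-list entries that carry
         * exclusion masks, then a cheaper mask-free version for j-clusters
         * in which all pairs interact (excl == NBNXN_INTERACTION_MASK_ALL). */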
        ninner += cjind1 - cjind0;
        /* Add accumulated i-forces to the force array */
#if UNROLLJ >= 4
        fix_S = gmx_mm_transpose_sum4_pr(fix_S0, fix_S1, fix_S2, fix_S3);
        gmx_store_pr4(f+scix, gmx_add_pr4(fix_S, gmx_load_pr4(f+scix)));

        fiy_S = gmx_mm_transpose_sum4_pr(fiy_S0, fiy_S1, fiy_S2, fiy_S3);
        gmx_store_pr4(f+sciy, gmx_add_pr4(fiy_S, gmx_load_pr4(f+sciy)));

        fiz_S = gmx_mm_transpose_sum4_pr(fiz_S0, fiz_S1, fiz_S2, fiz_S3);
        gmx_store_pr4(f+sciz, gmx_add_pr4(fiz_S, gmx_load_pr4(f+sciz)));

#ifdef CALC_SHIFTFORCES
        fshift[ish3+0] += gmx_sum_simd4(fix_S, shf);
        fshift[ish3+1] += gmx_sum_simd4(fiy_S, shf);
        fshift[ish3+2] += gmx_sum_simd4(fiz_S, shf);
#endif
#else
        fix0_S = gmx_mm_transpose_sum2_pr(fix_S0, fix_S1);
        gmx_simd_store_r(f+scix, gmx_simd_add_r(fix0_S, gmx_simd_load_r(f+scix)));
        fix2_S = gmx_mm_transpose_sum2_pr(fix_S2, fix_S3);
        gmx_simd_store_r(f+scix+2, gmx_simd_add_r(fix2_S, gmx_simd_load_r(f+scix+2)));

        fiy0_S = gmx_mm_transpose_sum2_pr(fiy_S0, fiy_S1);
        gmx_simd_store_r(f+sciy, gmx_simd_add_r(fiy0_S, gmx_simd_load_r(f+sciy)));
        fiy2_S = gmx_mm_transpose_sum2_pr(fiy_S2, fiy_S3);
        gmx_simd_store_r(f+sciy+2, gmx_simd_add_r(fiy2_S, gmx_simd_load_r(f+sciy+2)));

        fiz0_S = gmx_mm_transpose_sum2_pr(fiz_S0, fiz_S1);
        gmx_simd_store_r(f+sciz, gmx_simd_add_r(fiz0_S, gmx_simd_load_r(f+sciz)));
        fiz2_S = gmx_mm_transpose_sum2_pr(fiz_S2, fiz_S3);
        gmx_simd_store_r(f+sciz+2, gmx_simd_add_r(fiz2_S, gmx_simd_load_r(f+sciz+2)));

#ifdef CALC_SHIFTFORCES
        fshift[ish3+0] += gmx_sum_simd2(gmx_simd_add_r(fix0_S, fix2_S), shf);
        fshift[ish3+1] += gmx_sum_simd2(gmx_simd_add_r(fiy0_S, fiy2_S), shf);
        fshift[ish3+2] += gmx_sum_simd2(gmx_simd_add_r(fiz0_S, fiz2_S), shf);
#endif
#endif
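
        /* The transpose-sum reductions above leave the totals for i-atoms
         * 0-3 in consecutive lanes of one register, so each force component
         * is added to f[] with a single load/add/store. */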
#ifdef CALC_ENERGIES
        if (do_coul)
        {
            *Vc += gmx_sum_simd(vctot_S, tmpsum);
        }

        *Vvdw += gmx_sum_simd(Vvdwtot_S, tmpsum);
#endif
        /* Outer loop uses 6 flops/iteration */
    }

#ifdef COUNT_PAIRS
    printf("atom pairs %d\n", npair);
#endif