/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/* Half-width SIMD operations are required here.
 * As the 4xn kernels are the "standard" kernels and some special operations
 * are required only here, we define those in nbnxn_kernel_simd_utils_...
 *
 * Half-width SIMD real type:
 * gmx_mm_hpr
 *
 * Half-width SIMD operations
 * Load reals at half-width aligned pointer b into half-width SIMD register a:
 * gmx_load_hpr(a, b)
 * Set all entries in half-width SIMD register *a to b:
 * gmx_set1_hpr(a, b)
 * Load one real at b and one real at b+1 into halves of a, respectively:
 * gmx_load1p1_pr(a, b)
 * Load reals at half-width aligned pointer b into two halves of a:
 * gmx_loaddh_pr(a, b)
 * Store half-width SIMD register b into half-width aligned memory a:
 * gmx_store_hpr(a, b)
 * gmx_add_hpr(a, b)
 * gmx_sub_hpr(a, b)
 * Sum over 4 half SIMD registers:
 * gmx_sum4_hpr(a, b)
 * Sum the elements of halves of each input register and store the sums in out:
 * gmx_mm_transpose_sum4h_pr(a, b)
 * Extract two half-width registers *b, *c from a full-width register a:
 * gmx_pr_to_2hpr(a, b, c)
 */
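/* Illustration (not from the original file; the width is an assumption): with
 * a hypothetical 8-wide single-precision SIMD register,
 *   gmx_load1p1_pr(&a, b) -> a = { b[0],b[0],b[0],b[0], b[1],b[1],b[1],b[1] }
 *   gmx_loaddh_pr (&a, b) -> a = { b[0],b[1],b[2],b[3], b[0],b[1],b[2],b[3] }
 * i.e. the first broadcasts one real into each half, the second duplicates
 * four reals into both halves. This pairing is what lets the 2xnn kernel
 * process two i-atoms against one j-cluster in a single full-width register.
 */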
{
    const nbnxn_ci_t   *nbln;
    const nbnxn_cj_t   *l_cj;
    const real         *q;
    const real         *shiftvec, *x;
    real                facel;
    int                 n, ci, ci_sh;
    int                 ish, ish3;
    gmx_bool            do_LJ, half_LJ, do_coul;
    int                 cjind0, cjind1, cjind;
    int                 jp;
    int                 ninner;

#ifdef ENERGY_GROUPS
    int                 Vstride_i;
    int                 egps_ishift, egps_imask;
    int                 egps_jshift, egps_jmask, egps_jstride;
    int                 egps_i;
    real               *vvdwtp[UNROLLI];
    real               *vctp[UNROLLI];
#endif

#ifdef COUNT_PAIRS
    int                 npair = 0;
#endif
    gmx_simd_real_t     shX_S;
    gmx_simd_real_t     shY_S;
    gmx_simd_real_t     shZ_S;
    gmx_simd_real_t     ix_S0, iy_S0, iz_S0;
    gmx_simd_real_t     ix_S2, iy_S2, iz_S2;
    gmx_simd_real_t     fix_S0, fiy_S0, fiz_S0;
    gmx_simd_real_t     fix_S2, fiy_S2, fiz_S2;
    /* We use an i-force SIMD register width of 4 */
    /* The simd4 stuff might be defined in nbnxn_kernel_simd_utils.h */
    gmx_simd4_real_t    fix_S, fiy_S, fiz_S;

    gmx_simd_real_t     diagonal_jmi_S;
#if UNROLLI == UNROLLJ
    gmx_simd_bool_t     diagonal_mask_S0, diagonal_mask_S2;
#else
    gmx_simd_bool_t     diagonal_mask0_S0, diagonal_mask0_S2;
    gmx_simd_bool_t     diagonal_mask1_S0, diagonal_mask1_S2;
#endif
    unsigned           *exclusion_filter;
    gmx_exclfilter      filter_S0, filter_S2;

    gmx_simd_real_t     zero_S = gmx_simd_set1_r(0.0);

    gmx_simd_real_t     one_S  = gmx_simd_set1_r(1.0);
    gmx_simd_real_t     iq_S0  = gmx_simd_setzero_r();
    gmx_simd_real_t     iq_S2  = gmx_simd_setzero_r();
#ifdef CALC_COUL_RF
    gmx_simd_real_t     mrc_3_S;
#ifdef CALC_ENERGIES
    gmx_simd_real_t     hrc_3_S, moh_rc_S;
#endif
#endif

#ifdef CALC_COUL_TAB
    /* Coulomb table variables */
    gmx_simd_real_t     invtsp_S;
    const real         *tab_coul_F;
#ifndef TAB_FDV0
    const real         *tab_coul_V;
#endif
    /* Thread-local working buffers for force and potential lookups */
    int                 ti0_array[2*GMX_SIMD_REAL_WIDTH], *ti0 = NULL;
    int                 ti2_array[2*GMX_SIMD_REAL_WIDTH], *ti2 = NULL;
#ifdef CALC_ENERGIES
    gmx_simd_real_t     mhalfsp_S;
#endif
#endif
#ifdef CALC_COUL_EWALD
    gmx_simd_real_t     beta2_S, beta_S;
#endif

#if defined CALC_ENERGIES && (defined CALC_COUL_EWALD || defined CALC_COUL_TAB)
    gmx_simd_real_t     sh_ewald_S;
#endif

#if defined LJ_CUT && defined CALC_ENERGIES
    gmx_simd_real_t     p6_cpot_S, p12_cpot_S;
#endif
#ifdef LJ_POT_SWITCH
    gmx_simd_real_t     rswitch_S;
    gmx_simd_real_t     swV3_S, swV4_S, swV5_S;
    gmx_simd_real_t     swF2_S, swF3_S, swF4_S;
#endif
#ifdef LJ_FORCE_SWITCH
    gmx_simd_real_t     rswitch_S;
    gmx_simd_real_t     p6_fc2_S, p6_fc3_S;
    gmx_simd_real_t     p12_fc2_S, p12_fc3_S;
#ifdef CALC_ENERGIES
    gmx_simd_real_t     p6_vc3_S, p6_vc4_S;
    gmx_simd_real_t     p12_vc3_S, p12_vc4_S;
    gmx_simd_real_t     p6_6cpot_S, p12_12cpot_S;
#endif
#endif
#ifdef LJ_EWALD_GEOM
    real                lj_ewaldcoeff2, lj_ewaldcoeff6_6;
    gmx_simd_real_t     mone_S, half_S, lje_c2_S, lje_c6_6_S;
#endif
#ifdef LJ_COMB_LB
    gmx_simd_real_t     hsig_i_S0, seps_i_S0;
    gmx_simd_real_t     hsig_i_S2, seps_i_S2;
#else
#ifdef FIX_LJ_C
    real                pvdw_array[2*UNROLLI*UNROLLJ+GMX_SIMD_REAL_WIDTH];
    real               *pvdw_c6, *pvdw_c12;
#endif
#endif /* LJ_COMB_LB */
    gmx_simd_real_t     avoid_sing_S;
    gmx_simd_real_t     rc2_S;
#ifdef VDW_CUTOFF_CHECK
    gmx_simd_real_t     rcvdw2_S;
#endif
#if defined LJ_COMB_GEOM || defined LJ_COMB_LB || defined LJ_EWALD_GEOM
    const real         *ljc = nbat->lj_comb;
#endif
#if !(defined LJ_COMB_GEOM || defined LJ_COMB_LB || defined FIX_LJ_C)
    /* No combination rule used */
    real               *nbfp_ptr = (4 == nbfp_stride) ? nbat->nbfp_s4 : nbat->nbfp;
    const int          *type     = nbat->type;
#endif
    /* Load j-i for the first i */
    diagonal_jmi_S    = gmx_simd_load_r(nbat->simd_2xnn_diagonal_j_minus_i);
    /* Generate all the diagonal masks as comparison results */
#if UNROLLI == UNROLLJ
    diagonal_mask_S0  = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask_S2  = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
#else
#if 2*UNROLLI == UNROLLJ
    diagonal_mask0_S0 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask0_S2 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask1_S0 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_jmi_S    = gmx_simd_sub_r(diagonal_jmi_S, one_S);
    diagonal_mask1_S2 = gmx_simd_cmplt_r(zero_S, diagonal_jmi_S);
#endif
#endif
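    /* A note on the masks (a reading of the code, not from the original
     * file): diagonal_jmi_S holds j-i offsets for the first pair of i-atoms,
     * so lanes with j > i compare as TRUE and only the upper triangle of the
     * intra-cluster interaction matrix survives. Each pair of subtractions
     * advances the offsets by two i-atoms, matching the two i-atoms packed
     * into the halves of one 2xnn register.
     */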
    /* Load masks for topology exclusion masking. filter_stride is
       static const, so the conditional will be optimized away. */
    if (1 == filter_stride)
    {
        exclusion_filter = nbat->simd_exclusion_filter1;
    }
    else /* (2 == filter_stride) */
    {
        exclusion_filter = nbat->simd_exclusion_filter2;
    }

    /* Here we cast the exclusion filters from unsigned * to int * or real *.
     * Since we only check bits, the actual value they represent does not
     * matter, as long as both filter and mask data are treated the same way.
     */
    filter_S0 = gmx_load_exclusion_filter(exclusion_filter + 0*2*UNROLLJ*filter_stride);
    filter_S2 = gmx_load_exclusion_filter(exclusion_filter + 1*2*UNROLLJ*filter_stride);
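    /* Sketch of the exclusion masking (simplified; the exact bit layout is
     * defined where the filters are set up, not here): each j-cluster entry
     * carries an exclusion bitfield, filter_S0/filter_S2 select the bits
     * belonging to the i-atoms in each half-register, and the inner loop
     * ANDs filter and bitfield to build a per-pair interaction mask.
     */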
#ifdef CALC_COUL_RF
    /* Reaction-field constants */
    mrc_3_S  = gmx_simd_set1_r(-2*ic->k_rf);
#ifdef CALC_ENERGIES
    hrc_3_S  = gmx_simd_set1_r(ic->k_rf);
    moh_rc_S = gmx_simd_set1_r(-ic->c_rf);
#endif
#endif
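    /* For reference, the standard reaction-field forms these constants feed
     * (the formulas are not spelled out in this file):
     *   V(r)   = qq*(1/r + k_rf*r^2 - c_rf)
     *   F(r)/r = qq*(1/r^3 - 2*k_rf)
     * so mrc_3_S carries -2*k_rf for the force, and hrc_3_S/moh_rc_S carry
     * k_rf and -c_rf for the energy.
     */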
#ifdef CALC_COUL_TAB
    /* Generate aligned table index pointers */
    ti0 = prepare_table_load_buffer(ti0_array);
    ti2 = prepare_table_load_buffer(ti2_array);

    invtsp_S  = gmx_simd_set1_r(ic->tabq_scale);
#ifdef CALC_ENERGIES
    mhalfsp_S = gmx_simd_set1_r(-0.5/ic->tabq_scale);
#endif

#ifdef TAB_FDV0
    tab_coul_F = ic->tabq_coul_FDV0;
#else
    tab_coul_F = ic->tabq_coul_F;
    tab_coul_V = ic->tabq_coul_V;
#endif
#endif /* CALC_COUL_TAB */
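    /* Table lookup sketch (scalar equivalent; rt/ti/eps are illustrative
     * names, not from this file):
     *   rt  = r*ic->tabq_scale;   // scaling by invtsp_S
     *   ti  = (int)rt;            // table index, staged via ti0/ti2
     *   eps = rt - ti;            // fraction within the table interval
     * The force is interpolated from the F table; with CALC_ENERGIES the
     * factor -0.5/tabq_scale (mhalfsp_S) enters the term that integrates
     * the tabulated force over the fractional interval to obtain V.
     */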
#ifdef CALC_COUL_EWALD
    beta2_S = gmx_simd_set1_r(ic->ewaldcoeff_q*ic->ewaldcoeff_q);
    beta_S  = gmx_simd_set1_r(ic->ewaldcoeff_q);
#endif

#if (defined CALC_COUL_TAB || defined CALC_COUL_EWALD) && defined CALC_ENERGIES
    sh_ewald_S = gmx_simd_set1_r(ic->sh_ewald);
#endif
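    /* For reference, with beta = ic->ewaldcoeff_q the short-range Ewald pair
     * terms these constants feed are
     *   V(r)   = qq*erfc(beta*r)/r                     (shifted by sh_ewald)
     *   F(r)/r = qq*(erfc(beta*r)
     *                + (2*beta*r/sqrt(pi))*exp(-beta^2*r^2))/r^3
     */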
    /* LJ function constants */
#if defined CALC_ENERGIES || defined LJ_POT_SWITCH
    gmx_simd_real_t sixth_S    = gmx_simd_set1_r(1.0/6.0);
    gmx_simd_real_t twelveth_S = gmx_simd_set1_r(1.0/12.0);
#endif

#if defined LJ_CUT && defined CALC_ENERGIES
    /* We shift the potential by cpot, which can be zero */
    p6_cpot_S  = gmx_simd_set1_r(ic->dispersion_shift.cpot);
    p12_cpot_S = gmx_simd_set1_r(ic->repulsion_shift.cpot);
#endif
#ifdef LJ_POT_SWITCH
    rswitch_S = gmx_simd_set1_r(ic->rvdw_switch);
    swV3_S    = gmx_simd_set1_r(ic->vdw_switch.c3);
    swV4_S    = gmx_simd_set1_r(ic->vdw_switch.c4);
    swV5_S    = gmx_simd_set1_r(ic->vdw_switch.c5);
    swF2_S    = gmx_simd_set1_r(3*ic->vdw_switch.c3);
    swF3_S    = gmx_simd_set1_r(4*ic->vdw_switch.c4);
    swF4_S    = gmx_simd_set1_r(5*ic->vdw_switch.c5);
#endif
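    /* These are the coefficients of the switch polynomial and its derivative;
     * with d = r - rvdw_switch:
     *   sw     = 1 + c3*d^3 + c4*d^4 + c5*d^5
     *   dsw/dr =     3*c3*d^2 + 4*c4*d^3 + 5*c5*d^4
     * which is why swF2..swF4 hold 3*c3, 4*c4 and 5*c5.
     */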
#ifdef LJ_FORCE_SWITCH
    rswitch_S = gmx_simd_set1_r(ic->rvdw_switch);
    p6_fc2_S  = gmx_simd_set1_r(ic->dispersion_shift.c2);
    p6_fc3_S  = gmx_simd_set1_r(ic->dispersion_shift.c3);
    p12_fc2_S = gmx_simd_set1_r(ic->repulsion_shift.c2);
    p12_fc3_S = gmx_simd_set1_r(ic->repulsion_shift.c3);
#ifdef CALC_ENERGIES
    {
        gmx_simd_real_t mthird_S  = gmx_simd_set1_r(-1.0/3.0);
        gmx_simd_real_t mfourth_S = gmx_simd_set1_r(-1.0/4.0);

        p6_vc3_S     = gmx_simd_mul_r(mthird_S,  p6_fc2_S);
        p6_vc4_S     = gmx_simd_mul_r(mfourth_S, p6_fc3_S);
        p6_6cpot_S   = gmx_simd_set1_r(ic->dispersion_shift.cpot/6);
        p12_vc3_S    = gmx_simd_mul_r(mthird_S,  p12_fc2_S);
        p12_vc4_S    = gmx_simd_mul_r(mfourth_S, p12_fc3_S);
        p12_12cpot_S = gmx_simd_set1_r(ic->repulsion_shift.cpot/12);
    }
#endif
#endif
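    /* The energy coefficients follow from integrating the force-switch terms:
     * a force contribution c*d^n yields a potential contribution
     * -c*d^(n+1)/(n+1), hence vc3 = -c2/3 and vc4 = -c3/4 for both the r^-6
     * and r^-12 parts.
     */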
#ifdef LJ_EWALD_GEOM
    mone_S           = gmx_simd_set1_r(-1.0);
    half_S           = gmx_simd_set1_r(0.5);
    lj_ewaldcoeff2   = ic->ewaldcoeff_lj*ic->ewaldcoeff_lj;
    lj_ewaldcoeff6_6 = lj_ewaldcoeff2*lj_ewaldcoeff2*lj_ewaldcoeff2/6;
    lje_c2_S         = gmx_simd_set1_r(lj_ewaldcoeff2);
    lje_c6_6_S       = gmx_simd_set1_r(lj_ewaldcoeff6_6);
#ifdef CALC_ENERGIES
    /* Determine the grid potential at the cut-off */
    gmx_simd_real_t lje_vc_S = gmx_simd_set1_r(ic->sh_lj_ewald);
#endif
#endif
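    /* Note: lje_c6_6_S holds ewaldcoeff_lj^6/6, the prefactor of the LJ-PME
     * grid correction for geometrically combined C6 used by the inner loop
     * and by the self-energy block below (cf. the 0.5*c6_i*lj_ewaldcoeff6_6
     * self term there).
     */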
    /* The kernel either supports rcoulomb = rvdw or rcoulomb >= rvdw */
    rc2_S = gmx_simd_set1_r(ic->rcoulomb*ic->rcoulomb);
#ifdef VDW_CUTOFF_CHECK
    rcvdw2_S = gmx_simd_set1_r(ic->rvdw*ic->rvdw);
#endif

    avoid_sing_S = gmx_simd_set1_r(NBNXN_AVOID_SING_R2_INC);

    q        = nbat->q;
    facel    = ic->epsfac;
    shiftvec = shift_vec[0];
    x        = nbat->x;
#ifdef FIX_LJ_C
    pvdw_c6  = gmx_simd_align_r(pvdw_array);
    pvdw_c12 = pvdw_c6 + UNROLLI*UNROLLJ;

    for (jp = 0; jp < UNROLLJ; jp++)
    {
        pvdw_c6 [0*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [1*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [2*UNROLLJ+jp] = nbat->nbfp[0*2];
        pvdw_c6 [3*UNROLLJ+jp] = nbat->nbfp[0*2];

        pvdw_c12[0*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[1*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[2*UNROLLJ+jp] = nbat->nbfp[0*2+1];
        pvdw_c12[3*UNROLLJ+jp] = nbat->nbfp[0*2+1];
    }
    gmx_simd_real_t c6_S0  = gmx_simd_load_r(pvdw_c6 +0*UNROLLJ);
    gmx_simd_real_t c6_S1  = gmx_simd_load_r(pvdw_c6 +1*UNROLLJ);
    gmx_simd_real_t c6_S2  = gmx_simd_load_r(pvdw_c6 +2*UNROLLJ);
    gmx_simd_real_t c6_S3  = gmx_simd_load_r(pvdw_c6 +3*UNROLLJ);

    gmx_simd_real_t c12_S0 = gmx_simd_load_r(pvdw_c12+0*UNROLLJ);
    gmx_simd_real_t c12_S1 = gmx_simd_load_r(pvdw_c12+1*UNROLLJ);
    gmx_simd_real_t c12_S2 = gmx_simd_load_r(pvdw_c12+2*UNROLLJ);
    gmx_simd_real_t c12_S3 = gmx_simd_load_r(pvdw_c12+3*UNROLLJ);
#endif /* FIX_LJ_C */
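    /* FIX_LJ_C replicates the C6/C12 parameters of atom-type pair (0,0) for
     * every i,j slot, removing the per-type table lookups; judging by the
     * nbfp[0*2] indexing this is a testing/benchmarking path rather than a
     * production one.
     */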
#ifdef ENERGY_GROUPS
    egps_ishift  = nbat->neg_2log;
    egps_imask   = (1<<egps_ishift) - 1;
    egps_jshift  = 2*nbat->neg_2log;
    egps_jmask   = (1<<egps_jshift) - 1;
    egps_jstride = (UNROLLJ>>1)*UNROLLJ;
    /* Major division is over i-particle energy groups, determine the stride */
    Vstride_i    = nbat->nenergrp*(1<<nbat->neg_2log)*egps_jstride;
#endif
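    /* Example (illustrative numbers, not from this file): with nenergrp = 4
     * and thus neg_2log = 2, each i-atom's group index occupies 2 bits of
     * egps_i, egps_imask = 0x3 extracts it, and Vstride_i = 4*4*egps_jstride
     * separates the per-group energy buffers addressed via vvdwtp[]/vctp[].
     */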
    l_cj = nbl->cj;

    ninner = 0;
    for (n = 0; n < nbl->nci; n++)
    {
        nbln = &nbl->ci[n];

        ish    = (nbln->shift & NBNXN_CI_SHIFT);
        ish3   = ish*3;
        cjind0 = nbln->cj_ind_start;
        cjind1 = nbln->cj_ind_end;
        ci     = nbln->ci;
        ci_sh  = (ish == CENTRAL ? ci : -1);

        shX_S = gmx_simd_load1_r(shiftvec+ish3);
        shY_S = gmx_simd_load1_r(shiftvec+ish3+1);
        shZ_S = gmx_simd_load1_r(shiftvec+ish3+2);
#if UNROLLJ <= 4
        int sci  = ci*STRIDE;
        int scix = sci*DIM;
#if defined LJ_COMB_LB || defined LJ_COMB_GEOM || defined LJ_EWALD_GEOM
        int sci2 = sci*2;
#endif
#else
        int sci  = (ci>>1)*STRIDE;
        int scix = sci*DIM + (ci & 1)*(STRIDE>>1);
#if defined LJ_COMB_LB || defined LJ_COMB_GEOM || defined LJ_EWALD_GEOM
        int sci2 = sci*2 + (ci & 1)*(STRIDE>>1);
#endif
        sci     += (ci & 1)*(STRIDE>>1);
#endif
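        /* Layout note: coordinates are packed per cluster block as STRIDE
         * x-values, then STRIDE y-values, then STRIDE z-values (xx..yy..zz),
         * so sciy = scix + STRIDE and sciz = sciy + STRIDE below. With
         * UNROLLJ > 4, two 4-atom i-clusters share one block, hence the
         * (ci & 1)*(STRIDE>>1) sub-block offsets.
         */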
        /* We have 5 LJ/C combinations, but use only three inner loops,
         * as the other combinations are unlikely and/or not much faster:
         * inner half-LJ + C for half-LJ + C / no-LJ + C
         * inner LJ + C for full-LJ + C
         * inner LJ for full-LJ + no-C / half-LJ + no-C
         */
        do_LJ   = (nbln->shift & NBNXN_CI_DO_LJ(0));
        do_coul = (nbln->shift & NBNXN_CI_DO_COUL(0));
        half_LJ = ((nbln->shift & NBNXN_CI_HALF_LJ(0)) || !do_LJ) && do_coul;
#ifdef ENERGY_GROUPS
        egps_i = nbat->energrp[ci];
        {
            int ia, egp_ia;

            for (ia = 0; ia < UNROLLI; ia++)
            {
                egp_ia     = (egps_i >> (ia*egps_ishift)) & egps_imask;
                vvdwtp[ia] = Vvdw + egp_ia*Vstride_i;
                vctp[ia]   = Vc   + egp_ia*Vstride_i;
            }
        }
#endif
#ifdef CALC_ENERGIES
#ifdef LJ_EWALD_GEOM
        gmx_bool do_self = TRUE;
#else
        gmx_bool do_self = do_coul;
#endif

#if UNROLLJ == 4
        if (do_self && l_cj[nbln->cj_ind_start].cj == ci_sh)
#endif
#if UNROLLJ == 8
        if (do_self && l_cj[nbln->cj_ind_start].cj == (ci_sh>>1))
#endif
        {
            if (do_coul)
            {
                real Vc_sub_self;
                int  ia;

#ifdef CALC_COUL_RF
                Vc_sub_self = 0.5*ic->c_rf;
#endif
#ifdef CALC_COUL_TAB
#ifdef TAB_FDV0
                Vc_sub_self = 0.5*tab_coul_F[2];
#else
                Vc_sub_self = 0.5*tab_coul_V[0];
#endif
#endif
#ifdef CALC_COUL_EWALD
                /* beta/sqrt(pi) */
                Vc_sub_self = 0.5*ic->ewaldcoeff_q*M_2_SQRTPI;
#endif

                for (ia = 0; ia < UNROLLI; ia++)
                {
                    real qi;

                    qi = q[sci+ia];
#ifdef ENERGY_GROUPS
                    vctp[ia][((egps_i>>(ia*egps_ishift)) & egps_imask)*egps_jstride]
#else
                    *Vc
#endif
                        -= facel*qi*qi*Vc_sub_self;
                }
            }

#ifdef LJ_EWALD_GEOM
            {
                int ia;

                for (ia = 0; ia < UNROLLI; ia++)
                {
                    real c6_i;

                    c6_i = nbat->nbfp[nbat->type[sci+ia]*(nbat->ntype + 1)*2]/6;
#ifdef ENERGY_GROUPS
                    vvdwtp[ia][((egps_i>>(ia*egps_ishift)) & egps_imask)*egps_jstride]
#else
                    *Vvdw
#endif
                        += 0.5*c6_i*lj_ewaldcoeff6_6;
                }
            }
#endif /* LJ_EWALD_GEOM */
        }
#endif
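        /* Why this block exists: when the first j-cluster of a list entry is
         * the i-cluster itself, the inner loop also evaluates the i==j "self"
         * pairs, whose r=0 Coulomb contribution is finite (0.5*c_rf, the
         * table value at r=0, or 0.5*beta*2/sqrt(pi) for Ewald, the r->0
         * limit of erf(beta*r)/r). These constant self terms are subtracted
         * here, and the LJ-PME C6 grid self term added, so the reported
         * energies come out correct.
         */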
        /* Load i atom data */
        int sciy = scix + STRIDE;
        int sciz = sciy + STRIDE;
        gmx_load1p1_pr(&ix_S0, x+scix);
        gmx_load1p1_pr(&ix_S2, x+scix+2);
        gmx_load1p1_pr(&iy_S0, x+sciy);
        gmx_load1p1_pr(&iy_S2, x+sciy+2);
        gmx_load1p1_pr(&iz_S0, x+sciz);
        gmx_load1p1_pr(&iz_S2, x+sciz+2);
        ix_S0 = gmx_simd_add_r(ix_S0, shX_S);
        ix_S2 = gmx_simd_add_r(ix_S2, shX_S);
        iy_S0 = gmx_simd_add_r(iy_S0, shY_S);
        iy_S2 = gmx_simd_add_r(iy_S2, shY_S);
        iz_S0 = gmx_simd_add_r(iz_S0, shZ_S);
        iz_S2 = gmx_simd_add_r(iz_S2, shZ_S);
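        /* The periodic shift is added to the i-coordinates once here, so the
         * inner loop can use the stored j-coordinates directly instead of
         * applying minimum-image corrections per pair.
         */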
        if (do_coul)
        {
            gmx_simd_real_t facel_S;

            facel_S = gmx_simd_set1_r(facel);

            gmx_load1p1_pr(&iq_S0, q+sci);
            gmx_load1p1_pr(&iq_S2, q+sci+2);
            iq_S0 = gmx_simd_mul_r(facel_S, iq_S0);
            iq_S2 = gmx_simd_mul_r(facel_S, iq_S2);
        }
#ifdef LJ_COMB_LB
        gmx_load1p1_pr(&hsig_i_S0, ljc+sci2+0);
        gmx_load1p1_pr(&hsig_i_S2, ljc+sci2+2);
        gmx_load1p1_pr(&seps_i_S0, ljc+sci2+STRIDE+0);
        gmx_load1p1_pr(&seps_i_S2, ljc+sci2+STRIDE+2);

#elif defined LJ_COMB_GEOM
        gmx_simd_real_t c6s_S0, c12s_S0;
        gmx_simd_real_t c6s_S2, c12s_S2;

        gmx_load1p1_pr(&c6s_S0, ljc+sci2+0);
        if (!half_LJ)
        {
            gmx_load1p1_pr(&c6s_S2, ljc+sci2+2);
        }
        gmx_load1p1_pr(&c12s_S0, ljc+sci2+STRIDE+0);
        if (!half_LJ)
        {
            gmx_load1p1_pr(&c12s_S2, ljc+sci2+STRIDE+2);
        }
#elif !defined LJ_COMB_LB && !defined FIX_LJ_C
        const real *nbfp0 = nbfp_ptr + type[sci  ]*nbat->ntype*nbfp_stride;
        const real *nbfp1 = nbfp_ptr + type[sci+1]*nbat->ntype*nbfp_stride;
        const real *nbfp2 = NULL, *nbfp3 = NULL;

        if (!half_LJ)
        {
            nbfp2 = nbfp_ptr + type[sci+2]*nbat->ntype*nbfp_stride;
            nbfp3 = nbfp_ptr + type[sci+3]*nbat->ntype*nbfp_stride;
        }
#endif
#ifdef LJ_EWALD_GEOM
        /* We need the geometrically combined C6 for the PME grid correction */
        gmx_simd_real_t c6s_S0, c6s_S2;
        gmx_load1p1_pr(&c6s_S0, ljc+sci2+0);
        if (!half_LJ)
        {
            gmx_load1p1_pr(&c6s_S2, ljc+sci2+2);
        }
#endif
        /* Zero the potential energy for this list */
#ifdef CALC_ENERGIES
        gmx_simd_real_t Vvdwtot_S = gmx_simd_setzero_r();
        gmx_simd_real_t vctot_S   = gmx_simd_setzero_r();
#endif
        /* Clear i atom forces */
        fix_S0 = gmx_simd_setzero_r();
        fix_S2 = gmx_simd_setzero_r();
        fiy_S0 = gmx_simd_setzero_r();
        fiy_S2 = gmx_simd_setzero_r();
        fiz_S0 = gmx_simd_setzero_r();
        fiz_S2 = gmx_simd_setzero_r();
        cjind = cjind0;

        /* Currently all kernels use (at least half) LJ */
#define CALC_LJ
        if (half_LJ)
        {
            /* Coulomb: all i-atoms, LJ: first half i-atoms */
#define CALC_COULOMB
#define HALF_LJ
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn_inner.h"
            }
#undef HALF_LJ
#undef CALC_COULOMB
        }
        else if (do_coul)
        {
            /* Coulomb: all i-atoms, LJ: all i-atoms */
#define CALC_COULOMB
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn_inner.h"
            }
#undef CALC_COULOMB
        }
        else
        {
            /* Coulomb: none, LJ: all i-atoms */
#define CHECK_EXCLS
            while (cjind < cjind1 && nbl->cj[cjind].excl != NBNXN_INTERACTION_MASK_ALL)
            {
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn_inner.h"
                cjind++;
            }
#undef CHECK_EXCLS
            for (; (cjind < cjind1); cjind++)
            {
#include "gromacs/mdlib/nbnxn_kernels/simd_2xnn/nbnxn_kernel_simd_2xnn_inner.h"
            }
        }
#undef CALC_LJ
        ninner += cjind1 - cjind0;
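        /* The j-loop over each list entry runs in two phases: entries whose
         * exclusion mask is not all-ones go through the inner kernel with
         * CHECK_EXCLS defined (per-pair masking), then the remaining entries
         * use a cheaper variant without the exclusion test; the pair-search
         * code places the entries that need masking at the front of the list.
         */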
        /* Add accumulated i-forces to the force array */
        fix_S = gmx_mm_transpose_sum4h_pr(fix_S0, fix_S2);
        gmx_simd4_store_r(f+scix, gmx_simd4_add_r(fix_S, gmx_simd4_load_r(f+scix)));

        fiy_S = gmx_mm_transpose_sum4h_pr(fiy_S0, fiy_S2);
        gmx_simd4_store_r(f+sciy, gmx_simd4_add_r(fiy_S, gmx_simd4_load_r(f+sciy)));

        fiz_S = gmx_mm_transpose_sum4h_pr(fiz_S0, fiz_S2);
        gmx_simd4_store_r(f+sciz, gmx_simd4_add_r(fiz_S, gmx_simd4_load_r(f+sciz)));
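        /* gmx_mm_transpose_sum4h_pr reduces each register half to one real,
         * producing a simd4 vector with one component per i-atom (i0,i1 from
         * the S0 halves, i2,i3 from the S2 halves); one simd4 load/add/store
         * per dimension then accumulates the result into f[].
         */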
#ifdef CALC_SHIFTFORCES
        fshift[ish3+0] += gmx_simd4_reduce_r(fix_S);
        fshift[ish3+1] += gmx_simd4_reduce_r(fiy_S);
        fshift[ish3+2] += gmx_simd4_reduce_r(fiz_S);
#endif
#ifdef CALC_ENERGIES
        if (do_coul)
        {
            *Vc += gmx_simd_reduce_r(vctot_S);
        }
        *Vvdw += gmx_simd_reduce_r(Vvdwtot_S);
#endif
        /* Outer loop uses 6 flops/iteration */
    }

#ifdef COUNT_PAIRS
    printf("atom pairs %d\n", npair);
#endif
}