2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
47 #include "kernelutil_x86_avx_256_single.h"
50 * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_single
51 * Electrostatics interaction: Ewald
52 * VdW interaction: None
53 * Geometry: Water4-Particle
54 * Calculate force/pot: PotentialAndForce
/*
 * Nonbonded kernel computing both potential and force (VF) for Ewald
 * electrostatics with a potential-switch modifier, no VdW, Water4-Particle
 * geometry (4-site water i molecules vs. single j particles), vectorized
 * over 8 j atoms per iteration with AVX-256 single precision.
 *
 * NOTE(review): this listing appears to have lines elided by extraction
 * (e.g. the opening/closing braces, the jnrA..jnrH = jjnr[jidx+..] loads,
 * the fscal = felec; assignments, and several declarations such as
 * charge/ewtab/scratch/rcutoff_scalar) — confirm against the original
 * generator output before editing.
 */
57 nb_kernel_ElecEwSw_VdwNone_GeomW4P1_VF_avx_256_single
58 (t_nblist * gmx_restrict nlist,
59 rvec * gmx_restrict xx,
60 rvec * gmx_restrict ff,
61 struct t_forcerec * gmx_restrict fr,
62 t_mdatoms * gmx_restrict mdatoms,
63 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64 t_nrnb * gmx_restrict nrnb)
66 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
67 * just 0 for non-waters.
68 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
69 * jnr indices corresponding to data put in the four positions in the SIMD register.
71 int i_shift_offset,i_coord_offset,outeriter,inneriter;
72 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73 int jnrA,jnrB,jnrC,jnrD;
74 int jnrE,jnrF,jnrG,jnrH;
75 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
79 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
81 real *shiftvec,*fshift,*x,*f;
82 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
84 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
85 real * vdwioffsetptr1;
86 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
87 real * vdwioffsetptr2;
88 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
89 real * vdwioffsetptr3;
90 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
91 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
92 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
93 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
94 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
95 __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
96 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
99 __m128i ewitab_lo,ewitab_hi;
100 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
101 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
103 __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
104 real rswitch_scalar,d_scalar;
105 __m256 dummy_mask,cutoff_mask;
106 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
107 __m256 one = _mm256_set1_ps(1.0);
108 __m256 two = _mm256_set1_ps(2.0);
114 jindex = nlist->jindex;
116 shiftidx = nlist->shift;
118 shiftvec = fr->shift_vec[0];
119 fshift = fr->fshift[0];
120 facel = _mm256_set1_ps(fr->ic->epsfac);
121 charge = mdatoms->chargeA;
/* Broadcast Ewald splitting parameters; beta2/beta3 are powers used by the
 * analytical PME correction below. */
123 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
124 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
125 beta2 = _mm256_mul_ps(beta,beta);
126 beta3 = _mm256_mul_ps(beta,beta2);
128 ewtab = fr->ic->tabq_coul_FDV0;
129 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
130 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
132 /* Setup water-specific parameters */
/* i charges are taken from the first water in the list; atoms 1..3 carry
 * charge in the 4-site water model (site 0 does not interact here). */
133 inr = nlist->iinr[0];
134 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
135 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
136 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
138 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
139 rcutoff_scalar = fr->ic->rcoulomb;
140 rcutoff = _mm256_set1_ps(rcutoff_scalar);
141 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
143 rswitch_scalar = fr->ic->rcoulomb_switch;
144 rswitch = _mm256_set1_ps(rswitch_scalar);
145 /* Setup switch parameters */
/* Coefficients of the 5th-order switch polynomial sw(d) = 1 + d^3*(swV3 +
 * d*(swV4 + d*swV5)) over the interval d = r - rswitch in [0, rcutoff-rswitch];
 * swF2..swF4 are the corresponding derivative coefficients. */
146 d_scalar = rcutoff_scalar-rswitch_scalar;
147 d = _mm256_set1_ps(d_scalar);
148 swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
149 swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
150 swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
151 swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
152 swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
153 swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
155 /* Avoid stupid compiler warnings */
156 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
169 for(iidx=0;iidx<4*DIM;iidx++)
174 /* Start outer loop over neighborlists */
175 for(iidx=0; iidx<nri; iidx++)
177 /* Load shift vector for this list */
178 i_shift_offset = DIM*shiftidx[iidx];
180 /* Load limits for loop over neighbors */
181 j_index_start = jindex[iidx];
182 j_index_end = jindex[iidx+1];
184 /* Get outer coordinate index */
186 i_coord_offset = DIM*inr;
188 /* Load i particle coords and add shift vector */
/* +DIM skips the non-interacting water site 0; only sites 1..3 are loaded. */
189 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
190 &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
192 fix1 = _mm256_setzero_ps();
193 fiy1 = _mm256_setzero_ps();
194 fiz1 = _mm256_setzero_ps();
195 fix2 = _mm256_setzero_ps();
196 fiy2 = _mm256_setzero_ps();
197 fiz2 = _mm256_setzero_ps();
198 fix3 = _mm256_setzero_ps();
199 fiy3 = _mm256_setzero_ps();
200 fiz3 = _mm256_setzero_ps();
202 /* Reset potential sums */
203 velecsum = _mm256_setzero_ps();
205 /* Start inner kernel loop */
/* Main loop: full batches of 8 j atoms; the jjnr[jidx+7]>=0 condition stops
 * before the padded tail (padding entries are negative), which is handled by
 * the masked epilogue loop further down. */
206 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
209 /* Get j neighbor index, and coordinate index */
/* NOTE(review): the jnrA..jnrH = jjnr[jidx+0..7] loads appear elided from
 * this listing — confirm against the generated original. */
218 j_coord_offsetA = DIM*jnrA;
219 j_coord_offsetB = DIM*jnrB;
220 j_coord_offsetC = DIM*jnrC;
221 j_coord_offsetD = DIM*jnrD;
222 j_coord_offsetE = DIM*jnrE;
223 j_coord_offsetF = DIM*jnrF;
224 j_coord_offsetG = DIM*jnrG;
225 j_coord_offsetH = DIM*jnrH;
227 /* load j atom coordinates */
228 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
229 x+j_coord_offsetC,x+j_coord_offsetD,
230 x+j_coord_offsetE,x+j_coord_offsetF,
231 x+j_coord_offsetG,x+j_coord_offsetH,
234 /* Calculate displacement vector */
235 dx10 = _mm256_sub_ps(ix1,jx0);
236 dy10 = _mm256_sub_ps(iy1,jy0);
237 dz10 = _mm256_sub_ps(iz1,jz0);
238 dx20 = _mm256_sub_ps(ix2,jx0);
239 dy20 = _mm256_sub_ps(iy2,jy0);
240 dz20 = _mm256_sub_ps(iz2,jz0);
241 dx30 = _mm256_sub_ps(ix3,jx0);
242 dy30 = _mm256_sub_ps(iy3,jy0);
243 dz30 = _mm256_sub_ps(iz3,jz0);
245 /* Calculate squared distance and things based on it */
246 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
247 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
248 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
250 rinv10 = avx256_invsqrt_f(rsq10);
251 rinv20 = avx256_invsqrt_f(rsq20);
252 rinv30 = avx256_invsqrt_f(rsq30);
254 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
255 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
256 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
258 /* Load parameters for j particles */
259 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
260 charge+jnrC+0,charge+jnrD+0,
261 charge+jnrE+0,charge+jnrF+0,
262 charge+jnrG+0,charge+jnrH+0);
264 fjx0 = _mm256_setzero_ps();
265 fjy0 = _mm256_setzero_ps();
266 fjz0 = _mm256_setzero_ps();
268 /**************************
269 * CALCULATE INTERACTIONS *
270 **************************/
/* Interaction of i water site 1 with the 8 j atoms; only evaluated when at
 * least one pair in the batch is inside the cutoff. */
272 if (gmx_mm256_any_lt(rsq10,rcutoff2))
275 r10 = _mm256_mul_ps(rsq10,rinv10);
277 /* Compute parameters for interactions between i and j atoms */
278 qq10 = _mm256_mul_ps(iq1,jq0);
280 /* EWALD ELECTROSTATICS */
282 /* Analytical PME correction */
/* felec = qq*(1/r^3 + beta^3*pmecorrF(zeta2)), velec = qq*(1/r - beta*pmecorrV(zeta2)). */
283 zeta2 = _mm256_mul_ps(beta2,rsq10);
284 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
285 pmecorrF = avx256_pmecorrF_f(zeta2);
286 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
287 felec = _mm256_mul_ps(qq10,felec);
288 pmecorrV = avx256_pmecorrV_f(zeta2);
289 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
290 velec = _mm256_sub_ps(rinv10,pmecorrV);
291 velec = _mm256_mul_ps(qq10,velec);
/* d is clamped to 0 below rswitch so sw==1 (no modification) inside the
 * switching region's inner boundary. */
293 d = _mm256_sub_ps(r10,rswitch);
294 d = _mm256_max_ps(d,_mm256_setzero_ps());
295 d2 = _mm256_mul_ps(d,d);
296 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
298 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
300 /* Evaluate switch function */
301 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
302 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
303 velec = _mm256_mul_ps(velec,sw);
304 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
306 /* Update potential sum for this i atom from the interaction with this j atom. */
307 velec = _mm256_and_ps(velec,cutoff_mask);
308 velecsum = _mm256_add_ps(velecsum,velec);
/* NOTE(review): fscal = felec; appears elided from this listing. */
312 fscal = _mm256_and_ps(fscal,cutoff_mask);
314 /* Calculate temporary vectorial force */
315 tx = _mm256_mul_ps(fscal,dx10);
316 ty = _mm256_mul_ps(fscal,dy10);
317 tz = _mm256_mul_ps(fscal,dz10);
319 /* Update vectorial force */
320 fix1 = _mm256_add_ps(fix1,tx);
321 fiy1 = _mm256_add_ps(fiy1,ty);
322 fiz1 = _mm256_add_ps(fiz1,tz);
/* j forces accumulate the same tx/ty/tz and are subtracted (Newton's third
 * law) by the decrement call after all three i sites are processed. */
324 fjx0 = _mm256_add_ps(fjx0,tx);
325 fjy0 = _mm256_add_ps(fjy0,ty);
326 fjz0 = _mm256_add_ps(fjz0,tz);
330 /**************************
331 * CALCULATE INTERACTIONS *
332 **************************/
/* Interaction of i water site 2 with the 8 j atoms (same structure as site 1). */
334 if (gmx_mm256_any_lt(rsq20,rcutoff2))
337 r20 = _mm256_mul_ps(rsq20,rinv20);
339 /* Compute parameters for interactions between i and j atoms */
340 qq20 = _mm256_mul_ps(iq2,jq0);
342 /* EWALD ELECTROSTATICS */
344 /* Analytical PME correction */
345 zeta2 = _mm256_mul_ps(beta2,rsq20);
346 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
347 pmecorrF = avx256_pmecorrF_f(zeta2);
348 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
349 felec = _mm256_mul_ps(qq20,felec);
350 pmecorrV = avx256_pmecorrV_f(zeta2);
351 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
352 velec = _mm256_sub_ps(rinv20,pmecorrV);
353 velec = _mm256_mul_ps(qq20,velec);
355 d = _mm256_sub_ps(r20,rswitch);
356 d = _mm256_max_ps(d,_mm256_setzero_ps());
357 d2 = _mm256_mul_ps(d,d);
358 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
360 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
362 /* Evaluate switch function */
363 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
364 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
365 velec = _mm256_mul_ps(velec,sw);
366 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
368 /* Update potential sum for this i atom from the interaction with this j atom. */
369 velec = _mm256_and_ps(velec,cutoff_mask);
370 velecsum = _mm256_add_ps(velecsum,velec);
374 fscal = _mm256_and_ps(fscal,cutoff_mask);
376 /* Calculate temporary vectorial force */
377 tx = _mm256_mul_ps(fscal,dx20);
378 ty = _mm256_mul_ps(fscal,dy20);
379 tz = _mm256_mul_ps(fscal,dz20);
381 /* Update vectorial force */
382 fix2 = _mm256_add_ps(fix2,tx);
383 fiy2 = _mm256_add_ps(fiy2,ty);
384 fiz2 = _mm256_add_ps(fiz2,tz);
386 fjx0 = _mm256_add_ps(fjx0,tx);
387 fjy0 = _mm256_add_ps(fjy0,ty);
388 fjz0 = _mm256_add_ps(fjz0,tz);
392 /**************************
393 * CALCULATE INTERACTIONS *
394 **************************/
/* Interaction of i water site 3 with the 8 j atoms (same structure as site 1). */
396 if (gmx_mm256_any_lt(rsq30,rcutoff2))
399 r30 = _mm256_mul_ps(rsq30,rinv30);
401 /* Compute parameters for interactions between i and j atoms */
402 qq30 = _mm256_mul_ps(iq3,jq0);
404 /* EWALD ELECTROSTATICS */
406 /* Analytical PME correction */
407 zeta2 = _mm256_mul_ps(beta2,rsq30);
408 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
409 pmecorrF = avx256_pmecorrF_f(zeta2);
410 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
411 felec = _mm256_mul_ps(qq30,felec);
412 pmecorrV = avx256_pmecorrV_f(zeta2);
413 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
414 velec = _mm256_sub_ps(rinv30,pmecorrV);
415 velec = _mm256_mul_ps(qq30,velec);
417 d = _mm256_sub_ps(r30,rswitch);
418 d = _mm256_max_ps(d,_mm256_setzero_ps());
419 d2 = _mm256_mul_ps(d,d);
420 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
422 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
424 /* Evaluate switch function */
425 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
426 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
427 velec = _mm256_mul_ps(velec,sw);
428 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
430 /* Update potential sum for this i atom from the interaction with this j atom. */
431 velec = _mm256_and_ps(velec,cutoff_mask);
432 velecsum = _mm256_add_ps(velecsum,velec);
436 fscal = _mm256_and_ps(fscal,cutoff_mask);
438 /* Calculate temporary vectorial force */
439 tx = _mm256_mul_ps(fscal,dx30);
440 ty = _mm256_mul_ps(fscal,dy30);
441 tz = _mm256_mul_ps(fscal,dz30);
443 /* Update vectorial force */
444 fix3 = _mm256_add_ps(fix3,tx);
445 fiy3 = _mm256_add_ps(fiy3,ty);
446 fiz3 = _mm256_add_ps(fiz3,tz);
448 fjx0 = _mm256_add_ps(fjx0,tx);
449 fjy0 = _mm256_add_ps(fjy0,ty);
450 fjz0 = _mm256_add_ps(fjz0,tz);
/* Scatter the accumulated j forces back to memory (subtracted, since the
 * accumulators hold the force ON the i sites). */
454 fjptrA = f+j_coord_offsetA;
455 fjptrB = f+j_coord_offsetB;
456 fjptrC = f+j_coord_offsetC;
457 fjptrD = f+j_coord_offsetD;
458 fjptrE = f+j_coord_offsetE;
459 fjptrF = f+j_coord_offsetF;
460 fjptrG = f+j_coord_offsetG;
461 fjptrH = f+j_coord_offsetH;
463 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
465 /* Inner loop uses 327 flops */
/* Epilogue: final partial batch. Negative jjnr entries mark padding; they
 * are clamped to index 0 for the loads and their contributions are zeroed
 * via dummy_mask, with their force output redirected to scratch. */
471 /* Get j neighbor index, and coordinate index */
472 jnrlistA = jjnr[jidx];
473 jnrlistB = jjnr[jidx+1];
474 jnrlistC = jjnr[jidx+2];
475 jnrlistD = jjnr[jidx+3];
476 jnrlistE = jjnr[jidx+4];
477 jnrlistF = jjnr[jidx+5];
478 jnrlistG = jjnr[jidx+6];
479 jnrlistH = jjnr[jidx+7];
480 /* Sign of each element will be negative for non-real atoms.
481 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
482 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
484 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
485 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
487 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
488 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
489 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
490 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
491 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
492 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
493 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
494 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
495 j_coord_offsetA = DIM*jnrA;
496 j_coord_offsetB = DIM*jnrB;
497 j_coord_offsetC = DIM*jnrC;
498 j_coord_offsetD = DIM*jnrD;
499 j_coord_offsetE = DIM*jnrE;
500 j_coord_offsetF = DIM*jnrF;
501 j_coord_offsetG = DIM*jnrG;
502 j_coord_offsetH = DIM*jnrH;
504 /* load j atom coordinates */
505 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
506 x+j_coord_offsetC,x+j_coord_offsetD,
507 x+j_coord_offsetE,x+j_coord_offsetF,
508 x+j_coord_offsetG,x+j_coord_offsetH,
511 /* Calculate displacement vector */
512 dx10 = _mm256_sub_ps(ix1,jx0);
513 dy10 = _mm256_sub_ps(iy1,jy0);
514 dz10 = _mm256_sub_ps(iz1,jz0);
515 dx20 = _mm256_sub_ps(ix2,jx0);
516 dy20 = _mm256_sub_ps(iy2,jy0);
517 dz20 = _mm256_sub_ps(iz2,jz0);
518 dx30 = _mm256_sub_ps(ix3,jx0);
519 dy30 = _mm256_sub_ps(iy3,jy0);
520 dz30 = _mm256_sub_ps(iz3,jz0);
522 /* Calculate squared distance and things based on it */
523 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
524 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
525 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
527 rinv10 = avx256_invsqrt_f(rsq10);
528 rinv20 = avx256_invsqrt_f(rsq20);
529 rinv30 = avx256_invsqrt_f(rsq30);
531 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
532 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
533 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
535 /* Load parameters for j particles */
536 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
537 charge+jnrC+0,charge+jnrD+0,
538 charge+jnrE+0,charge+jnrF+0,
539 charge+jnrG+0,charge+jnrH+0);
541 fjx0 = _mm256_setzero_ps();
542 fjy0 = _mm256_setzero_ps();
543 fjz0 = _mm256_setzero_ps();
545 /**************************
546 * CALCULATE INTERACTIONS *
547 **************************/
549 if (gmx_mm256_any_lt(rsq10,rcutoff2))
552 r10 = _mm256_mul_ps(rsq10,rinv10);
/* Zero r for dummy lanes so the switch polynomial stays finite for them. */
553 r10 = _mm256_andnot_ps(dummy_mask,r10);
555 /* Compute parameters for interactions between i and j atoms */
556 qq10 = _mm256_mul_ps(iq1,jq0);
558 /* EWALD ELECTROSTATICS */
560 /* Analytical PME correction */
561 zeta2 = _mm256_mul_ps(beta2,rsq10);
562 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
563 pmecorrF = avx256_pmecorrF_f(zeta2);
564 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
565 felec = _mm256_mul_ps(qq10,felec);
566 pmecorrV = avx256_pmecorrV_f(zeta2);
567 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
568 velec = _mm256_sub_ps(rinv10,pmecorrV);
569 velec = _mm256_mul_ps(qq10,velec);
571 d = _mm256_sub_ps(r10,rswitch);
572 d = _mm256_max_ps(d,_mm256_setzero_ps());
573 d2 = _mm256_mul_ps(d,d);
574 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
576 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
578 /* Evaluate switch function */
579 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
580 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
581 velec = _mm256_mul_ps(velec,sw);
582 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
584 /* Update potential sum for this i atom from the interaction with this j atom. */
585 velec = _mm256_and_ps(velec,cutoff_mask);
586 velec = _mm256_andnot_ps(dummy_mask,velec);
587 velecsum = _mm256_add_ps(velecsum,velec);
591 fscal = _mm256_and_ps(fscal,cutoff_mask);
593 fscal = _mm256_andnot_ps(dummy_mask,fscal);
595 /* Calculate temporary vectorial force */
596 tx = _mm256_mul_ps(fscal,dx10);
597 ty = _mm256_mul_ps(fscal,dy10);
598 tz = _mm256_mul_ps(fscal,dz10);
600 /* Update vectorial force */
601 fix1 = _mm256_add_ps(fix1,tx);
602 fiy1 = _mm256_add_ps(fiy1,ty);
603 fiz1 = _mm256_add_ps(fiz1,tz);
605 fjx0 = _mm256_add_ps(fjx0,tx);
606 fjy0 = _mm256_add_ps(fjy0,ty);
607 fjz0 = _mm256_add_ps(fjz0,tz);
611 /**************************
612 * CALCULATE INTERACTIONS *
613 **************************/
615 if (gmx_mm256_any_lt(rsq20,rcutoff2))
618 r20 = _mm256_mul_ps(rsq20,rinv20);
619 r20 = _mm256_andnot_ps(dummy_mask,r20);
621 /* Compute parameters for interactions between i and j atoms */
622 qq20 = _mm256_mul_ps(iq2,jq0);
624 /* EWALD ELECTROSTATICS */
626 /* Analytical PME correction */
627 zeta2 = _mm256_mul_ps(beta2,rsq20);
628 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
629 pmecorrF = avx256_pmecorrF_f(zeta2);
630 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
631 felec = _mm256_mul_ps(qq20,felec);
632 pmecorrV = avx256_pmecorrV_f(zeta2);
633 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
634 velec = _mm256_sub_ps(rinv20,pmecorrV);
635 velec = _mm256_mul_ps(qq20,velec);
637 d = _mm256_sub_ps(r20,rswitch);
638 d = _mm256_max_ps(d,_mm256_setzero_ps());
639 d2 = _mm256_mul_ps(d,d);
640 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
642 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
644 /* Evaluate switch function */
645 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
646 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
647 velec = _mm256_mul_ps(velec,sw);
648 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
650 /* Update potential sum for this i atom from the interaction with this j atom. */
651 velec = _mm256_and_ps(velec,cutoff_mask);
652 velec = _mm256_andnot_ps(dummy_mask,velec);
653 velecsum = _mm256_add_ps(velecsum,velec);
657 fscal = _mm256_and_ps(fscal,cutoff_mask);
659 fscal = _mm256_andnot_ps(dummy_mask,fscal);
661 /* Calculate temporary vectorial force */
662 tx = _mm256_mul_ps(fscal,dx20);
663 ty = _mm256_mul_ps(fscal,dy20);
664 tz = _mm256_mul_ps(fscal,dz20);
666 /* Update vectorial force */
667 fix2 = _mm256_add_ps(fix2,tx);
668 fiy2 = _mm256_add_ps(fiy2,ty);
669 fiz2 = _mm256_add_ps(fiz2,tz);
671 fjx0 = _mm256_add_ps(fjx0,tx);
672 fjy0 = _mm256_add_ps(fjy0,ty);
673 fjz0 = _mm256_add_ps(fjz0,tz);
677 /**************************
678 * CALCULATE INTERACTIONS *
679 **************************/
681 if (gmx_mm256_any_lt(rsq30,rcutoff2))
684 r30 = _mm256_mul_ps(rsq30,rinv30);
685 r30 = _mm256_andnot_ps(dummy_mask,r30);
687 /* Compute parameters for interactions between i and j atoms */
688 qq30 = _mm256_mul_ps(iq3,jq0);
690 /* EWALD ELECTROSTATICS */
692 /* Analytical PME correction */
693 zeta2 = _mm256_mul_ps(beta2,rsq30);
694 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
695 pmecorrF = avx256_pmecorrF_f(zeta2);
696 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
697 felec = _mm256_mul_ps(qq30,felec);
698 pmecorrV = avx256_pmecorrV_f(zeta2);
699 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
700 velec = _mm256_sub_ps(rinv30,pmecorrV);
701 velec = _mm256_mul_ps(qq30,velec);
703 d = _mm256_sub_ps(r30,rswitch);
704 d = _mm256_max_ps(d,_mm256_setzero_ps());
705 d2 = _mm256_mul_ps(d,d);
706 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
708 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
710 /* Evaluate switch function */
711 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
712 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
713 velec = _mm256_mul_ps(velec,sw);
714 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
716 /* Update potential sum for this i atom from the interaction with this j atom. */
717 velec = _mm256_and_ps(velec,cutoff_mask);
718 velec = _mm256_andnot_ps(dummy_mask,velec);
719 velecsum = _mm256_add_ps(velecsum,velec);
723 fscal = _mm256_and_ps(fscal,cutoff_mask);
725 fscal = _mm256_andnot_ps(dummy_mask,fscal);
727 /* Calculate temporary vectorial force */
728 tx = _mm256_mul_ps(fscal,dx30);
729 ty = _mm256_mul_ps(fscal,dy30);
730 tz = _mm256_mul_ps(fscal,dz30);
732 /* Update vectorial force */
733 fix3 = _mm256_add_ps(fix3,tx);
734 fiy3 = _mm256_add_ps(fiy3,ty);
735 fiz3 = _mm256_add_ps(fiz3,tz);
737 fjx0 = _mm256_add_ps(fjx0,tx);
738 fjy0 = _mm256_add_ps(fjy0,ty);
739 fjz0 = _mm256_add_ps(fjz0,tz);
/* Dummy lanes write their (already zeroed) forces to a scratch buffer so no
 * real atom's force array is touched. */
743 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
744 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
745 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
746 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
747 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
748 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
749 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
750 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
752 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
754 /* Inner loop uses 330 flops */
757 /* End of innermost loop */
/* Reduce the SIMD i-force accumulators and add them to f and the shift-force
 * (virial) array; +DIM again skips water site 0. */
759 gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
760 f+i_coord_offset+DIM,fshift+i_shift_offset);
763 /* Update potential energies */
/* NOTE(review): the ggid assignment (energy-group id from gid[]) is not
 * visible in this listing — confirm against the generated original. */
764 gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
766 /* Increment number of inner iterations */
767 inneriter += j_index_end - j_index_start;
769 /* Outer loop uses 19 flops */
772 /* Increment number of outer iterations */
775 /* Update outer/inner flops */
777 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_VF,outeriter*19 + inneriter*330);
780 * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_single
781 * Electrostatics interaction: Ewald
782 * VdW interaction: None
783 * Geometry: Water4-Particle
784 * Calculate force/pot: Force
787 nb_kernel_ElecEwSw_VdwNone_GeomW4P1_F_avx_256_single
788 (t_nblist * gmx_restrict nlist,
789 rvec * gmx_restrict xx,
790 rvec * gmx_restrict ff,
791 struct t_forcerec * gmx_restrict fr,
792 t_mdatoms * gmx_restrict mdatoms,
793 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
794 t_nrnb * gmx_restrict nrnb)
796 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
797 * just 0 for non-waters.
798 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
799 * jnr indices corresponding to data put in the four positions in the SIMD register.
801 int i_shift_offset,i_coord_offset,outeriter,inneriter;
802 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
803 int jnrA,jnrB,jnrC,jnrD;
804 int jnrE,jnrF,jnrG,jnrH;
805 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
806 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
807 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
808 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
809 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
811 real *shiftvec,*fshift,*x,*f;
812 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
814 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
815 real * vdwioffsetptr1;
816 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
817 real * vdwioffsetptr2;
818 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
819 real * vdwioffsetptr3;
820 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
821 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
822 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
823 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
824 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
825 __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
826 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
829 __m128i ewitab_lo,ewitab_hi;
830 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
831 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
833 __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
834 real rswitch_scalar,d_scalar;
835 __m256 dummy_mask,cutoff_mask;
836 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
837 __m256 one = _mm256_set1_ps(1.0);
838 __m256 two = _mm256_set1_ps(2.0);
844 jindex = nlist->jindex;
846 shiftidx = nlist->shift;
848 shiftvec = fr->shift_vec[0];
849 fshift = fr->fshift[0];
850 facel = _mm256_set1_ps(fr->ic->epsfac);
851 charge = mdatoms->chargeA;
853 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
854 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
855 beta2 = _mm256_mul_ps(beta,beta);
856 beta3 = _mm256_mul_ps(beta,beta2);
858 ewtab = fr->ic->tabq_coul_FDV0;
859 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
860 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
862 /* Setup water-specific parameters */
863 inr = nlist->iinr[0];
864 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
865 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
866 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
868 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
869 rcutoff_scalar = fr->ic->rcoulomb;
870 rcutoff = _mm256_set1_ps(rcutoff_scalar);
871 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
873 rswitch_scalar = fr->ic->rcoulomb_switch;
874 rswitch = _mm256_set1_ps(rswitch_scalar);
875 /* Setup switch parameters */
876 d_scalar = rcutoff_scalar-rswitch_scalar;
877 d = _mm256_set1_ps(d_scalar);
878 swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
879 swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
880 swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
881 swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
882 swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
883 swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
885 /* Avoid stupid compiler warnings */
886 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
899 for(iidx=0;iidx<4*DIM;iidx++)
904 /* Start outer loop over neighborlists */
905 for(iidx=0; iidx<nri; iidx++)
907 /* Load shift vector for this list */
908 i_shift_offset = DIM*shiftidx[iidx];
910 /* Load limits for loop over neighbors */
911 j_index_start = jindex[iidx];
912 j_index_end = jindex[iidx+1];
914 /* Get outer coordinate index */
916 i_coord_offset = DIM*inr;
918 /* Load i particle coords and add shift vector */
919 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
920 &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
922 fix1 = _mm256_setzero_ps();
923 fiy1 = _mm256_setzero_ps();
924 fiz1 = _mm256_setzero_ps();
925 fix2 = _mm256_setzero_ps();
926 fiy2 = _mm256_setzero_ps();
927 fiz2 = _mm256_setzero_ps();
928 fix3 = _mm256_setzero_ps();
929 fiy3 = _mm256_setzero_ps();
930 fiz3 = _mm256_setzero_ps();
932 /* Start inner kernel loop */
933 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
936 /* Get j neighbor index, and coordinate index */
945 j_coord_offsetA = DIM*jnrA;
946 j_coord_offsetB = DIM*jnrB;
947 j_coord_offsetC = DIM*jnrC;
948 j_coord_offsetD = DIM*jnrD;
949 j_coord_offsetE = DIM*jnrE;
950 j_coord_offsetF = DIM*jnrF;
951 j_coord_offsetG = DIM*jnrG;
952 j_coord_offsetH = DIM*jnrH;
954 /* load j atom coordinates */
955 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
956 x+j_coord_offsetC,x+j_coord_offsetD,
957 x+j_coord_offsetE,x+j_coord_offsetF,
958 x+j_coord_offsetG,x+j_coord_offsetH,
961 /* Calculate displacement vector */
962 dx10 = _mm256_sub_ps(ix1,jx0);
963 dy10 = _mm256_sub_ps(iy1,jy0);
964 dz10 = _mm256_sub_ps(iz1,jz0);
965 dx20 = _mm256_sub_ps(ix2,jx0);
966 dy20 = _mm256_sub_ps(iy2,jy0);
967 dz20 = _mm256_sub_ps(iz2,jz0);
968 dx30 = _mm256_sub_ps(ix3,jx0);
969 dy30 = _mm256_sub_ps(iy3,jy0);
970 dz30 = _mm256_sub_ps(iz3,jz0);
972 /* Calculate squared distance and things based on it */
973 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
974 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
975 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
977 rinv10 = avx256_invsqrt_f(rsq10);
978 rinv20 = avx256_invsqrt_f(rsq20);
979 rinv30 = avx256_invsqrt_f(rsq30);
981 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
982 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
983 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
985 /* Load parameters for j particles */
986 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
987 charge+jnrC+0,charge+jnrD+0,
988 charge+jnrE+0,charge+jnrF+0,
989 charge+jnrG+0,charge+jnrH+0);
991 fjx0 = _mm256_setzero_ps();
992 fjy0 = _mm256_setzero_ps();
993 fjz0 = _mm256_setzero_ps();
995 /**************************
996 * CALCULATE INTERACTIONS *
997 **************************/
999 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1002 r10 = _mm256_mul_ps(rsq10,rinv10);
1004 /* Compute parameters for interactions between i and j atoms */
1005 qq10 = _mm256_mul_ps(iq1,jq0);
1007 /* EWALD ELECTROSTATICS */
1009 /* Analytical PME correction */
1010 zeta2 = _mm256_mul_ps(beta2,rsq10);
1011 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1012 pmecorrF = avx256_pmecorrF_f(zeta2);
1013 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1014 felec = _mm256_mul_ps(qq10,felec);
1015 pmecorrV = avx256_pmecorrV_f(zeta2);
1016 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1017 velec = _mm256_sub_ps(rinv10,pmecorrV);
1018 velec = _mm256_mul_ps(qq10,velec);
1020 d = _mm256_sub_ps(r10,rswitch);
1021 d = _mm256_max_ps(d,_mm256_setzero_ps());
1022 d2 = _mm256_mul_ps(d,d);
1023 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1025 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1027 /* Evaluate switch function */
1028 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1029 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1030 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1034 fscal = _mm256_and_ps(fscal,cutoff_mask);
1036 /* Calculate temporary vectorial force */
1037 tx = _mm256_mul_ps(fscal,dx10);
1038 ty = _mm256_mul_ps(fscal,dy10);
1039 tz = _mm256_mul_ps(fscal,dz10);
1041 /* Update vectorial force */
1042 fix1 = _mm256_add_ps(fix1,tx);
1043 fiy1 = _mm256_add_ps(fiy1,ty);
1044 fiz1 = _mm256_add_ps(fiz1,tz);
1046 fjx0 = _mm256_add_ps(fjx0,tx);
1047 fjy0 = _mm256_add_ps(fjy0,ty);
1048 fjz0 = _mm256_add_ps(fjz0,tz);
1052 /**************************
1053 * CALCULATE INTERACTIONS *
1054 **************************/
1056 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1059 r20 = _mm256_mul_ps(rsq20,rinv20);
1061 /* Compute parameters for interactions between i and j atoms */
1062 qq20 = _mm256_mul_ps(iq2,jq0);
1064 /* EWALD ELECTROSTATICS */
1066 /* Analytical PME correction */
1067 zeta2 = _mm256_mul_ps(beta2,rsq20);
1068 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1069 pmecorrF = avx256_pmecorrF_f(zeta2);
1070 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1071 felec = _mm256_mul_ps(qq20,felec);
1072 pmecorrV = avx256_pmecorrV_f(zeta2);
1073 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1074 velec = _mm256_sub_ps(rinv20,pmecorrV);
1075 velec = _mm256_mul_ps(qq20,velec);
1077 d = _mm256_sub_ps(r20,rswitch);
1078 d = _mm256_max_ps(d,_mm256_setzero_ps());
1079 d2 = _mm256_mul_ps(d,d);
1080 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1082 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1084 /* Evaluate switch function */
1085 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1086 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1087 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1091 fscal = _mm256_and_ps(fscal,cutoff_mask);
1093 /* Calculate temporary vectorial force */
1094 tx = _mm256_mul_ps(fscal,dx20);
1095 ty = _mm256_mul_ps(fscal,dy20);
1096 tz = _mm256_mul_ps(fscal,dz20);
1098 /* Update vectorial force */
1099 fix2 = _mm256_add_ps(fix2,tx);
1100 fiy2 = _mm256_add_ps(fiy2,ty);
1101 fiz2 = _mm256_add_ps(fiz2,tz);
1103 fjx0 = _mm256_add_ps(fjx0,tx);
1104 fjy0 = _mm256_add_ps(fjy0,ty);
1105 fjz0 = _mm256_add_ps(fjz0,tz);
1109 /**************************
1110 * CALCULATE INTERACTIONS *
1111 **************************/
1113 if (gmx_mm256_any_lt(rsq30,rcutoff2))
1116 r30 = _mm256_mul_ps(rsq30,rinv30);
1118 /* Compute parameters for interactions between i and j atoms */
1119 qq30 = _mm256_mul_ps(iq3,jq0);
1121 /* EWALD ELECTROSTATICS */
1123 /* Analytical PME correction */
1124 zeta2 = _mm256_mul_ps(beta2,rsq30);
1125 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
1126 pmecorrF = avx256_pmecorrF_f(zeta2);
1127 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1128 felec = _mm256_mul_ps(qq30,felec);
1129 pmecorrV = avx256_pmecorrV_f(zeta2);
1130 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1131 velec = _mm256_sub_ps(rinv30,pmecorrV);
1132 velec = _mm256_mul_ps(qq30,velec);
1134 d = _mm256_sub_ps(r30,rswitch);
1135 d = _mm256_max_ps(d,_mm256_setzero_ps());
1136 d2 = _mm256_mul_ps(d,d);
1137 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1139 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1141 /* Evaluate switch function */
1142 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1143 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
1144 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
1148 fscal = _mm256_and_ps(fscal,cutoff_mask);
1150 /* Calculate temporary vectorial force */
1151 tx = _mm256_mul_ps(fscal,dx30);
1152 ty = _mm256_mul_ps(fscal,dy30);
1153 tz = _mm256_mul_ps(fscal,dz30);
1155 /* Update vectorial force */
1156 fix3 = _mm256_add_ps(fix3,tx);
1157 fiy3 = _mm256_add_ps(fiy3,ty);
1158 fiz3 = _mm256_add_ps(fiz3,tz);
1160 fjx0 = _mm256_add_ps(fjx0,tx);
1161 fjy0 = _mm256_add_ps(fjy0,ty);
1162 fjz0 = _mm256_add_ps(fjz0,tz);
1166 fjptrA = f+j_coord_offsetA;
1167 fjptrB = f+j_coord_offsetB;
1168 fjptrC = f+j_coord_offsetC;
1169 fjptrD = f+j_coord_offsetD;
1170 fjptrE = f+j_coord_offsetE;
1171 fjptrF = f+j_coord_offsetF;
1172 fjptrG = f+j_coord_offsetG;
1173 fjptrH = f+j_coord_offsetH;
1175 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1177 /* Inner loop uses 318 flops */
1180 if(jidx<j_index_end)
1183 /* Get j neighbor index, and coordinate index */
1184 jnrlistA = jjnr[jidx];
1185 jnrlistB = jjnr[jidx+1];
1186 jnrlistC = jjnr[jidx+2];
1187 jnrlistD = jjnr[jidx+3];
1188 jnrlistE = jjnr[jidx+4];
1189 jnrlistF = jjnr[jidx+5];
1190 jnrlistG = jjnr[jidx+6];
1191 jnrlistH = jjnr[jidx+7];
1192 /* Sign of each element will be negative for non-real atoms.
1193 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1194 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
1196 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1197 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1199 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1200 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1201 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1202 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1203 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
1204 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
1205 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
1206 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
1207 j_coord_offsetA = DIM*jnrA;
1208 j_coord_offsetB = DIM*jnrB;
1209 j_coord_offsetC = DIM*jnrC;
1210 j_coord_offsetD = DIM*jnrD;
1211 j_coord_offsetE = DIM*jnrE;
1212 j_coord_offsetF = DIM*jnrF;
1213 j_coord_offsetG = DIM*jnrG;
1214 j_coord_offsetH = DIM*jnrH;
1216 /* load j atom coordinates */
1217 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1218 x+j_coord_offsetC,x+j_coord_offsetD,
1219 x+j_coord_offsetE,x+j_coord_offsetF,
1220 x+j_coord_offsetG,x+j_coord_offsetH,
1223 /* Calculate displacement vector */
1224 dx10 = _mm256_sub_ps(ix1,jx0);
1225 dy10 = _mm256_sub_ps(iy1,jy0);
1226 dz10 = _mm256_sub_ps(iz1,jz0);
1227 dx20 = _mm256_sub_ps(ix2,jx0);
1228 dy20 = _mm256_sub_ps(iy2,jy0);
1229 dz20 = _mm256_sub_ps(iz2,jz0);
1230 dx30 = _mm256_sub_ps(ix3,jx0);
1231 dy30 = _mm256_sub_ps(iy3,jy0);
1232 dz30 = _mm256_sub_ps(iz3,jz0);
1234 /* Calculate squared distance and things based on it */
1235 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1236 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1237 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
1239 rinv10 = avx256_invsqrt_f(rsq10);
1240 rinv20 = avx256_invsqrt_f(rsq20);
1241 rinv30 = avx256_invsqrt_f(rsq30);
1243 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
1244 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
1245 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
1247 /* Load parameters for j particles */
1248 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1249 charge+jnrC+0,charge+jnrD+0,
1250 charge+jnrE+0,charge+jnrF+0,
1251 charge+jnrG+0,charge+jnrH+0);
1253 fjx0 = _mm256_setzero_ps();
1254 fjy0 = _mm256_setzero_ps();
1255 fjz0 = _mm256_setzero_ps();
1257 /**************************
1258 * CALCULATE INTERACTIONS *
1259 **************************/
1261 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1264 r10 = _mm256_mul_ps(rsq10,rinv10);
1265 r10 = _mm256_andnot_ps(dummy_mask,r10);
1267 /* Compute parameters for interactions between i and j atoms */
1268 qq10 = _mm256_mul_ps(iq1,jq0);
1270 /* EWALD ELECTROSTATICS */
1272 /* Analytical PME correction */
1273 zeta2 = _mm256_mul_ps(beta2,rsq10);
1274 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1275 pmecorrF = avx256_pmecorrF_f(zeta2);
1276 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1277 felec = _mm256_mul_ps(qq10,felec);
1278 pmecorrV = avx256_pmecorrV_f(zeta2);
1279 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1280 velec = _mm256_sub_ps(rinv10,pmecorrV);
1281 velec = _mm256_mul_ps(qq10,velec);
1283 d = _mm256_sub_ps(r10,rswitch);
1284 d = _mm256_max_ps(d,_mm256_setzero_ps());
1285 d2 = _mm256_mul_ps(d,d);
1286 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1288 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1290 /* Evaluate switch function */
1291 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1292 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1293 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1297 fscal = _mm256_and_ps(fscal,cutoff_mask);
1299 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1301 /* Calculate temporary vectorial force */
1302 tx = _mm256_mul_ps(fscal,dx10);
1303 ty = _mm256_mul_ps(fscal,dy10);
1304 tz = _mm256_mul_ps(fscal,dz10);
1306 /* Update vectorial force */
1307 fix1 = _mm256_add_ps(fix1,tx);
1308 fiy1 = _mm256_add_ps(fiy1,ty);
1309 fiz1 = _mm256_add_ps(fiz1,tz);
1311 fjx0 = _mm256_add_ps(fjx0,tx);
1312 fjy0 = _mm256_add_ps(fjy0,ty);
1313 fjz0 = _mm256_add_ps(fjz0,tz);
1317 /**************************
1318 * CALCULATE INTERACTIONS *
1319 **************************/
1321 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1324 r20 = _mm256_mul_ps(rsq20,rinv20);
1325 r20 = _mm256_andnot_ps(dummy_mask,r20);
1327 /* Compute parameters for interactions between i and j atoms */
1328 qq20 = _mm256_mul_ps(iq2,jq0);
1330 /* EWALD ELECTROSTATICS */
1332 /* Analytical PME correction */
1333 zeta2 = _mm256_mul_ps(beta2,rsq20);
1334 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1335 pmecorrF = avx256_pmecorrF_f(zeta2);
1336 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1337 felec = _mm256_mul_ps(qq20,felec);
1338 pmecorrV = avx256_pmecorrV_f(zeta2);
1339 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1340 velec = _mm256_sub_ps(rinv20,pmecorrV);
1341 velec = _mm256_mul_ps(qq20,velec);
1343 d = _mm256_sub_ps(r20,rswitch);
1344 d = _mm256_max_ps(d,_mm256_setzero_ps());
1345 d2 = _mm256_mul_ps(d,d);
1346 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1348 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1350 /* Evaluate switch function */
1351 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1352 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1353 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1357 fscal = _mm256_and_ps(fscal,cutoff_mask);
1359 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1361 /* Calculate temporary vectorial force */
1362 tx = _mm256_mul_ps(fscal,dx20);
1363 ty = _mm256_mul_ps(fscal,dy20);
1364 tz = _mm256_mul_ps(fscal,dz20);
1366 /* Update vectorial force */
1367 fix2 = _mm256_add_ps(fix2,tx);
1368 fiy2 = _mm256_add_ps(fiy2,ty);
1369 fiz2 = _mm256_add_ps(fiz2,tz);
1371 fjx0 = _mm256_add_ps(fjx0,tx);
1372 fjy0 = _mm256_add_ps(fjy0,ty);
1373 fjz0 = _mm256_add_ps(fjz0,tz);
1377 /**************************
1378 * CALCULATE INTERACTIONS *
1379 **************************/
1381 if (gmx_mm256_any_lt(rsq30,rcutoff2))
1384 r30 = _mm256_mul_ps(rsq30,rinv30);
1385 r30 = _mm256_andnot_ps(dummy_mask,r30);
1387 /* Compute parameters for interactions between i and j atoms */
1388 qq30 = _mm256_mul_ps(iq3,jq0);
1390 /* EWALD ELECTROSTATICS */
1392 /* Analytical PME correction */
1393 zeta2 = _mm256_mul_ps(beta2,rsq30);
1394 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
1395 pmecorrF = avx256_pmecorrF_f(zeta2);
1396 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1397 felec = _mm256_mul_ps(qq30,felec);
1398 pmecorrV = avx256_pmecorrV_f(zeta2);
1399 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1400 velec = _mm256_sub_ps(rinv30,pmecorrV);
1401 velec = _mm256_mul_ps(qq30,velec);
1403 d = _mm256_sub_ps(r30,rswitch);
1404 d = _mm256_max_ps(d,_mm256_setzero_ps());
1405 d2 = _mm256_mul_ps(d,d);
1406 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1408 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1410 /* Evaluate switch function */
1411 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1412 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
1413 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
1417 fscal = _mm256_and_ps(fscal,cutoff_mask);
1419 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1421 /* Calculate temporary vectorial force */
1422 tx = _mm256_mul_ps(fscal,dx30);
1423 ty = _mm256_mul_ps(fscal,dy30);
1424 tz = _mm256_mul_ps(fscal,dz30);
1426 /* Update vectorial force */
1427 fix3 = _mm256_add_ps(fix3,tx);
1428 fiy3 = _mm256_add_ps(fiy3,ty);
1429 fiz3 = _mm256_add_ps(fiz3,tz);
1431 fjx0 = _mm256_add_ps(fjx0,tx);
1432 fjy0 = _mm256_add_ps(fjy0,ty);
1433 fjz0 = _mm256_add_ps(fjz0,tz);
1437 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1438 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1439 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1440 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1441 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1442 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1443 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1444 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1446 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1448 /* Inner loop uses 321 flops */
1451 /* End of innermost loop */
1453 gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1454 f+i_coord_offset+DIM,fshift+i_shift_offset);
1456 /* Increment number of inner iterations */
1457 inneriter += j_index_end - j_index_start;
1459 /* Outer loop uses 18 flops */
1462 /* Increment number of outer iterations */
1465 /* Update outer/inner flops */
1467 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4_F,outeriter*18 + inneriter*321);