2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
47 #include "kernelutil_x86_avx_256_single.h"
50 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_VF_avx_256_single
51 * Electrostatics interaction: Ewald
52 * VdW interaction: LJEwald
53 * Geometry: Water4-Particle
54 * Calculate force/pot: PotentialAndForce
/*
 * nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_VF_avx_256_single:
 * Machine-generated AVX-256 single-precision nonbonded kernel computing
 * PotentialAndForce for shifted Ewald electrostatics plus shifted LJ-PME
 * (LJEwald) van der Waals, for a Water4 i-group against single j particles,
 * with the j loop unrolled 8-wide in one __m256 register.
 *
 * NOTE(review): this text appears to be a lossy extraction of the generated
 * file — the original line numbers are fused onto each line and several
 * generated lines are missing (e.g. the opening/closing braces, the
 * `jnrA..jnrH = jjnr[jidx+...]` loads in the full-width inner loop, and the
 * `fscal = fvdw;` / `fscal = felec;` assignments that must precede each
 * `fscal = _mm256_and_ps(fscal,cutoff_mask);`). Do not hand-edit: regenerate
 * or restore from the upstream GROMACS generated source.
 */
57 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_VF_avx_256_single
58 (t_nblist * gmx_restrict nlist,
59 rvec * gmx_restrict xx,
60 rvec * gmx_restrict ff,
61 struct t_forcerec * gmx_restrict fr,
62 t_mdatoms * gmx_restrict mdatoms,
63 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64 t_nrnb * gmx_restrict nrnb)
66 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
67 * just 0 for non-waters.
68 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
69 * jnr indices corresponding to data put in the four positions in the SIMD register.
71 int i_shift_offset,i_coord_offset,outeriter,inneriter;
72 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73 int jnrA,jnrB,jnrC,jnrD;
74 int jnrE,jnrF,jnrG,jnrH;
75 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
79 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
81 real *shiftvec,*fshift,*x,*f;
82 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
84 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
85 real * vdwioffsetptr0;
86 real * vdwgridioffsetptr0;
87 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88 real * vdwioffsetptr1;
89 real * vdwgridioffsetptr1;
90 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
91 real * vdwioffsetptr2;
92 real * vdwgridioffsetptr2;
93 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
94 real * vdwioffsetptr3;
95 real * vdwgridioffsetptr3;
96 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
97 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
98 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
99 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
100 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
101 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
102 __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
103 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
106 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
109 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
110 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
116 __m256 ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
117 __m256 one_half = _mm256_set1_ps(0.5);
118 __m256 minus_one = _mm256_set1_ps(-1.0);
120 __m128i ewitab_lo,ewitab_hi;
121 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
122 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
124 __m256 dummy_mask,cutoff_mask;
125 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
126 __m256 one = _mm256_set1_ps(1.0);
127 __m256 two = _mm256_set1_ps(2.0);
/* Pull neighborlist bookkeeping, force-field tables and Ewald/LJ-PME
 * coefficients out of the input structs into locals/broadcast registers. */
133 jindex = nlist->jindex;
135 shiftidx = nlist->shift;
137 shiftvec = fr->shift_vec[0];
138 fshift = fr->fshift[0];
139 facel = _mm256_set1_ps(fr->ic->epsfac);
140 charge = mdatoms->chargeA;
141 nvdwtype = fr->ntype;
143 vdwtype = mdatoms->typeA;
144 vdwgridparam = fr->ljpme_c6grid;
145 sh_lj_ewald = _mm256_set1_ps(fr->ic->sh_lj_ewald);
146 ewclj = _mm256_set1_ps(fr->ic->ewaldcoeff_lj);
/* ewclj2 = -ewclj^2; the negative sign feeds avx256_exp_f(ewclj2*r^2) below */
147 ewclj2 = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
149 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
150 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
151 beta2 = _mm256_mul_ps(beta,beta);
152 beta3 = _mm256_mul_ps(beta,beta2);
154 ewtab = fr->ic->tabq_coul_FDV0;
155 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
156 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
158 /* Setup water-specific parameters */
159 inr = nlist->iinr[0];
/* Pre-scale the three charged water sites (1,2,3) by epsfac; site 0 carries VdW only */
160 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
161 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
162 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
163 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
164 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
166 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
167 rcutoff_scalar = fr->ic->rcoulomb;
168 rcutoff = _mm256_set1_ps(rcutoff_scalar);
169 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
171 sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
172 rvdw = _mm256_set1_ps(fr->ic->rvdw);
174 /* Avoid stupid compiler warnings */
175 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
188 for(iidx=0;iidx<4*DIM;iidx++)
193 /* Start outer loop over neighborlists */
194 for(iidx=0; iidx<nri; iidx++)
196 /* Load shift vector for this list */
197 i_shift_offset = DIM*shiftidx[iidx];
199 /* Load limits for loop over neighbors */
200 j_index_start = jindex[iidx];
201 j_index_end = jindex[iidx+1];
203 /* Get outer coordinate index */
205 i_coord_offset = DIM*inr;
207 /* Load i particle coords and add shift vector */
208 gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
209 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
/* Zero the per-i-atom force accumulators for all four water sites */
211 fix0 = _mm256_setzero_ps();
212 fiy0 = _mm256_setzero_ps();
213 fiz0 = _mm256_setzero_ps();
214 fix1 = _mm256_setzero_ps();
215 fiy1 = _mm256_setzero_ps();
216 fiz1 = _mm256_setzero_ps();
217 fix2 = _mm256_setzero_ps();
218 fiy2 = _mm256_setzero_ps();
219 fiz2 = _mm256_setzero_ps();
220 fix3 = _mm256_setzero_ps();
221 fiy3 = _mm256_setzero_ps();
222 fiz3 = _mm256_setzero_ps();
224 /* Reset potential sums */
225 velecsum = _mm256_setzero_ps();
226 vvdwsum = _mm256_setzero_ps();
228 /* Start inner kernel loop */
/* Full-width pass: runs while all 8 j entries are real atoms
 * (jjnr[jidx+7]>=0); the masked epilogue below handles the remainder. */
229 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
232 /* Get j neighbor index, and coordinate index */
/* NOTE(review): the jnrA..jnrH = jjnr[jidx+0..7] loads expected here are
 * not present in this text — lost in extraction (cf. epilogue loop). */
241 j_coord_offsetA = DIM*jnrA;
242 j_coord_offsetB = DIM*jnrB;
243 j_coord_offsetC = DIM*jnrC;
244 j_coord_offsetD = DIM*jnrD;
245 j_coord_offsetE = DIM*jnrE;
246 j_coord_offsetF = DIM*jnrF;
247 j_coord_offsetG = DIM*jnrG;
248 j_coord_offsetH = DIM*jnrH;
250 /* load j atom coordinates */
251 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
252 x+j_coord_offsetC,x+j_coord_offsetD,
253 x+j_coord_offsetE,x+j_coord_offsetF,
254 x+j_coord_offsetG,x+j_coord_offsetH,
257 /* Calculate displacement vector */
258 dx00 = _mm256_sub_ps(ix0,jx0);
259 dy00 = _mm256_sub_ps(iy0,jy0);
260 dz00 = _mm256_sub_ps(iz0,jz0);
261 dx10 = _mm256_sub_ps(ix1,jx0);
262 dy10 = _mm256_sub_ps(iy1,jy0);
263 dz10 = _mm256_sub_ps(iz1,jz0);
264 dx20 = _mm256_sub_ps(ix2,jx0);
265 dy20 = _mm256_sub_ps(iy2,jy0);
266 dz20 = _mm256_sub_ps(iz2,jz0);
267 dx30 = _mm256_sub_ps(ix3,jx0);
268 dy30 = _mm256_sub_ps(iy3,jy0);
269 dz30 = _mm256_sub_ps(iz3,jz0);
271 /* Calculate squared distance and things based on it */
272 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
273 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
274 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
275 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
277 rinv00 = avx256_invsqrt_f(rsq00);
278 rinv10 = avx256_invsqrt_f(rsq10);
279 rinv20 = avx256_invsqrt_f(rsq20);
280 rinv30 = avx256_invsqrt_f(rsq30);
282 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
283 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
284 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
285 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
287 /* Load parameters for j particles */
288 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
289 charge+jnrC+0,charge+jnrD+0,
290 charge+jnrE+0,charge+jnrF+0,
291 charge+jnrG+0,charge+jnrH+0);
292 vdwjidx0A = 2*vdwtype[jnrA+0];
293 vdwjidx0B = 2*vdwtype[jnrB+0];
294 vdwjidx0C = 2*vdwtype[jnrC+0];
295 vdwjidx0D = 2*vdwtype[jnrD+0];
296 vdwjidx0E = 2*vdwtype[jnrE+0];
297 vdwjidx0F = 2*vdwtype[jnrF+0];
298 vdwjidx0G = 2*vdwtype[jnrG+0];
299 vdwjidx0H = 2*vdwtype[jnrH+0];
301 fjx0 = _mm256_setzero_ps();
302 fjy0 = _mm256_setzero_ps();
303 fjz0 = _mm256_setzero_ps();
305 /**************************
306 * CALCULATE INTERACTIONS *
307 **************************/
/* Site 0 (VdW-only): shifted LJ-PME interaction */
309 if (gmx_mm256_any_lt(rsq00,rcutoff2))
312 r00 = _mm256_mul_ps(rsq00,rinv00);
314 /* Compute parameters for interactions between i and j atoms */
315 gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
316 vdwioffsetptr0+vdwjidx0B,
317 vdwioffsetptr0+vdwjidx0C,
318 vdwioffsetptr0+vdwjidx0D,
319 vdwioffsetptr0+vdwjidx0E,
320 vdwioffsetptr0+vdwjidx0F,
321 vdwioffsetptr0+vdwjidx0G,
322 vdwioffsetptr0+vdwjidx0H,
325 c6grid_00 = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
326 vdwgridioffsetptr0+vdwjidx0B,
327 vdwgridioffsetptr0+vdwjidx0C,
328 vdwgridioffsetptr0+vdwjidx0D,
329 vdwgridioffsetptr0+vdwjidx0E,
330 vdwgridioffsetptr0+vdwjidx0F,
331 vdwgridioffsetptr0+vdwjidx0G,
332 vdwgridioffsetptr0+vdwjidx0H);
334 /* Analytical LJ-PME */
335 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
336 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
337 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
338 exponent = avx256_exp_f(ewcljrsq);
339 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
340 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
341 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
342 vvdw6 = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
343 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
/* Potential-shift terms make the (shifted) potential vanish at the cutoff */
344 vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
345 _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
346 /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
347 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
349 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
351 /* Update potential sum for this i atom from the interaction with this j atom. */
352 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
353 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
/* NOTE(review): a preceding `fscal = fvdw;` appears to be missing here */
357 fscal = _mm256_and_ps(fscal,cutoff_mask);
359 /* Calculate temporary vectorial force */
360 tx = _mm256_mul_ps(fscal,dx00);
361 ty = _mm256_mul_ps(fscal,dy00);
362 tz = _mm256_mul_ps(fscal,dz00);
364 /* Update vectorial force */
365 fix0 = _mm256_add_ps(fix0,tx);
366 fiy0 = _mm256_add_ps(fiy0,ty);
367 fiz0 = _mm256_add_ps(fiz0,tz);
369 fjx0 = _mm256_add_ps(fjx0,tx);
370 fjy0 = _mm256_add_ps(fjy0,ty);
371 fjz0 = _mm256_add_ps(fjz0,tz);
375 /**************************
376 * CALCULATE INTERACTIONS *
377 **************************/
/* Site 1 (charged): shifted Ewald electrostatics via analytical PME correction */
379 if (gmx_mm256_any_lt(rsq10,rcutoff2))
382 r10 = _mm256_mul_ps(rsq10,rinv10);
384 /* Compute parameters for interactions between i and j atoms */
385 qq10 = _mm256_mul_ps(iq1,jq0);
387 /* EWALD ELECTROSTATICS */
389 /* Analytical PME correction */
390 zeta2 = _mm256_mul_ps(beta2,rsq10);
391 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
392 pmecorrF = avx256_pmecorrF_f(zeta2);
393 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
394 felec = _mm256_mul_ps(qq10,felec);
395 pmecorrV = avx256_pmecorrV_f(zeta2);
396 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
/* sh_ewald shifts the potential so it is zero at the cutoff */
397 velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
398 velec = _mm256_mul_ps(qq10,velec);
400 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
402 /* Update potential sum for this i atom from the interaction with this j atom. */
403 velec = _mm256_and_ps(velec,cutoff_mask);
404 velecsum = _mm256_add_ps(velecsum,velec);
/* NOTE(review): a preceding `fscal = felec;` appears to be missing here */
408 fscal = _mm256_and_ps(fscal,cutoff_mask);
410 /* Calculate temporary vectorial force */
411 tx = _mm256_mul_ps(fscal,dx10);
412 ty = _mm256_mul_ps(fscal,dy10);
413 tz = _mm256_mul_ps(fscal,dz10);
415 /* Update vectorial force */
416 fix1 = _mm256_add_ps(fix1,tx);
417 fiy1 = _mm256_add_ps(fiy1,ty);
418 fiz1 = _mm256_add_ps(fiz1,tz);
420 fjx0 = _mm256_add_ps(fjx0,tx);
421 fjy0 = _mm256_add_ps(fjy0,ty);
422 fjz0 = _mm256_add_ps(fjz0,tz);
426 /**************************
427 * CALCULATE INTERACTIONS *
428 **************************/
/* Site 2 (charged): same form as site 1 */
430 if (gmx_mm256_any_lt(rsq20,rcutoff2))
433 r20 = _mm256_mul_ps(rsq20,rinv20);
435 /* Compute parameters for interactions between i and j atoms */
436 qq20 = _mm256_mul_ps(iq2,jq0);
438 /* EWALD ELECTROSTATICS */
440 /* Analytical PME correction */
441 zeta2 = _mm256_mul_ps(beta2,rsq20);
442 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
443 pmecorrF = avx256_pmecorrF_f(zeta2);
444 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
445 felec = _mm256_mul_ps(qq20,felec);
446 pmecorrV = avx256_pmecorrV_f(zeta2);
447 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
448 velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
449 velec = _mm256_mul_ps(qq20,velec);
451 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
453 /* Update potential sum for this i atom from the interaction with this j atom. */
454 velec = _mm256_and_ps(velec,cutoff_mask);
455 velecsum = _mm256_add_ps(velecsum,velec);
459 fscal = _mm256_and_ps(fscal,cutoff_mask);
461 /* Calculate temporary vectorial force */
462 tx = _mm256_mul_ps(fscal,dx20);
463 ty = _mm256_mul_ps(fscal,dy20);
464 tz = _mm256_mul_ps(fscal,dz20);
466 /* Update vectorial force */
467 fix2 = _mm256_add_ps(fix2,tx);
468 fiy2 = _mm256_add_ps(fiy2,ty);
469 fiz2 = _mm256_add_ps(fiz2,tz);
471 fjx0 = _mm256_add_ps(fjx0,tx);
472 fjy0 = _mm256_add_ps(fjy0,ty);
473 fjz0 = _mm256_add_ps(fjz0,tz);
477 /**************************
478 * CALCULATE INTERACTIONS *
479 **************************/
/* Site 3 (charged): same form as site 1 */
481 if (gmx_mm256_any_lt(rsq30,rcutoff2))
484 r30 = _mm256_mul_ps(rsq30,rinv30);
486 /* Compute parameters for interactions between i and j atoms */
487 qq30 = _mm256_mul_ps(iq3,jq0);
489 /* EWALD ELECTROSTATICS */
491 /* Analytical PME correction */
492 zeta2 = _mm256_mul_ps(beta2,rsq30);
493 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
494 pmecorrF = avx256_pmecorrF_f(zeta2);
495 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
496 felec = _mm256_mul_ps(qq30,felec);
497 pmecorrV = avx256_pmecorrV_f(zeta2);
498 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
499 velec = _mm256_sub_ps(_mm256_sub_ps(rinv30,sh_ewald),pmecorrV);
500 velec = _mm256_mul_ps(qq30,velec);
502 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
504 /* Update potential sum for this i atom from the interaction with this j atom. */
505 velec = _mm256_and_ps(velec,cutoff_mask);
506 velecsum = _mm256_add_ps(velecsum,velec);
510 fscal = _mm256_and_ps(fscal,cutoff_mask);
512 /* Calculate temporary vectorial force */
513 tx = _mm256_mul_ps(fscal,dx30);
514 ty = _mm256_mul_ps(fscal,dy30);
515 tz = _mm256_mul_ps(fscal,dz30);
517 /* Update vectorial force */
518 fix3 = _mm256_add_ps(fix3,tx);
519 fiy3 = _mm256_add_ps(fiy3,ty);
520 fiz3 = _mm256_add_ps(fiz3,tz);
522 fjx0 = _mm256_add_ps(fjx0,tx);
523 fjy0 = _mm256_add_ps(fjy0,ty);
524 fjz0 = _mm256_add_ps(fjz0,tz);
/* Scatter the accumulated (negated) j forces back to the force array */
528 fjptrA = f+j_coord_offsetA;
529 fjptrB = f+j_coord_offsetB;
530 fjptrC = f+j_coord_offsetC;
531 fjptrD = f+j_coord_offsetD;
532 fjptrE = f+j_coord_offsetE;
533 fjptrF = f+j_coord_offsetF;
534 fjptrG = f+j_coord_offsetG;
535 fjptrH = f+j_coord_offsetH;
537 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
539 /* Inner loop uses 392 flops */
/* Epilogue pass: the final (partial) 8-wide chunk may contain negative
 * padding indices; dummy_mask zeroes their contributions and fjptr* are
 * redirected to a scratch buffer so nothing real is written for them. */
545 /* Get j neighbor index, and coordinate index */
546 jnrlistA = jjnr[jidx];
547 jnrlistB = jjnr[jidx+1];
548 jnrlistC = jjnr[jidx+2];
549 jnrlistD = jjnr[jidx+3];
550 jnrlistE = jjnr[jidx+4];
551 jnrlistF = jjnr[jidx+5];
552 jnrlistG = jjnr[jidx+6];
553 jnrlistH = jjnr[jidx+7];
554 /* Sign of each element will be negative for non-real atoms.
555 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
556 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
558 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
559 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
561 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
562 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
563 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
564 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
565 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
566 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
567 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
568 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
569 j_coord_offsetA = DIM*jnrA;
570 j_coord_offsetB = DIM*jnrB;
571 j_coord_offsetC = DIM*jnrC;
572 j_coord_offsetD = DIM*jnrD;
573 j_coord_offsetE = DIM*jnrE;
574 j_coord_offsetF = DIM*jnrF;
575 j_coord_offsetG = DIM*jnrG;
576 j_coord_offsetH = DIM*jnrH;
578 /* load j atom coordinates */
579 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
580 x+j_coord_offsetC,x+j_coord_offsetD,
581 x+j_coord_offsetE,x+j_coord_offsetF,
582 x+j_coord_offsetG,x+j_coord_offsetH,
585 /* Calculate displacement vector */
586 dx00 = _mm256_sub_ps(ix0,jx0);
587 dy00 = _mm256_sub_ps(iy0,jy0);
588 dz00 = _mm256_sub_ps(iz0,jz0);
589 dx10 = _mm256_sub_ps(ix1,jx0);
590 dy10 = _mm256_sub_ps(iy1,jy0);
591 dz10 = _mm256_sub_ps(iz1,jz0);
592 dx20 = _mm256_sub_ps(ix2,jx0);
593 dy20 = _mm256_sub_ps(iy2,jy0);
594 dz20 = _mm256_sub_ps(iz2,jz0);
595 dx30 = _mm256_sub_ps(ix3,jx0);
596 dy30 = _mm256_sub_ps(iy3,jy0);
597 dz30 = _mm256_sub_ps(iz3,jz0);
599 /* Calculate squared distance and things based on it */
600 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
601 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
602 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
603 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
605 rinv00 = avx256_invsqrt_f(rsq00);
606 rinv10 = avx256_invsqrt_f(rsq10);
607 rinv20 = avx256_invsqrt_f(rsq20);
608 rinv30 = avx256_invsqrt_f(rsq30);
610 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
611 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
612 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
613 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
615 /* Load parameters for j particles */
616 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
617 charge+jnrC+0,charge+jnrD+0,
618 charge+jnrE+0,charge+jnrF+0,
619 charge+jnrG+0,charge+jnrH+0);
620 vdwjidx0A = 2*vdwtype[jnrA+0];
621 vdwjidx0B = 2*vdwtype[jnrB+0];
622 vdwjidx0C = 2*vdwtype[jnrC+0];
623 vdwjidx0D = 2*vdwtype[jnrD+0];
624 vdwjidx0E = 2*vdwtype[jnrE+0];
625 vdwjidx0F = 2*vdwtype[jnrF+0];
626 vdwjidx0G = 2*vdwtype[jnrG+0];
627 vdwjidx0H = 2*vdwtype[jnrH+0];
629 fjx0 = _mm256_setzero_ps();
630 fjy0 = _mm256_setzero_ps();
631 fjz0 = _mm256_setzero_ps();
633 /**************************
634 * CALCULATE INTERACTIONS *
635 **************************/
/* Site 0 epilogue: identical to the main loop, plus dummy_mask clearing */
637 if (gmx_mm256_any_lt(rsq00,rcutoff2))
640 r00 = _mm256_mul_ps(rsq00,rinv00);
641 r00 = _mm256_andnot_ps(dummy_mask,r00);
643 /* Compute parameters for interactions between i and j atoms */
644 gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
645 vdwioffsetptr0+vdwjidx0B,
646 vdwioffsetptr0+vdwjidx0C,
647 vdwioffsetptr0+vdwjidx0D,
648 vdwioffsetptr0+vdwjidx0E,
649 vdwioffsetptr0+vdwjidx0F,
650 vdwioffsetptr0+vdwjidx0G,
651 vdwioffsetptr0+vdwjidx0H,
654 c6grid_00 = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
655 vdwgridioffsetptr0+vdwjidx0B,
656 vdwgridioffsetptr0+vdwjidx0C,
657 vdwgridioffsetptr0+vdwjidx0D,
658 vdwgridioffsetptr0+vdwjidx0E,
659 vdwgridioffsetptr0+vdwjidx0F,
660 vdwgridioffsetptr0+vdwjidx0G,
661 vdwgridioffsetptr0+vdwjidx0H);
663 /* Analytical LJ-PME */
664 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
665 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
666 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
667 exponent = avx256_exp_f(ewcljrsq);
668 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
669 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
670 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
671 vvdw6 = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
672 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
673 vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
674 _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
675 /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
676 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
678 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
680 /* Update potential sum for this i atom from the interaction with this j atom. */
681 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
682 vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
683 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
687 fscal = _mm256_and_ps(fscal,cutoff_mask);
689 fscal = _mm256_andnot_ps(dummy_mask,fscal);
691 /* Calculate temporary vectorial force */
692 tx = _mm256_mul_ps(fscal,dx00);
693 ty = _mm256_mul_ps(fscal,dy00);
694 tz = _mm256_mul_ps(fscal,dz00);
696 /* Update vectorial force */
697 fix0 = _mm256_add_ps(fix0,tx);
698 fiy0 = _mm256_add_ps(fiy0,ty);
699 fiz0 = _mm256_add_ps(fiz0,tz);
701 fjx0 = _mm256_add_ps(fjx0,tx);
702 fjy0 = _mm256_add_ps(fjy0,ty);
703 fjz0 = _mm256_add_ps(fjz0,tz);
707 /**************************
708 * CALCULATE INTERACTIONS *
709 **************************/
/* Site 1 epilogue */
711 if (gmx_mm256_any_lt(rsq10,rcutoff2))
714 r10 = _mm256_mul_ps(rsq10,rinv10);
715 r10 = _mm256_andnot_ps(dummy_mask,r10);
717 /* Compute parameters for interactions between i and j atoms */
718 qq10 = _mm256_mul_ps(iq1,jq0);
720 /* EWALD ELECTROSTATICS */
722 /* Analytical PME correction */
723 zeta2 = _mm256_mul_ps(beta2,rsq10);
724 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
725 pmecorrF = avx256_pmecorrF_f(zeta2);
726 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
727 felec = _mm256_mul_ps(qq10,felec);
728 pmecorrV = avx256_pmecorrV_f(zeta2);
729 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
730 velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
731 velec = _mm256_mul_ps(qq10,velec);
733 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
735 /* Update potential sum for this i atom from the interaction with this j atom. */
736 velec = _mm256_and_ps(velec,cutoff_mask);
737 velec = _mm256_andnot_ps(dummy_mask,velec);
738 velecsum = _mm256_add_ps(velecsum,velec);
742 fscal = _mm256_and_ps(fscal,cutoff_mask);
744 fscal = _mm256_andnot_ps(dummy_mask,fscal);
746 /* Calculate temporary vectorial force */
747 tx = _mm256_mul_ps(fscal,dx10);
748 ty = _mm256_mul_ps(fscal,dy10);
749 tz = _mm256_mul_ps(fscal,dz10);
751 /* Update vectorial force */
752 fix1 = _mm256_add_ps(fix1,tx);
753 fiy1 = _mm256_add_ps(fiy1,ty);
754 fiz1 = _mm256_add_ps(fiz1,tz);
756 fjx0 = _mm256_add_ps(fjx0,tx);
757 fjy0 = _mm256_add_ps(fjy0,ty);
758 fjz0 = _mm256_add_ps(fjz0,tz);
762 /**************************
763 * CALCULATE INTERACTIONS *
764 **************************/
/* Site 2 epilogue */
766 if (gmx_mm256_any_lt(rsq20,rcutoff2))
769 r20 = _mm256_mul_ps(rsq20,rinv20);
770 r20 = _mm256_andnot_ps(dummy_mask,r20);
772 /* Compute parameters for interactions between i and j atoms */
773 qq20 = _mm256_mul_ps(iq2,jq0);
775 /* EWALD ELECTROSTATICS */
777 /* Analytical PME correction */
778 zeta2 = _mm256_mul_ps(beta2,rsq20);
779 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
780 pmecorrF = avx256_pmecorrF_f(zeta2);
781 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
782 felec = _mm256_mul_ps(qq20,felec);
783 pmecorrV = avx256_pmecorrV_f(zeta2);
784 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
785 velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
786 velec = _mm256_mul_ps(qq20,velec);
788 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
790 /* Update potential sum for this i atom from the interaction with this j atom. */
791 velec = _mm256_and_ps(velec,cutoff_mask);
792 velec = _mm256_andnot_ps(dummy_mask,velec);
793 velecsum = _mm256_add_ps(velecsum,velec);
797 fscal = _mm256_and_ps(fscal,cutoff_mask);
799 fscal = _mm256_andnot_ps(dummy_mask,fscal);
801 /* Calculate temporary vectorial force */
802 tx = _mm256_mul_ps(fscal,dx20);
803 ty = _mm256_mul_ps(fscal,dy20);
804 tz = _mm256_mul_ps(fscal,dz20);
806 /* Update vectorial force */
807 fix2 = _mm256_add_ps(fix2,tx);
808 fiy2 = _mm256_add_ps(fiy2,ty);
809 fiz2 = _mm256_add_ps(fiz2,tz);
811 fjx0 = _mm256_add_ps(fjx0,tx);
812 fjy0 = _mm256_add_ps(fjy0,ty);
813 fjz0 = _mm256_add_ps(fjz0,tz);
817 /**************************
818 * CALCULATE INTERACTIONS *
819 **************************/
/* Site 3 epilogue */
821 if (gmx_mm256_any_lt(rsq30,rcutoff2))
824 r30 = _mm256_mul_ps(rsq30,rinv30);
825 r30 = _mm256_andnot_ps(dummy_mask,r30);
827 /* Compute parameters for interactions between i and j atoms */
828 qq30 = _mm256_mul_ps(iq3,jq0);
830 /* EWALD ELECTROSTATICS */
832 /* Analytical PME correction */
833 zeta2 = _mm256_mul_ps(beta2,rsq30);
834 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
835 pmecorrF = avx256_pmecorrF_f(zeta2);
836 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
837 felec = _mm256_mul_ps(qq30,felec);
838 pmecorrV = avx256_pmecorrV_f(zeta2);
839 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
840 velec = _mm256_sub_ps(_mm256_sub_ps(rinv30,sh_ewald),pmecorrV);
841 velec = _mm256_mul_ps(qq30,velec);
843 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
845 /* Update potential sum for this i atom from the interaction with this j atom. */
846 velec = _mm256_and_ps(velec,cutoff_mask);
847 velec = _mm256_andnot_ps(dummy_mask,velec);
848 velecsum = _mm256_add_ps(velecsum,velec);
852 fscal = _mm256_and_ps(fscal,cutoff_mask);
854 fscal = _mm256_andnot_ps(dummy_mask,fscal);
856 /* Calculate temporary vectorial force */
857 tx = _mm256_mul_ps(fscal,dx30);
858 ty = _mm256_mul_ps(fscal,dy30);
859 tz = _mm256_mul_ps(fscal,dz30);
861 /* Update vectorial force */
862 fix3 = _mm256_add_ps(fix3,tx);
863 fiy3 = _mm256_add_ps(fiy3,ty);
864 fiz3 = _mm256_add_ps(fiz3,tz);
866 fjx0 = _mm256_add_ps(fjx0,tx);
867 fjy0 = _mm256_add_ps(fjy0,ty);
868 fjz0 = _mm256_add_ps(fjz0,tz);
/* Dummy entries are routed to scratch so they never touch the real f array */
872 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
873 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
874 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
875 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
876 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
877 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
878 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
879 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
881 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
883 /* Inner loop uses 396 flops */
886 /* End of innermost loop */
/* Reduce the 8-wide i-force accumulators and add to f and the shift forces */
888 gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
889 f+i_coord_offset,fshift+i_shift_offset);
892 /* Update potential energies */
893 gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
894 gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
896 /* Increment number of inner iterations */
897 inneriter += j_index_end - j_index_start;
899 /* Outer loop uses 26 flops */
902 /* Increment number of outer iterations */
905 /* Update outer/inner flops */
907 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*396);
910 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_F_avx_256_single
911 * Electrostatics interaction: Ewald
912 * VdW interaction: LJEwald
913 * Geometry: Water4-Particle
914 * Calculate force/pot: Force
917 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_F_avx_256_single
918 (t_nblist * gmx_restrict nlist,
919 rvec * gmx_restrict xx,
920 rvec * gmx_restrict ff,
921 struct t_forcerec * gmx_restrict fr,
922 t_mdatoms * gmx_restrict mdatoms,
923 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
924 t_nrnb * gmx_restrict nrnb)
926 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
927 * just 0 for non-waters.
928 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
929 * jnr indices corresponding to data put in the four positions in the SIMD register.
931 int i_shift_offset,i_coord_offset,outeriter,inneriter;
932 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
933 int jnrA,jnrB,jnrC,jnrD;
934 int jnrE,jnrF,jnrG,jnrH;
935 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
936 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
937 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
938 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
939 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
941 real *shiftvec,*fshift,*x,*f;
942 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
944 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
945 real * vdwioffsetptr0;
946 real * vdwgridioffsetptr0;
947 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
948 real * vdwioffsetptr1;
949 real * vdwgridioffsetptr1;
950 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
951 real * vdwioffsetptr2;
952 real * vdwgridioffsetptr2;
953 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
954 real * vdwioffsetptr3;
955 real * vdwgridioffsetptr3;
956 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
957 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
958 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
959 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
960 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
961 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
962 __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
963 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
966 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
969 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
970 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
976 __m256 ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
977 __m256 one_half = _mm256_set1_ps(0.5);
978 __m256 minus_one = _mm256_set1_ps(-1.0);
980 __m128i ewitab_lo,ewitab_hi;
981 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
982 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
984 __m256 dummy_mask,cutoff_mask;
985 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
986 __m256 one = _mm256_set1_ps(1.0);
987 __m256 two = _mm256_set1_ps(2.0);
993 jindex = nlist->jindex;
995 shiftidx = nlist->shift;
997 shiftvec = fr->shift_vec[0];
998 fshift = fr->fshift[0];
999 facel = _mm256_set1_ps(fr->ic->epsfac);
1000 charge = mdatoms->chargeA;
1001 nvdwtype = fr->ntype;
1002 vdwparam = fr->nbfp;
1003 vdwtype = mdatoms->typeA;
1004 vdwgridparam = fr->ljpme_c6grid;
1005 sh_lj_ewald = _mm256_set1_ps(fr->ic->sh_lj_ewald);
1006 ewclj = _mm256_set1_ps(fr->ic->ewaldcoeff_lj);
1007 ewclj2 = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
1009 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
1010 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1011 beta2 = _mm256_mul_ps(beta,beta);
1012 beta3 = _mm256_mul_ps(beta,beta2);
1014 ewtab = fr->ic->tabq_coul_F;
1015 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
1016 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1018 /* Setup water-specific parameters */
1019 inr = nlist->iinr[0];
1020 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1021 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1022 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1023 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1024 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
1026 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1027 rcutoff_scalar = fr->ic->rcoulomb;
1028 rcutoff = _mm256_set1_ps(rcutoff_scalar);
1029 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
1031 sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
1032 rvdw = _mm256_set1_ps(fr->ic->rvdw);
1034 /* Avoid stupid compiler warnings */
1035 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1036 j_coord_offsetA = 0;
1037 j_coord_offsetB = 0;
1038 j_coord_offsetC = 0;
1039 j_coord_offsetD = 0;
1040 j_coord_offsetE = 0;
1041 j_coord_offsetF = 0;
1042 j_coord_offsetG = 0;
1043 j_coord_offsetH = 0;
1048 for(iidx=0;iidx<4*DIM;iidx++)
1050 scratch[iidx] = 0.0;
1053 /* Start outer loop over neighborlists */
1054 for(iidx=0; iidx<nri; iidx++)
1056 /* Load shift vector for this list */
1057 i_shift_offset = DIM*shiftidx[iidx];
1059 /* Load limits for loop over neighbors */
1060 j_index_start = jindex[iidx];
1061 j_index_end = jindex[iidx+1];
1063 /* Get outer coordinate index */
1065 i_coord_offset = DIM*inr;
1067 /* Load i particle coords and add shift vector */
1068 gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1069 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1071 fix0 = _mm256_setzero_ps();
1072 fiy0 = _mm256_setzero_ps();
1073 fiz0 = _mm256_setzero_ps();
1074 fix1 = _mm256_setzero_ps();
1075 fiy1 = _mm256_setzero_ps();
1076 fiz1 = _mm256_setzero_ps();
1077 fix2 = _mm256_setzero_ps();
1078 fiy2 = _mm256_setzero_ps();
1079 fiz2 = _mm256_setzero_ps();
1080 fix3 = _mm256_setzero_ps();
1081 fiy3 = _mm256_setzero_ps();
1082 fiz3 = _mm256_setzero_ps();
1084 /* Start inner kernel loop */
1085 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1088 /* Get j neighbor index, and coordinate index */
1090 jnrB = jjnr[jidx+1];
1091 jnrC = jjnr[jidx+2];
1092 jnrD = jjnr[jidx+3];
1093 jnrE = jjnr[jidx+4];
1094 jnrF = jjnr[jidx+5];
1095 jnrG = jjnr[jidx+6];
1096 jnrH = jjnr[jidx+7];
1097 j_coord_offsetA = DIM*jnrA;
1098 j_coord_offsetB = DIM*jnrB;
1099 j_coord_offsetC = DIM*jnrC;
1100 j_coord_offsetD = DIM*jnrD;
1101 j_coord_offsetE = DIM*jnrE;
1102 j_coord_offsetF = DIM*jnrF;
1103 j_coord_offsetG = DIM*jnrG;
1104 j_coord_offsetH = DIM*jnrH;
1106 /* load j atom coordinates */
1107 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1108 x+j_coord_offsetC,x+j_coord_offsetD,
1109 x+j_coord_offsetE,x+j_coord_offsetF,
1110 x+j_coord_offsetG,x+j_coord_offsetH,
1113 /* Calculate displacement vector */
1114 dx00 = _mm256_sub_ps(ix0,jx0);
1115 dy00 = _mm256_sub_ps(iy0,jy0);
1116 dz00 = _mm256_sub_ps(iz0,jz0);
1117 dx10 = _mm256_sub_ps(ix1,jx0);
1118 dy10 = _mm256_sub_ps(iy1,jy0);
1119 dz10 = _mm256_sub_ps(iz1,jz0);
1120 dx20 = _mm256_sub_ps(ix2,jx0);
1121 dy20 = _mm256_sub_ps(iy2,jy0);
1122 dz20 = _mm256_sub_ps(iz2,jz0);
1123 dx30 = _mm256_sub_ps(ix3,jx0);
1124 dy30 = _mm256_sub_ps(iy3,jy0);
1125 dz30 = _mm256_sub_ps(iz3,jz0);
1127 /* Calculate squared distance and things based on it */
1128 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1129 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1130 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1131 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
1133 rinv00 = avx256_invsqrt_f(rsq00);
1134 rinv10 = avx256_invsqrt_f(rsq10);
1135 rinv20 = avx256_invsqrt_f(rsq20);
1136 rinv30 = avx256_invsqrt_f(rsq30);
1138 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1139 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
1140 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
1141 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
1143 /* Load parameters for j particles */
1144 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1145 charge+jnrC+0,charge+jnrD+0,
1146 charge+jnrE+0,charge+jnrF+0,
1147 charge+jnrG+0,charge+jnrH+0);
1148 vdwjidx0A = 2*vdwtype[jnrA+0];
1149 vdwjidx0B = 2*vdwtype[jnrB+0];
1150 vdwjidx0C = 2*vdwtype[jnrC+0];
1151 vdwjidx0D = 2*vdwtype[jnrD+0];
1152 vdwjidx0E = 2*vdwtype[jnrE+0];
1153 vdwjidx0F = 2*vdwtype[jnrF+0];
1154 vdwjidx0G = 2*vdwtype[jnrG+0];
1155 vdwjidx0H = 2*vdwtype[jnrH+0];
1157 fjx0 = _mm256_setzero_ps();
1158 fjy0 = _mm256_setzero_ps();
1159 fjz0 = _mm256_setzero_ps();
1161 /**************************
1162 * CALCULATE INTERACTIONS *
1163 **************************/
1165 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1168 r00 = _mm256_mul_ps(rsq00,rinv00);
1170 /* Compute parameters for interactions between i and j atoms */
1171 gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1172 vdwioffsetptr0+vdwjidx0B,
1173 vdwioffsetptr0+vdwjidx0C,
1174 vdwioffsetptr0+vdwjidx0D,
1175 vdwioffsetptr0+vdwjidx0E,
1176 vdwioffsetptr0+vdwjidx0F,
1177 vdwioffsetptr0+vdwjidx0G,
1178 vdwioffsetptr0+vdwjidx0H,
1181 c6grid_00 = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
1182 vdwgridioffsetptr0+vdwjidx0B,
1183 vdwgridioffsetptr0+vdwjidx0C,
1184 vdwgridioffsetptr0+vdwjidx0D,
1185 vdwgridioffsetptr0+vdwjidx0E,
1186 vdwgridioffsetptr0+vdwjidx0F,
1187 vdwgridioffsetptr0+vdwjidx0G,
1188 vdwgridioffsetptr0+vdwjidx0H);
1190 /* Analytical LJ-PME */
1191 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1192 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
1193 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1194 exponent = avx256_exp_f(ewcljrsq);
1195 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1196 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1197 /* f6A = 6 * C6grid * (1 - poly) */
1198 f6A = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1199 /* f6B = C6grid * exponent * beta^6 */
1200 f6B = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1201 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1202 fvdw = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1204 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1208 fscal = _mm256_and_ps(fscal,cutoff_mask);
1210 /* Calculate temporary vectorial force */
1211 tx = _mm256_mul_ps(fscal,dx00);
1212 ty = _mm256_mul_ps(fscal,dy00);
1213 tz = _mm256_mul_ps(fscal,dz00);
1215 /* Update vectorial force */
1216 fix0 = _mm256_add_ps(fix0,tx);
1217 fiy0 = _mm256_add_ps(fiy0,ty);
1218 fiz0 = _mm256_add_ps(fiz0,tz);
1220 fjx0 = _mm256_add_ps(fjx0,tx);
1221 fjy0 = _mm256_add_ps(fjy0,ty);
1222 fjz0 = _mm256_add_ps(fjz0,tz);
1226 /**************************
1227 * CALCULATE INTERACTIONS *
1228 **************************/
1230 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1233 r10 = _mm256_mul_ps(rsq10,rinv10);
1235 /* Compute parameters for interactions between i and j atoms */
1236 qq10 = _mm256_mul_ps(iq1,jq0);
1238 /* EWALD ELECTROSTATICS */
1240 /* Analytical PME correction */
1241 zeta2 = _mm256_mul_ps(beta2,rsq10);
1242 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1243 pmecorrF = avx256_pmecorrF_f(zeta2);
1244 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1245 felec = _mm256_mul_ps(qq10,felec);
1247 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1251 fscal = _mm256_and_ps(fscal,cutoff_mask);
1253 /* Calculate temporary vectorial force */
1254 tx = _mm256_mul_ps(fscal,dx10);
1255 ty = _mm256_mul_ps(fscal,dy10);
1256 tz = _mm256_mul_ps(fscal,dz10);
1258 /* Update vectorial force */
1259 fix1 = _mm256_add_ps(fix1,tx);
1260 fiy1 = _mm256_add_ps(fiy1,ty);
1261 fiz1 = _mm256_add_ps(fiz1,tz);
1263 fjx0 = _mm256_add_ps(fjx0,tx);
1264 fjy0 = _mm256_add_ps(fjy0,ty);
1265 fjz0 = _mm256_add_ps(fjz0,tz);
1269 /**************************
1270 * CALCULATE INTERACTIONS *
1271 **************************/
1273 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1276 r20 = _mm256_mul_ps(rsq20,rinv20);
1278 /* Compute parameters for interactions between i and j atoms */
1279 qq20 = _mm256_mul_ps(iq2,jq0);
1281 /* EWALD ELECTROSTATICS */
1283 /* Analytical PME correction */
1284 zeta2 = _mm256_mul_ps(beta2,rsq20);
1285 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1286 pmecorrF = avx256_pmecorrF_f(zeta2);
1287 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1288 felec = _mm256_mul_ps(qq20,felec);
1290 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1294 fscal = _mm256_and_ps(fscal,cutoff_mask);
1296 /* Calculate temporary vectorial force */
1297 tx = _mm256_mul_ps(fscal,dx20);
1298 ty = _mm256_mul_ps(fscal,dy20);
1299 tz = _mm256_mul_ps(fscal,dz20);
1301 /* Update vectorial force */
1302 fix2 = _mm256_add_ps(fix2,tx);
1303 fiy2 = _mm256_add_ps(fiy2,ty);
1304 fiz2 = _mm256_add_ps(fiz2,tz);
1306 fjx0 = _mm256_add_ps(fjx0,tx);
1307 fjy0 = _mm256_add_ps(fjy0,ty);
1308 fjz0 = _mm256_add_ps(fjz0,tz);
1312 /**************************
1313 * CALCULATE INTERACTIONS *
1314 **************************/
1316 if (gmx_mm256_any_lt(rsq30,rcutoff2))
1319 r30 = _mm256_mul_ps(rsq30,rinv30);
1321 /* Compute parameters for interactions between i and j atoms */
1322 qq30 = _mm256_mul_ps(iq3,jq0);
1324 /* EWALD ELECTROSTATICS */
1326 /* Analytical PME correction */
1327 zeta2 = _mm256_mul_ps(beta2,rsq30);
1328 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
1329 pmecorrF = avx256_pmecorrF_f(zeta2);
1330 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1331 felec = _mm256_mul_ps(qq30,felec);
1333 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
1337 fscal = _mm256_and_ps(fscal,cutoff_mask);
1339 /* Calculate temporary vectorial force */
1340 tx = _mm256_mul_ps(fscal,dx30);
1341 ty = _mm256_mul_ps(fscal,dy30);
1342 tz = _mm256_mul_ps(fscal,dz30);
1344 /* Update vectorial force */
1345 fix3 = _mm256_add_ps(fix3,tx);
1346 fiy3 = _mm256_add_ps(fiy3,ty);
1347 fiz3 = _mm256_add_ps(fiz3,tz);
1349 fjx0 = _mm256_add_ps(fjx0,tx);
1350 fjy0 = _mm256_add_ps(fjy0,ty);
1351 fjz0 = _mm256_add_ps(fjz0,tz);
1355 fjptrA = f+j_coord_offsetA;
1356 fjptrB = f+j_coord_offsetB;
1357 fjptrC = f+j_coord_offsetC;
1358 fjptrD = f+j_coord_offsetD;
1359 fjptrE = f+j_coord_offsetE;
1360 fjptrF = f+j_coord_offsetF;
1361 fjptrG = f+j_coord_offsetG;
1362 fjptrH = f+j_coord_offsetH;
1364 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1366 /* Inner loop uses 229 flops */
1369 if(jidx<j_index_end)
1372 /* Get j neighbor index, and coordinate index */
1373 jnrlistA = jjnr[jidx];
1374 jnrlistB = jjnr[jidx+1];
1375 jnrlistC = jjnr[jidx+2];
1376 jnrlistD = jjnr[jidx+3];
1377 jnrlistE = jjnr[jidx+4];
1378 jnrlistF = jjnr[jidx+5];
1379 jnrlistG = jjnr[jidx+6];
1380 jnrlistH = jjnr[jidx+7];
1381 /* Sign of each element will be negative for non-real atoms.
1382 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1383 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
1385 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1386 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1388 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1389 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1390 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1391 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1392 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
1393 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
1394 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
1395 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
1396 j_coord_offsetA = DIM*jnrA;
1397 j_coord_offsetB = DIM*jnrB;
1398 j_coord_offsetC = DIM*jnrC;
1399 j_coord_offsetD = DIM*jnrD;
1400 j_coord_offsetE = DIM*jnrE;
1401 j_coord_offsetF = DIM*jnrF;
1402 j_coord_offsetG = DIM*jnrG;
1403 j_coord_offsetH = DIM*jnrH;
1405 /* load j atom coordinates */
1406 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1407 x+j_coord_offsetC,x+j_coord_offsetD,
1408 x+j_coord_offsetE,x+j_coord_offsetF,
1409 x+j_coord_offsetG,x+j_coord_offsetH,
1412 /* Calculate displacement vector */
1413 dx00 = _mm256_sub_ps(ix0,jx0);
1414 dy00 = _mm256_sub_ps(iy0,jy0);
1415 dz00 = _mm256_sub_ps(iz0,jz0);
1416 dx10 = _mm256_sub_ps(ix1,jx0);
1417 dy10 = _mm256_sub_ps(iy1,jy0);
1418 dz10 = _mm256_sub_ps(iz1,jz0);
1419 dx20 = _mm256_sub_ps(ix2,jx0);
1420 dy20 = _mm256_sub_ps(iy2,jy0);
1421 dz20 = _mm256_sub_ps(iz2,jz0);
1422 dx30 = _mm256_sub_ps(ix3,jx0);
1423 dy30 = _mm256_sub_ps(iy3,jy0);
1424 dz30 = _mm256_sub_ps(iz3,jz0);
1426 /* Calculate squared distance and things based on it */
1427 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1428 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1429 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1430 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
1432 rinv00 = avx256_invsqrt_f(rsq00);
1433 rinv10 = avx256_invsqrt_f(rsq10);
1434 rinv20 = avx256_invsqrt_f(rsq20);
1435 rinv30 = avx256_invsqrt_f(rsq30);
1437 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1438 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
1439 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
1440 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
1442 /* Load parameters for j particles */
1443 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1444 charge+jnrC+0,charge+jnrD+0,
1445 charge+jnrE+0,charge+jnrF+0,
1446 charge+jnrG+0,charge+jnrH+0);
1447 vdwjidx0A = 2*vdwtype[jnrA+0];
1448 vdwjidx0B = 2*vdwtype[jnrB+0];
1449 vdwjidx0C = 2*vdwtype[jnrC+0];
1450 vdwjidx0D = 2*vdwtype[jnrD+0];
1451 vdwjidx0E = 2*vdwtype[jnrE+0];
1452 vdwjidx0F = 2*vdwtype[jnrF+0];
1453 vdwjidx0G = 2*vdwtype[jnrG+0];
1454 vdwjidx0H = 2*vdwtype[jnrH+0];
1456 fjx0 = _mm256_setzero_ps();
1457 fjy0 = _mm256_setzero_ps();
1458 fjz0 = _mm256_setzero_ps();
1460 /**************************
1461 * CALCULATE INTERACTIONS *
1462 **************************/
1464 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1467 r00 = _mm256_mul_ps(rsq00,rinv00);
1468 r00 = _mm256_andnot_ps(dummy_mask,r00);
1470 /* Compute parameters for interactions between i and j atoms */
1471 gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1472 vdwioffsetptr0+vdwjidx0B,
1473 vdwioffsetptr0+vdwjidx0C,
1474 vdwioffsetptr0+vdwjidx0D,
1475 vdwioffsetptr0+vdwjidx0E,
1476 vdwioffsetptr0+vdwjidx0F,
1477 vdwioffsetptr0+vdwjidx0G,
1478 vdwioffsetptr0+vdwjidx0H,
1481 c6grid_00 = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
1482 vdwgridioffsetptr0+vdwjidx0B,
1483 vdwgridioffsetptr0+vdwjidx0C,
1484 vdwgridioffsetptr0+vdwjidx0D,
1485 vdwgridioffsetptr0+vdwjidx0E,
1486 vdwgridioffsetptr0+vdwjidx0F,
1487 vdwgridioffsetptr0+vdwjidx0G,
1488 vdwgridioffsetptr0+vdwjidx0H);
1490 /* Analytical LJ-PME */
1491 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1492 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
1493 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1494 exponent = avx256_exp_f(ewcljrsq);
1495 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1496 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1497 /* f6A = 6 * C6grid * (1 - poly) */
1498 f6A = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1499 /* f6B = C6grid * exponent * beta^6 */
1500 f6B = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1501 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1502 fvdw = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1504 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1508 fscal = _mm256_and_ps(fscal,cutoff_mask);
1510 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1512 /* Calculate temporary vectorial force */
1513 tx = _mm256_mul_ps(fscal,dx00);
1514 ty = _mm256_mul_ps(fscal,dy00);
1515 tz = _mm256_mul_ps(fscal,dz00);
1517 /* Update vectorial force */
1518 fix0 = _mm256_add_ps(fix0,tx);
1519 fiy0 = _mm256_add_ps(fiy0,ty);
1520 fiz0 = _mm256_add_ps(fiz0,tz);
1522 fjx0 = _mm256_add_ps(fjx0,tx);
1523 fjy0 = _mm256_add_ps(fjy0,ty);
1524 fjz0 = _mm256_add_ps(fjz0,tz);
1528 /**************************
1529 * CALCULATE INTERACTIONS *
1530 **************************/
1532 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1535 r10 = _mm256_mul_ps(rsq10,rinv10);
1536 r10 = _mm256_andnot_ps(dummy_mask,r10);
1538 /* Compute parameters for interactions between i and j atoms */
1539 qq10 = _mm256_mul_ps(iq1,jq0);
1541 /* EWALD ELECTROSTATICS */
1543 /* Analytical PME correction */
1544 zeta2 = _mm256_mul_ps(beta2,rsq10);
1545 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1546 pmecorrF = avx256_pmecorrF_f(zeta2);
1547 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1548 felec = _mm256_mul_ps(qq10,felec);
1550 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1554 fscal = _mm256_and_ps(fscal,cutoff_mask);
1556 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1558 /* Calculate temporary vectorial force */
1559 tx = _mm256_mul_ps(fscal,dx10);
1560 ty = _mm256_mul_ps(fscal,dy10);
1561 tz = _mm256_mul_ps(fscal,dz10);
1563 /* Update vectorial force */
1564 fix1 = _mm256_add_ps(fix1,tx);
1565 fiy1 = _mm256_add_ps(fiy1,ty);
1566 fiz1 = _mm256_add_ps(fiz1,tz);
1568 fjx0 = _mm256_add_ps(fjx0,tx);
1569 fjy0 = _mm256_add_ps(fjy0,ty);
1570 fjz0 = _mm256_add_ps(fjz0,tz);
1574 /**************************
1575 * CALCULATE INTERACTIONS *
1576 **************************/
1578 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1581 r20 = _mm256_mul_ps(rsq20,rinv20);
1582 r20 = _mm256_andnot_ps(dummy_mask,r20);
1584 /* Compute parameters for interactions between i and j atoms */
1585 qq20 = _mm256_mul_ps(iq2,jq0);
1587 /* EWALD ELECTROSTATICS */
1589 /* Analytical PME correction */
1590 zeta2 = _mm256_mul_ps(beta2,rsq20);
1591 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1592 pmecorrF = avx256_pmecorrF_f(zeta2);
1593 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1594 felec = _mm256_mul_ps(qq20,felec);
1596 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1600 fscal = _mm256_and_ps(fscal,cutoff_mask);
1602 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1604 /* Calculate temporary vectorial force */
1605 tx = _mm256_mul_ps(fscal,dx20);
1606 ty = _mm256_mul_ps(fscal,dy20);
1607 tz = _mm256_mul_ps(fscal,dz20);
1609 /* Update vectorial force */
1610 fix2 = _mm256_add_ps(fix2,tx);
1611 fiy2 = _mm256_add_ps(fiy2,ty);
1612 fiz2 = _mm256_add_ps(fiz2,tz);
1614 fjx0 = _mm256_add_ps(fjx0,tx);
1615 fjy0 = _mm256_add_ps(fjy0,ty);
1616 fjz0 = _mm256_add_ps(fjz0,tz);
1620 /**************************
1621 * CALCULATE INTERACTIONS *
1622 **************************/
1624 if (gmx_mm256_any_lt(rsq30,rcutoff2))
1627 r30 = _mm256_mul_ps(rsq30,rinv30);
1628 r30 = _mm256_andnot_ps(dummy_mask,r30);
1630 /* Compute parameters for interactions between i and j atoms */
1631 qq30 = _mm256_mul_ps(iq3,jq0);
1633 /* EWALD ELECTROSTATICS */
1635 /* Analytical PME correction */
1636 zeta2 = _mm256_mul_ps(beta2,rsq30);
1637 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
1638 pmecorrF = avx256_pmecorrF_f(zeta2);
1639 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1640 felec = _mm256_mul_ps(qq30,felec);
1642 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
1646 fscal = _mm256_and_ps(fscal,cutoff_mask);
1648 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1650 /* Calculate temporary vectorial force */
1651 tx = _mm256_mul_ps(fscal,dx30);
1652 ty = _mm256_mul_ps(fscal,dy30);
1653 tz = _mm256_mul_ps(fscal,dz30);
1655 /* Update vectorial force */
1656 fix3 = _mm256_add_ps(fix3,tx);
1657 fiy3 = _mm256_add_ps(fiy3,ty);
1658 fiz3 = _mm256_add_ps(fiz3,tz);
1660 fjx0 = _mm256_add_ps(fjx0,tx);
1661 fjy0 = _mm256_add_ps(fjy0,ty);
1662 fjz0 = _mm256_add_ps(fjz0,tz);
1666 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1667 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1668 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1669 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1670 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1671 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1672 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1673 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1675 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1677 /* Inner loop uses 233 flops */
1680 /* End of innermost loop */
1682 gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1683 f+i_coord_offset,fshift+i_shift_offset);
1685 /* Increment number of inner iterations */
1686 inneriter += j_index_end - j_index_start;
1688 /* Outer loop uses 24 flops */
1691 /* Increment number of outer iterations */
1694 /* Update outer/inner flops */
1696 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*233);