2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
47 #include "kernelutil_x86_avx_256_double.h"
50 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3P1_VF_avx_256_double
51 * Electrostatics interaction: Ewald
52 * VdW interaction: LJEwald
53 * Geometry: Water3-Particle
54 * Calculate force/pot: PotentialAndForce
/*
 * Kernel entry point (generic GROMACS nb_kernel_t interface).
 *
 * Computes potential-shifted Ewald electrostatics plus potential-shifted
 * LJ-PME (LJEwald) van der Waals interactions for a Water3 i-group
 * (three charged particles; VdW only on particle 0) against generic
 * single-site j particles, accumulating BOTH potential energies and
 * forces ("VF" variant), four j particles at a time in AVX-256 double
 * precision registers.
 *
 *   nlist       - neighbor list: i atoms, shift indices, j index ranges
 *   xx / ff     - flattened coordinate / force arrays
 *   fr          - force record; interaction constants live in fr->ic
 *   mdatoms     - per-atom data (charges chargeA, VdW types typeA)
 *   kernel_data - output buffers for per-energy-group sums
 *   nrnb        - flop-count accounting
 */
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3P1_VF_avx_256_double
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     struct t_forcerec           * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    real *           vdwgridioffsetptr0;
    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    real *           vdwgridioffsetptr1;
    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    real *           vdwgridioffsetptr2;
    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
    __m256d          ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
    __m256d          one_half  = _mm256_set1_pd(0.5);
    __m256d          minus_one = _mm256_set1_pd(-1.0);
    __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    /* signbit replicates 0x80000000 into all 32-bit lanes, giving the sign
     * bit of each half of every double lane.
     */
    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
    __m256d          one     = _mm256_set1_pd(1.0);
    __m256d          two     = _mm256_set1_pd(2.0);

    /* Pull list pointers and interaction constants into locals/registers. */
    jindex           = nlist->jindex;
    shiftidx         = nlist->shift;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_pd(fr->ic->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwtype          = mdatoms->typeA;
    vdwgridparam     = fr->ljpme_c6grid;
    sh_lj_ewald      = _mm256_set1_pd(fr->ic->sh_lj_ewald);
    ewclj            = _mm256_set1_pd(fr->ic->ewaldcoeff_lj);
    /* ewclj2 = -ewaldcoeff_lj^2: the negated exponent factor for exp(-(b*r)^2) */
    ewclj2           = _mm256_mul_pd(minus_one,_mm256_mul_pd(ewclj,ewclj));

    sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
    beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_pd(beta,beta);
    beta3            = _mm256_mul_pd(beta,beta2);
    /* VF kernel: use the combined F/D/V0 quadruplet Ewald table */
    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    /* Water geometry/charges are identical for all i-groups, so the charges
     * and VdW parameter row offsets are loaded once from the first i atom.
     */
    inr              = nlist->iinr[0];
    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
    vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->ic->rcoulomb;
    rcutoff          = _mm256_set1_pd(rcutoff_scalar);
    rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_pd(fr->ic->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;

    /* presumably zeroes the scratch force-dump buffer used for masked-out
     * (dummy) j entries in the epilogue loop below - confirm in generated file */
    for(iidx=0;iidx<4*DIM;iidx++)

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)

        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);

        /* Zero i-particle force accumulators for all three water sites */
        fix0             = _mm256_setzero_pd();
        fiy0             = _mm256_setzero_pd();
        fiz0             = _mm256_setzero_pd();
        fix1             = _mm256_setzero_pd();
        fiy1             = _mm256_setzero_pd();
        fiz1             = _mm256_setzero_pd();
        fix2             = _mm256_setzero_pd();
        fiy2             = _mm256_setzero_pd();
        fiz2             = _mm256_setzero_pd();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_pd();
        vvdwsum          = _mm256_setzero_pd();

        /* Start inner kernel loop */
        /* Full-width iterations: run while all four j entries are real
         * (padding entries in jjnr are negative); the remainder is handled
         * by the masked epilogue loop below.
         */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)

            /* Get j neighbor index, and coordinate index */
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);
            dx10             = _mm256_sub_pd(ix1,jx0);
            dy10             = _mm256_sub_pd(iy1,jy0);
            dz10             = _mm256_sub_pd(iz1,jz0);
            dx20             = _mm256_sub_pd(ix2,jx0);
            dy20             = _mm256_sub_pd(iy2,jy0);
            dz20             = _mm256_sub_pd(iz2,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);

            rinv00           = avx256_invsqrt_d(rsq00);
            rinv10           = avx256_invsqrt_d(rsq10);
            rinv20           = avx256_invsqrt_d(rsq20);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /* Zero per-iteration j-force accumulators */
            fjx0             = _mm256_setzero_pd();
            fjy0             = _mm256_setzero_pd();
            fjz0             = _mm256_setzero_pd();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* i site 0 <-> j: electrostatics + LJ-PME */
            if (gmx_mm256_any_lt(rsq00,rcutoff2))

            r00              = _mm256_mul_pd(rsq00,rinv00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
            c6grid_00       = gmx_mm256_load_4real_swizzle_pd(vdwgridioffsetptr0+vdwjidx0A,
                                                                 vdwgridioffsetptr0+vdwjidx0B,
                                                                 vdwgridioffsetptr0+vdwjidx0C,
                                                                 vdwgridioffsetptr0+vdwjidx0D);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r00,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            /* <<2: each table entry is a quadruplet (F,D,V,Fn) */
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            /* Transpose the four gathered quadruplets into per-quantity vectors */
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            /* Potential-shifted: subtract sh_ewald so V(rcut) = 0 */
            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_pd(ewclj2,rsq00);
            ewclj6           = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
            exponent         = avx256_exp_d(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_pd(_mm256_sub_pd(c6_00,_mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
            /* Potential-shifted LJ: subtract the (dispersion+repulsion) values at rcut */
            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                             _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_add_pd(_mm256_mul_pd(c6_00,sh_vdw_invrcut6),_mm256_mul_pd(c6grid_00,sh_lj_ewald))),one_sixth));
            /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,_mm256_sub_pd(vvdw6,_mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6)))),rinvsq00);

            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velecsum         = _mm256_add_pd(velecsum,velec);
            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);

            fscal            = _mm256_add_pd(felec,fvdw);

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* i site 1 <-> j: electrostatics only (no VdW on hydrogens) */
            if (gmx_mm256_any_lt(rsq10,rcutoff2))

            r10              = _mm256_mul_pd(rsq10,rinv10);

            /* Compute parameters for interactions between i and j atoms */
            qq10             = _mm256_mul_pd(iq1,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r10,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));

            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velecsum         = _mm256_add_pd(velecsum,velec);

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx10);
            ty               = _mm256_mul_pd(fscal,dy10);
            tz               = _mm256_mul_pd(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* i site 2 <-> j: electrostatics only */
            if (gmx_mm256_any_lt(rsq20,rcutoff2))

            r20              = _mm256_mul_pd(rsq20,rinv20);

            /* Compute parameters for interactions between i and j atoms */
            qq20             = _mm256_mul_pd(iq2,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r20,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));

            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velecsum         = _mm256_add_pd(velecsum,velec);

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx20);
            ty               = _mm256_mul_pd(fscal,dy20);
            tz               = _mm256_mul_pd(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /* Scatter accumulated j forces back to memory (Newton's 3rd law:
             * j forces are the negated sum of the three i-site contributions).
             */
            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;

            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);

            /* Inner loop uses 177 flops */

            /* Masked epilogue: the remaining 1-3 real j entries (padded with
             * negative indices) are processed once with a dummy mask that
             * zeroes all contributions from the padding lanes.
             */
            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0         = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

            /* Widen the 4x32-bit mask to 4x64-bit by duplicating each element */
            tmpmask1         = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0         = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask       = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));

            /* Clamp dummy indices to 0 so the loads below stay in bounds */
            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);
            dx10             = _mm256_sub_pd(ix1,jx0);
            dy10             = _mm256_sub_pd(iy1,jy0);
            dz10             = _mm256_sub_pd(iz1,jz0);
            dx20             = _mm256_sub_pd(ix2,jx0);
            dy20             = _mm256_sub_pd(iy2,jy0);
            dz20             = _mm256_sub_pd(iz2,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);

            rinv00           = avx256_invsqrt_d(rsq00);
            rinv10           = avx256_invsqrt_d(rsq10);
            rinv20           = avx256_invsqrt_d(rsq20);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            fjx0             = _mm256_setzero_pd();
            fjy0             = _mm256_setzero_pd();
            fjz0             = _mm256_setzero_pd();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))

            r00              = _mm256_mul_pd(rsq00,rinv00);
            /* Zero r for dummy lanes so the table lookup index stays valid */
            r00              = _mm256_andnot_pd(dummy_mask,r00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
            c6grid_00       = gmx_mm256_load_4real_swizzle_pd(vdwgridioffsetptr0+vdwjidx0A,
                                                                 vdwgridioffsetptr0+vdwjidx0B,
                                                                 vdwgridioffsetptr0+vdwjidx0C,
                                                                 vdwgridioffsetptr0+vdwjidx0D);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r00,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_pd(ewclj2,rsq00);
            ewclj6           = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
            exponent         = avx256_exp_d(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_pd(_mm256_sub_pd(c6_00,_mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
            vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                             _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_add_pd(_mm256_mul_pd(c6_00,sh_vdw_invrcut6),_mm256_mul_pd(c6grid_00,sh_lj_ewald))),one_sixth));
            /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
            fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,_mm256_sub_pd(vvdw6,_mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6)))),rinvsq00);

            cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velec            = _mm256_andnot_pd(dummy_mask,velec);
            velecsum         = _mm256_add_pd(velecsum,velec);
            vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);

            fscal            = _mm256_add_pd(felec,fvdw);

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))

            r10              = _mm256_mul_pd(rsq10,rinv10);
            r10              = _mm256_andnot_pd(dummy_mask,r10);

            /* Compute parameters for interactions between i and j atoms */
            qq10             = _mm256_mul_pd(iq1,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r10,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));

            cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velec            = _mm256_andnot_pd(dummy_mask,velec);
            velecsum         = _mm256_add_pd(velecsum,velec);

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx10);
            ty               = _mm256_mul_pd(fscal,dy10);
            tz               = _mm256_mul_pd(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))

            r20              = _mm256_mul_pd(rsq20,rinv20);
            r20              = _mm256_andnot_pd(dummy_mask,r20);

            /* Compute parameters for interactions between i and j atoms */
            qq20             = _mm256_mul_pd(iq2,jq0);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r20,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));

            cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_pd(velec,cutoff_mask);
            velec            = _mm256_andnot_pd(dummy_mask,velec);
            velecsum         = _mm256_add_pd(velecsum,velec);

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx20);
            ty               = _mm256_mul_pd(fscal,dy20);
            tz               = _mm256_mul_pd(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /* Dummy lanes write their (zeroed) forces to the scratch buffer
             * instead of real atom memory.
             */
            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;

            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);

            /* Inner loop uses 180 flops */

        /* End of innermost loop */

        /* Reduce and scatter the i-site forces; also accumulate the shift force */
        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        /* Update potential energies */
        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 20 flops */

    /* Increment number of outer iterations */

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*180);
778 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3P1_F_avx_256_double
779 * Electrostatics interaction: Ewald
780 * VdW interaction: LJEwald
781 * Geometry: Water3-Particle
782 * Calculate force/pot: Force
785 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3P1_F_avx_256_double
786 (t_nblist * gmx_restrict nlist,
787 rvec * gmx_restrict xx,
788 rvec * gmx_restrict ff,
789 struct t_forcerec * gmx_restrict fr,
790 t_mdatoms * gmx_restrict mdatoms,
791 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
792 t_nrnb * gmx_restrict nrnb)
794 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
795 * just 0 for non-waters.
796 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
797 * jnr indices corresponding to data put in the four positions in the SIMD register.
799 int i_shift_offset,i_coord_offset,outeriter,inneriter;
800 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
801 int jnrA,jnrB,jnrC,jnrD;
802 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
803 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
804 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
805 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
807 real *shiftvec,*fshift,*x,*f;
808 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
810 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
811 real * vdwioffsetptr0;
812 real * vdwgridioffsetptr0;
813 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
814 real * vdwioffsetptr1;
815 real * vdwgridioffsetptr1;
816 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
817 real * vdwioffsetptr2;
818 real * vdwgridioffsetptr2;
819 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
820 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
821 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
822 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
823 __m256d dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
824 __m256d dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
825 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
828 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
831 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
832 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
837 __m256d ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
838 __m256d one_half = _mm256_set1_pd(0.5);
839 __m256d minus_one = _mm256_set1_pd(-1.0);
841 __m256d ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
842 __m256d beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
844 __m256d dummy_mask,cutoff_mask;
845 __m128 tmpmask0,tmpmask1;
846 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
847 __m256d one = _mm256_set1_pd(1.0);
848 __m256d two = _mm256_set1_pd(2.0);
854 jindex = nlist->jindex;
856 shiftidx = nlist->shift;
858 shiftvec = fr->shift_vec[0];
859 fshift = fr->fshift[0];
860 facel = _mm256_set1_pd(fr->ic->epsfac);
861 charge = mdatoms->chargeA;
862 nvdwtype = fr->ntype;
864 vdwtype = mdatoms->typeA;
865 vdwgridparam = fr->ljpme_c6grid;
866 sh_lj_ewald = _mm256_set1_pd(fr->ic->sh_lj_ewald);
867 ewclj = _mm256_set1_pd(fr->ic->ewaldcoeff_lj);
868 ewclj2 = _mm256_mul_pd(minus_one,_mm256_mul_pd(ewclj,ewclj));
870 sh_ewald = _mm256_set1_pd(fr->ic->sh_ewald);
871 beta = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
872 beta2 = _mm256_mul_pd(beta,beta);
873 beta3 = _mm256_mul_pd(beta,beta2);
875 ewtab = fr->ic->tabq_coul_F;
876 ewtabscale = _mm256_set1_pd(fr->ic->tabq_scale);
877 ewtabhalfspace = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
879 /* Setup water-specific parameters */
880 inr = nlist->iinr[0];
881 iq0 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
882 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
883 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
884 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
885 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
887 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
888 rcutoff_scalar = fr->ic->rcoulomb;
889 rcutoff = _mm256_set1_pd(rcutoff_scalar);
890 rcutoff2 = _mm256_mul_pd(rcutoff,rcutoff);
892 sh_vdw_invrcut6 = _mm256_set1_pd(fr->ic->sh_invrc6);
893 rvdw = _mm256_set1_pd(fr->ic->rvdw);
895 /* Avoid stupid compiler warnings */
896 jnrA = jnrB = jnrC = jnrD = 0;
905 for(iidx=0;iidx<4*DIM;iidx++)
910 /* Start outer loop over neighborlists */
911 for(iidx=0; iidx<nri; iidx++)
913 /* Load shift vector for this list */
914 i_shift_offset = DIM*shiftidx[iidx];
916 /* Load limits for loop over neighbors */
917 j_index_start = jindex[iidx];
918 j_index_end = jindex[iidx+1];
920 /* Get outer coordinate index */
922 i_coord_offset = DIM*inr;
924 /* Load i particle coords and add shift vector */
925 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
926 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
928 fix0 = _mm256_setzero_pd();
929 fiy0 = _mm256_setzero_pd();
930 fiz0 = _mm256_setzero_pd();
931 fix1 = _mm256_setzero_pd();
932 fiy1 = _mm256_setzero_pd();
933 fiz1 = _mm256_setzero_pd();
934 fix2 = _mm256_setzero_pd();
935 fiy2 = _mm256_setzero_pd();
936 fiz2 = _mm256_setzero_pd();
938 /* Start inner kernel loop */
939 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
942 /* Get j neighbor index, and coordinate index */
947 j_coord_offsetA = DIM*jnrA;
948 j_coord_offsetB = DIM*jnrB;
949 j_coord_offsetC = DIM*jnrC;
950 j_coord_offsetD = DIM*jnrD;
952 /* load j atom coordinates */
953 gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
954 x+j_coord_offsetC,x+j_coord_offsetD,
957 /* Calculate displacement vector */
958 dx00 = _mm256_sub_pd(ix0,jx0);
959 dy00 = _mm256_sub_pd(iy0,jy0);
960 dz00 = _mm256_sub_pd(iz0,jz0);
961 dx10 = _mm256_sub_pd(ix1,jx0);
962 dy10 = _mm256_sub_pd(iy1,jy0);
963 dz10 = _mm256_sub_pd(iz1,jz0);
964 dx20 = _mm256_sub_pd(ix2,jx0);
965 dy20 = _mm256_sub_pd(iy2,jy0);
966 dz20 = _mm256_sub_pd(iz2,jz0);
968 /* Calculate squared distance and things based on it */
969 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
970 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
971 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
973 rinv00 = avx256_invsqrt_d(rsq00);
974 rinv10 = avx256_invsqrt_d(rsq10);
975 rinv20 = avx256_invsqrt_d(rsq20);
977 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
978 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
979 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
981 /* Load parameters for j particles */
982 jq0 = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
983 charge+jnrC+0,charge+jnrD+0);
984 vdwjidx0A = 2*vdwtype[jnrA+0];
985 vdwjidx0B = 2*vdwtype[jnrB+0];
986 vdwjidx0C = 2*vdwtype[jnrC+0];
987 vdwjidx0D = 2*vdwtype[jnrD+0];
989 fjx0 = _mm256_setzero_pd();
990 fjy0 = _mm256_setzero_pd();
991 fjz0 = _mm256_setzero_pd();
993 /**************************
994 * CALCULATE INTERACTIONS *
995 **************************/
997 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1000 r00 = _mm256_mul_pd(rsq00,rinv00);
1002 /* Compute parameters for interactions between i and j atoms */
1003 qq00 = _mm256_mul_pd(iq0,jq0);
1004 gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
1005 vdwioffsetptr0+vdwjidx0B,
1006 vdwioffsetptr0+vdwjidx0C,
1007 vdwioffsetptr0+vdwjidx0D,
1010 c6grid_00 = gmx_mm256_load_4real_swizzle_pd(vdwgridioffsetptr0+vdwjidx0A,
1011 vdwgridioffsetptr0+vdwjidx0B,
1012 vdwgridioffsetptr0+vdwjidx0C,
1013 vdwgridioffsetptr0+vdwjidx0D);
1015 /* EWALD ELECTROSTATICS */
1017 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1018 ewrt = _mm256_mul_pd(r00,ewtabscale);
1019 ewitab = _mm256_cvttpd_epi32(ewrt);
1020 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1021 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1022 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1024 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1025 felec = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
1027 /* Analytical LJ-PME */
1028 rinvsix = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1029 ewcljrsq = _mm256_mul_pd(ewclj2,rsq00);
1030 ewclj6 = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
1031 exponent = avx256_exp_d(ewcljrsq);
1032 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1033 poly = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
1034 /* f6A = 6 * C6grid * (1 - poly) */
1035 f6A = _mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly));
1036 /* f6B = C6grid * exponent * beta^6 */
1037 f6B = _mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6));
1038 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1039 fvdw = _mm256_mul_pd(_mm256_add_pd(_mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),_mm256_sub_pd(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1041 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1043 fscal = _mm256_add_pd(felec,fvdw);
1045 fscal = _mm256_and_pd(fscal,cutoff_mask);
1047 /* Calculate temporary vectorial force */
1048 tx = _mm256_mul_pd(fscal,dx00);
1049 ty = _mm256_mul_pd(fscal,dy00);
1050 tz = _mm256_mul_pd(fscal,dz00);
1052 /* Update vectorial force */
1053 fix0 = _mm256_add_pd(fix0,tx);
1054 fiy0 = _mm256_add_pd(fiy0,ty);
1055 fiz0 = _mm256_add_pd(fiz0,tz);
1057 fjx0 = _mm256_add_pd(fjx0,tx);
1058 fjy0 = _mm256_add_pd(fjy0,ty);
1059 fjz0 = _mm256_add_pd(fjz0,tz);
1063 /**************************
1064 * CALCULATE INTERACTIONS *
1065 **************************/
1067 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1070 r10 = _mm256_mul_pd(rsq10,rinv10);
1072 /* Compute parameters for interactions between i and j atoms */
1073 qq10 = _mm256_mul_pd(iq1,jq0);
1075 /* EWALD ELECTROSTATICS */
1077 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1078 ewrt = _mm256_mul_pd(r10,ewtabscale);
1079 ewitab = _mm256_cvttpd_epi32(ewrt);
1080 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1081 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1082 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1084 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1085 felec = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1087 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1091 fscal = _mm256_and_pd(fscal,cutoff_mask);
1093 /* Calculate temporary vectorial force */
1094 tx = _mm256_mul_pd(fscal,dx10);
1095 ty = _mm256_mul_pd(fscal,dy10);
1096 tz = _mm256_mul_pd(fscal,dz10);
1098 /* Update vectorial force */
1099 fix1 = _mm256_add_pd(fix1,tx);
1100 fiy1 = _mm256_add_pd(fiy1,ty);
1101 fiz1 = _mm256_add_pd(fiz1,tz);
1103 fjx0 = _mm256_add_pd(fjx0,tx);
1104 fjy0 = _mm256_add_pd(fjy0,ty);
1105 fjz0 = _mm256_add_pd(fjz0,tz);
1109 /**************************
1110 * CALCULATE INTERACTIONS *
1111 **************************/
1113 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1116 r20 = _mm256_mul_pd(rsq20,rinv20);
1118 /* Compute parameters for interactions between i and j atoms */
1119 qq20 = _mm256_mul_pd(iq2,jq0);
1121 /* EWALD ELECTROSTATICS */
1123 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1124 ewrt = _mm256_mul_pd(r20,ewtabscale);
1125 ewitab = _mm256_cvttpd_epi32(ewrt);
1126 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1127 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1128 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1130 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1131 felec = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1133 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1137 fscal = _mm256_and_pd(fscal,cutoff_mask);
1139 /* Calculate temporary vectorial force */
1140 tx = _mm256_mul_pd(fscal,dx20);
1141 ty = _mm256_mul_pd(fscal,dy20);
1142 tz = _mm256_mul_pd(fscal,dz20);
1144 /* Update vectorial force */
1145 fix2 = _mm256_add_pd(fix2,tx);
1146 fiy2 = _mm256_add_pd(fiy2,ty);
1147 fiz2 = _mm256_add_pd(fiz2,tz);
1149 fjx0 = _mm256_add_pd(fjx0,tx);
1150 fjy0 = _mm256_add_pd(fjy0,ty);
1151 fjz0 = _mm256_add_pd(fjz0,tz);
1155 fjptrA = f+j_coord_offsetA;
1156 fjptrB = f+j_coord_offsetB;
1157 fjptrC = f+j_coord_offsetC;
1158 fjptrD = f+j_coord_offsetD;
1160 gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
1162 /* Inner loop uses 143 flops */
1165 if(jidx<j_index_end)
1168 /* Get j neighbor index, and coordinate index */
1169 jnrlistA = jjnr[jidx];
1170 jnrlistB = jjnr[jidx+1];
1171 jnrlistC = jjnr[jidx+2];
1172 jnrlistD = jjnr[jidx+3];
1173 /* Sign of each element will be negative for non-real atoms.
1174 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1175 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
1177 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1179 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1180 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1181 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
1183 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1184 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1185 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1186 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1187 j_coord_offsetA = DIM*jnrA;
1188 j_coord_offsetB = DIM*jnrB;
1189 j_coord_offsetC = DIM*jnrC;
1190 j_coord_offsetD = DIM*jnrD;
1192 /* load j atom coordinates */
1193 gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1194 x+j_coord_offsetC,x+j_coord_offsetD,
1197 /* Calculate displacement vector */
1198 dx00 = _mm256_sub_pd(ix0,jx0);
1199 dy00 = _mm256_sub_pd(iy0,jy0);
1200 dz00 = _mm256_sub_pd(iz0,jz0);
1201 dx10 = _mm256_sub_pd(ix1,jx0);
1202 dy10 = _mm256_sub_pd(iy1,jy0);
1203 dz10 = _mm256_sub_pd(iz1,jz0);
1204 dx20 = _mm256_sub_pd(ix2,jx0);
1205 dy20 = _mm256_sub_pd(iy2,jy0);
1206 dz20 = _mm256_sub_pd(iz2,jz0);
1208 /* Calculate squared distance and things based on it */
1209 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1210 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1211 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1213 rinv00 = avx256_invsqrt_d(rsq00);
1214 rinv10 = avx256_invsqrt_d(rsq10);
1215 rinv20 = avx256_invsqrt_d(rsq20);
1217 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
1218 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
1219 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
1221 /* Load parameters for j particles */
1222 jq0 = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
1223 charge+jnrC+0,charge+jnrD+0);
1224 vdwjidx0A = 2*vdwtype[jnrA+0];
1225 vdwjidx0B = 2*vdwtype[jnrB+0];
1226 vdwjidx0C = 2*vdwtype[jnrC+0];
1227 vdwjidx0D = 2*vdwtype[jnrD+0];
1229 fjx0 = _mm256_setzero_pd();
1230 fjy0 = _mm256_setzero_pd();
1231 fjz0 = _mm256_setzero_pd();
1233 /**************************
1234 * CALCULATE INTERACTIONS *
1235 **************************/
1237 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1240 r00 = _mm256_mul_pd(rsq00,rinv00);
1241 r00 = _mm256_andnot_pd(dummy_mask,r00);
1243 /* Compute parameters for interactions between i and j atoms */
1244 qq00 = _mm256_mul_pd(iq0,jq0);
1245 gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
1246 vdwioffsetptr0+vdwjidx0B,
1247 vdwioffsetptr0+vdwjidx0C,
1248 vdwioffsetptr0+vdwjidx0D,
1251 c6grid_00 = gmx_mm256_load_4real_swizzle_pd(vdwgridioffsetptr0+vdwjidx0A,
1252 vdwgridioffsetptr0+vdwjidx0B,
1253 vdwgridioffsetptr0+vdwjidx0C,
1254 vdwgridioffsetptr0+vdwjidx0D);
1256 /* EWALD ELECTROSTATICS */
1258 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1259 ewrt = _mm256_mul_pd(r00,ewtabscale);
1260 ewitab = _mm256_cvttpd_epi32(ewrt);
1261 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1262 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1263 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1265 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1266 felec = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
1268 /* Analytical LJ-PME */
1269 rinvsix = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1270 ewcljrsq = _mm256_mul_pd(ewclj2,rsq00);
1271 ewclj6 = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
1272 exponent = avx256_exp_d(ewcljrsq);
1273 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1274 poly = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
1275 /* f6A = 6 * C6grid * (1 - poly) */
1276 f6A = _mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly));
1277 /* f6B = C6grid * exponent * beta^6 */
1278 f6B = _mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6));
1279 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1280 fvdw = _mm256_mul_pd(_mm256_add_pd(_mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),_mm256_sub_pd(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1282 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1284 fscal = _mm256_add_pd(felec,fvdw);
1286 fscal = _mm256_and_pd(fscal,cutoff_mask);
1288 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1290 /* Calculate temporary vectorial force */
1291 tx = _mm256_mul_pd(fscal,dx00);
1292 ty = _mm256_mul_pd(fscal,dy00);
1293 tz = _mm256_mul_pd(fscal,dz00);
1295 /* Update vectorial force */
1296 fix0 = _mm256_add_pd(fix0,tx);
1297 fiy0 = _mm256_add_pd(fiy0,ty);
1298 fiz0 = _mm256_add_pd(fiz0,tz);
1300 fjx0 = _mm256_add_pd(fjx0,tx);
1301 fjy0 = _mm256_add_pd(fjy0,ty);
1302 fjz0 = _mm256_add_pd(fjz0,tz);
1306 /**************************
1307 * CALCULATE INTERACTIONS *
1308 **************************/
1310 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1313 r10 = _mm256_mul_pd(rsq10,rinv10);
1314 r10 = _mm256_andnot_pd(dummy_mask,r10);
1316 /* Compute parameters for interactions between i and j atoms */
1317 qq10 = _mm256_mul_pd(iq1,jq0);
1319 /* EWALD ELECTROSTATICS */
1321 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1322 ewrt = _mm256_mul_pd(r10,ewtabscale);
1323 ewitab = _mm256_cvttpd_epi32(ewrt);
1324 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1325 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1326 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1328 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1329 felec = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1331 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1335 fscal = _mm256_and_pd(fscal,cutoff_mask);
1337 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1339 /* Calculate temporary vectorial force */
1340 tx = _mm256_mul_pd(fscal,dx10);
1341 ty = _mm256_mul_pd(fscal,dy10);
1342 tz = _mm256_mul_pd(fscal,dz10);
1344 /* Update vectorial force */
1345 fix1 = _mm256_add_pd(fix1,tx);
1346 fiy1 = _mm256_add_pd(fiy1,ty);
1347 fiz1 = _mm256_add_pd(fiz1,tz);
1349 fjx0 = _mm256_add_pd(fjx0,tx);
1350 fjy0 = _mm256_add_pd(fjy0,ty);
1351 fjz0 = _mm256_add_pd(fjz0,tz);
1355 /**************************
1356 * CALCULATE INTERACTIONS *
1357 **************************/
1359 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1362 r20 = _mm256_mul_pd(rsq20,rinv20);
1363 r20 = _mm256_andnot_pd(dummy_mask,r20);
1365 /* Compute parameters for interactions between i and j atoms */
1366 qq20 = _mm256_mul_pd(iq2,jq0);
1368 /* EWALD ELECTROSTATICS */
1370 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1371 ewrt = _mm256_mul_pd(r20,ewtabscale);
1372 ewitab = _mm256_cvttpd_epi32(ewrt);
1373 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1374 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1375 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1377 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1378 felec = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1380 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1384 fscal = _mm256_and_pd(fscal,cutoff_mask);
1386 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1388 /* Calculate temporary vectorial force */
1389 tx = _mm256_mul_pd(fscal,dx20);
1390 ty = _mm256_mul_pd(fscal,dy20);
1391 tz = _mm256_mul_pd(fscal,dz20);
1393 /* Update vectorial force */
1394 fix2 = _mm256_add_pd(fix2,tx);
1395 fiy2 = _mm256_add_pd(fiy2,ty);
1396 fiz2 = _mm256_add_pd(fiz2,tz);
1398 fjx0 = _mm256_add_pd(fjx0,tx);
1399 fjy0 = _mm256_add_pd(fjy0,ty);
1400 fjz0 = _mm256_add_pd(fjz0,tz);
1404 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1405 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1406 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1407 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1409 gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,fjx0,fjy0,fjz0);
1411 /* Inner loop uses 146 flops */
1414 /* End of innermost loop */
1416 gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1417 f+i_coord_offset,fshift+i_shift_offset);
1419 /* Increment number of inner iterations */
1420 inneriter += j_index_end - j_index_start;
1422 /* Outer loop uses 18 flops */
1425 /* Increment number of outer iterations */
1428 /* Update outer/inner flops */
1430 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*146);