2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
47 #include "kernelutil_x86_avx_256_double.h"
50 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_VF_avx_256_double
51 * Electrostatics interaction: Ewald
52 * VdW interaction: LJEwald
53 * Geometry: Water4-Water4
54 * Calculate force/pot: PotentialAndForce
57 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_VF_avx_256_double
58 (t_nblist * gmx_restrict nlist,
59 rvec * gmx_restrict xx,
60 rvec * gmx_restrict ff,
61 struct t_forcerec * gmx_restrict fr,
62 t_mdatoms * gmx_restrict mdatoms,
63 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64 t_nrnb * gmx_restrict nrnb)
66 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
67 * just 0 for non-waters.
68 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
69 * jnr indices corresponding to data put in the four positions in the SIMD register.
71 int i_shift_offset,i_coord_offset,outeriter,inneriter;
72 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73 int jnrA,jnrB,jnrC,jnrD;
74 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
75 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
76 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
77 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
79 real *shiftvec,*fshift,*x,*f;
80 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
82 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
83 real * vdwioffsetptr0;
84 real * vdwgridioffsetptr0;
85 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
86 real * vdwioffsetptr1;
87 real * vdwgridioffsetptr1;
88 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
89 real * vdwioffsetptr2;
90 real * vdwgridioffsetptr2;
91 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92 real * vdwioffsetptr3;
93 real * vdwgridioffsetptr3;
94 __m256d ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
95 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
96 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
97 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
98 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
99 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
100 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
101 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
102 __m256d jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
103 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
104 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
105 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
106 __m256d dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
107 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
108 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
109 __m256d dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
110 __m256d dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
111 __m256d dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
112 __m256d dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
113 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
116 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
119 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
120 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
132 __m256d ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
133 __m256d one_half = _mm256_set1_pd(0.5);
134 __m256d minus_one = _mm256_set1_pd(-1.0);
136 __m256d ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
137 __m256d beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
139 __m256d dummy_mask,cutoff_mask;
140 __m128 tmpmask0,tmpmask1;
141 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
142 __m256d one = _mm256_set1_pd(1.0);
143 __m256d two = _mm256_set1_pd(2.0);
149 jindex = nlist->jindex;
151 shiftidx = nlist->shift;
153 shiftvec = fr->shift_vec[0];
154 fshift = fr->fshift[0];
155 facel = _mm256_set1_pd(fr->ic->epsfac);
156 charge = mdatoms->chargeA;
157 nvdwtype = fr->ntype;
159 vdwtype = mdatoms->typeA;
160 vdwgridparam = fr->ljpme_c6grid;
161 sh_lj_ewald = _mm256_set1_pd(fr->ic->sh_lj_ewald);
162 ewclj = _mm256_set1_pd(fr->ic->ewaldcoeff_lj);
163 ewclj2 = _mm256_mul_pd(minus_one,_mm256_mul_pd(ewclj,ewclj));
165 sh_ewald = _mm256_set1_pd(fr->ic->sh_ewald);
166 beta = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
167 beta2 = _mm256_mul_pd(beta,beta);
168 beta3 = _mm256_mul_pd(beta,beta2);
170 ewtab = fr->ic->tabq_coul_FDV0;
171 ewtabscale = _mm256_set1_pd(fr->ic->tabq_scale);
172 ewtabhalfspace = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
174 /* Setup water-specific parameters */
175 inr = nlist->iinr[0];
176 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
177 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
178 iq3 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
179 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
180 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
182 jq1 = _mm256_set1_pd(charge[inr+1]);
183 jq2 = _mm256_set1_pd(charge[inr+2]);
184 jq3 = _mm256_set1_pd(charge[inr+3]);
185 vdwjidx0A = 2*vdwtype[inr+0];
186 c6_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
187 c12_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
188 c6grid_00 = _mm256_set1_pd(vdwgridioffsetptr0[vdwjidx0A]);
189 qq11 = _mm256_mul_pd(iq1,jq1);
190 qq12 = _mm256_mul_pd(iq1,jq2);
191 qq13 = _mm256_mul_pd(iq1,jq3);
192 qq21 = _mm256_mul_pd(iq2,jq1);
193 qq22 = _mm256_mul_pd(iq2,jq2);
194 qq23 = _mm256_mul_pd(iq2,jq3);
195 qq31 = _mm256_mul_pd(iq3,jq1);
196 qq32 = _mm256_mul_pd(iq3,jq2);
197 qq33 = _mm256_mul_pd(iq3,jq3);
199 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
200 rcutoff_scalar = fr->ic->rcoulomb;
201 rcutoff = _mm256_set1_pd(rcutoff_scalar);
202 rcutoff2 = _mm256_mul_pd(rcutoff,rcutoff);
204 sh_vdw_invrcut6 = _mm256_set1_pd(fr->ic->sh_invrc6);
205 rvdw = _mm256_set1_pd(fr->ic->rvdw);
207 /* Avoid stupid compiler warnings */
208 jnrA = jnrB = jnrC = jnrD = 0;
217 for(iidx=0;iidx<4*DIM;iidx++)
222 /* Start outer loop over neighborlists */
223 for(iidx=0; iidx<nri; iidx++)
225 /* Load shift vector for this list */
226 i_shift_offset = DIM*shiftidx[iidx];
228 /* Load limits for loop over neighbors */
229 j_index_start = jindex[iidx];
230 j_index_end = jindex[iidx+1];
232 /* Get outer coordinate index */
234 i_coord_offset = DIM*inr;
236 /* Load i particle coords and add shift vector */
237 gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
238 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
240 fix0 = _mm256_setzero_pd();
241 fiy0 = _mm256_setzero_pd();
242 fiz0 = _mm256_setzero_pd();
243 fix1 = _mm256_setzero_pd();
244 fiy1 = _mm256_setzero_pd();
245 fiz1 = _mm256_setzero_pd();
246 fix2 = _mm256_setzero_pd();
247 fiy2 = _mm256_setzero_pd();
248 fiz2 = _mm256_setzero_pd();
249 fix3 = _mm256_setzero_pd();
250 fiy3 = _mm256_setzero_pd();
251 fiz3 = _mm256_setzero_pd();
253 /* Reset potential sums */
254 velecsum = _mm256_setzero_pd();
255 vvdwsum = _mm256_setzero_pd();
257 /* Start inner kernel loop */
258 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
261 /* Get j neighbor index, and coordinate index */
266 j_coord_offsetA = DIM*jnrA;
267 j_coord_offsetB = DIM*jnrB;
268 j_coord_offsetC = DIM*jnrC;
269 j_coord_offsetD = DIM*jnrD;
271 /* load j atom coordinates */
272 gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
273 x+j_coord_offsetC,x+j_coord_offsetD,
274 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
275 &jy2,&jz2,&jx3,&jy3,&jz3);
277 /* Calculate displacement vector */
278 dx00 = _mm256_sub_pd(ix0,jx0);
279 dy00 = _mm256_sub_pd(iy0,jy0);
280 dz00 = _mm256_sub_pd(iz0,jz0);
281 dx11 = _mm256_sub_pd(ix1,jx1);
282 dy11 = _mm256_sub_pd(iy1,jy1);
283 dz11 = _mm256_sub_pd(iz1,jz1);
284 dx12 = _mm256_sub_pd(ix1,jx2);
285 dy12 = _mm256_sub_pd(iy1,jy2);
286 dz12 = _mm256_sub_pd(iz1,jz2);
287 dx13 = _mm256_sub_pd(ix1,jx3);
288 dy13 = _mm256_sub_pd(iy1,jy3);
289 dz13 = _mm256_sub_pd(iz1,jz3);
290 dx21 = _mm256_sub_pd(ix2,jx1);
291 dy21 = _mm256_sub_pd(iy2,jy1);
292 dz21 = _mm256_sub_pd(iz2,jz1);
293 dx22 = _mm256_sub_pd(ix2,jx2);
294 dy22 = _mm256_sub_pd(iy2,jy2);
295 dz22 = _mm256_sub_pd(iz2,jz2);
296 dx23 = _mm256_sub_pd(ix2,jx3);
297 dy23 = _mm256_sub_pd(iy2,jy3);
298 dz23 = _mm256_sub_pd(iz2,jz3);
299 dx31 = _mm256_sub_pd(ix3,jx1);
300 dy31 = _mm256_sub_pd(iy3,jy1);
301 dz31 = _mm256_sub_pd(iz3,jz1);
302 dx32 = _mm256_sub_pd(ix3,jx2);
303 dy32 = _mm256_sub_pd(iy3,jy2);
304 dz32 = _mm256_sub_pd(iz3,jz2);
305 dx33 = _mm256_sub_pd(ix3,jx3);
306 dy33 = _mm256_sub_pd(iy3,jy3);
307 dz33 = _mm256_sub_pd(iz3,jz3);
309 /* Calculate squared distance and things based on it */
310 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
311 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
312 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
313 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
314 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
315 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
316 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
317 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
318 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
319 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
321 rinv00 = avx256_invsqrt_d(rsq00);
322 rinv11 = avx256_invsqrt_d(rsq11);
323 rinv12 = avx256_invsqrt_d(rsq12);
324 rinv13 = avx256_invsqrt_d(rsq13);
325 rinv21 = avx256_invsqrt_d(rsq21);
326 rinv22 = avx256_invsqrt_d(rsq22);
327 rinv23 = avx256_invsqrt_d(rsq23);
328 rinv31 = avx256_invsqrt_d(rsq31);
329 rinv32 = avx256_invsqrt_d(rsq32);
330 rinv33 = avx256_invsqrt_d(rsq33);
332 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
333 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
334 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
335 rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
336 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
337 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
338 rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
339 rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
340 rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
341 rinvsq33 = _mm256_mul_pd(rinv33,rinv33);
343 fjx0 = _mm256_setzero_pd();
344 fjy0 = _mm256_setzero_pd();
345 fjz0 = _mm256_setzero_pd();
346 fjx1 = _mm256_setzero_pd();
347 fjy1 = _mm256_setzero_pd();
348 fjz1 = _mm256_setzero_pd();
349 fjx2 = _mm256_setzero_pd();
350 fjy2 = _mm256_setzero_pd();
351 fjz2 = _mm256_setzero_pd();
352 fjx3 = _mm256_setzero_pd();
353 fjy3 = _mm256_setzero_pd();
354 fjz3 = _mm256_setzero_pd();
356 /**************************
357 * CALCULATE INTERACTIONS *
358 **************************/
360 if (gmx_mm256_any_lt(rsq00,rcutoff2))
363 r00 = _mm256_mul_pd(rsq00,rinv00);
365 /* Analytical LJ-PME */
366 rinvsix = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
367 ewcljrsq = _mm256_mul_pd(ewclj2,rsq00);
368 ewclj6 = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
369 exponent = avx256_exp_d(ewcljrsq);
370 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
371 poly = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
372 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
373 vvdw6 = _mm256_mul_pd(_mm256_sub_pd(c6_00,_mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly))),rinvsix);
374 vvdw12 = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
375 vvdw = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
376 _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_add_pd(_mm256_mul_pd(c6_00,sh_vdw_invrcut6),_mm256_mul_pd(c6grid_00,sh_lj_ewald))),one_sixth));
377             /* fvdw = vvdw12/r - (vvdw6/r - (C6grid * exponent * beta^6)/r) */
378 fvdw = _mm256_mul_pd(_mm256_sub_pd(vvdw12,_mm256_sub_pd(vvdw6,_mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6)))),rinvsq00);
380 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
382 /* Update potential sum for this i atom from the interaction with this j atom. */
383 vvdw = _mm256_and_pd(vvdw,cutoff_mask);
384 vvdwsum = _mm256_add_pd(vvdwsum,vvdw);
388 fscal = _mm256_and_pd(fscal,cutoff_mask);
390 /* Calculate temporary vectorial force */
391 tx = _mm256_mul_pd(fscal,dx00);
392 ty = _mm256_mul_pd(fscal,dy00);
393 tz = _mm256_mul_pd(fscal,dz00);
395 /* Update vectorial force */
396 fix0 = _mm256_add_pd(fix0,tx);
397 fiy0 = _mm256_add_pd(fiy0,ty);
398 fiz0 = _mm256_add_pd(fiz0,tz);
400 fjx0 = _mm256_add_pd(fjx0,tx);
401 fjy0 = _mm256_add_pd(fjy0,ty);
402 fjz0 = _mm256_add_pd(fjz0,tz);
406 /**************************
407 * CALCULATE INTERACTIONS *
408 **************************/
410 if (gmx_mm256_any_lt(rsq11,rcutoff2))
413 r11 = _mm256_mul_pd(rsq11,rinv11);
415 /* EWALD ELECTROSTATICS */
417 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
418 ewrt = _mm256_mul_pd(r11,ewtabscale);
419 ewitab = _mm256_cvttpd_epi32(ewrt);
420 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
421 ewitab = _mm_slli_epi32(ewitab,2);
422 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
423 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
424 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
425 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
426 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
427 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
428 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
429 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
430 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
432 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
434 /* Update potential sum for this i atom from the interaction with this j atom. */
435 velec = _mm256_and_pd(velec,cutoff_mask);
436 velecsum = _mm256_add_pd(velecsum,velec);
440 fscal = _mm256_and_pd(fscal,cutoff_mask);
442 /* Calculate temporary vectorial force */
443 tx = _mm256_mul_pd(fscal,dx11);
444 ty = _mm256_mul_pd(fscal,dy11);
445 tz = _mm256_mul_pd(fscal,dz11);
447 /* Update vectorial force */
448 fix1 = _mm256_add_pd(fix1,tx);
449 fiy1 = _mm256_add_pd(fiy1,ty);
450 fiz1 = _mm256_add_pd(fiz1,tz);
452 fjx1 = _mm256_add_pd(fjx1,tx);
453 fjy1 = _mm256_add_pd(fjy1,ty);
454 fjz1 = _mm256_add_pd(fjz1,tz);
458 /**************************
459 * CALCULATE INTERACTIONS *
460 **************************/
462 if (gmx_mm256_any_lt(rsq12,rcutoff2))
465 r12 = _mm256_mul_pd(rsq12,rinv12);
467 /* EWALD ELECTROSTATICS */
469 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
470 ewrt = _mm256_mul_pd(r12,ewtabscale);
471 ewitab = _mm256_cvttpd_epi32(ewrt);
472 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
473 ewitab = _mm_slli_epi32(ewitab,2);
474 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
475 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
476 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
477 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
478 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
479 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
480 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
481 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
482 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
484 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
486 /* Update potential sum for this i atom from the interaction with this j atom. */
487 velec = _mm256_and_pd(velec,cutoff_mask);
488 velecsum = _mm256_add_pd(velecsum,velec);
492 fscal = _mm256_and_pd(fscal,cutoff_mask);
494 /* Calculate temporary vectorial force */
495 tx = _mm256_mul_pd(fscal,dx12);
496 ty = _mm256_mul_pd(fscal,dy12);
497 tz = _mm256_mul_pd(fscal,dz12);
499 /* Update vectorial force */
500 fix1 = _mm256_add_pd(fix1,tx);
501 fiy1 = _mm256_add_pd(fiy1,ty);
502 fiz1 = _mm256_add_pd(fiz1,tz);
504 fjx2 = _mm256_add_pd(fjx2,tx);
505 fjy2 = _mm256_add_pd(fjy2,ty);
506 fjz2 = _mm256_add_pd(fjz2,tz);
510 /**************************
511 * CALCULATE INTERACTIONS *
512 **************************/
514 if (gmx_mm256_any_lt(rsq13,rcutoff2))
517 r13 = _mm256_mul_pd(rsq13,rinv13);
519 /* EWALD ELECTROSTATICS */
521 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
522 ewrt = _mm256_mul_pd(r13,ewtabscale);
523 ewitab = _mm256_cvttpd_epi32(ewrt);
524 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
525 ewitab = _mm_slli_epi32(ewitab,2);
526 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
527 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
528 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
529 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
530 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
531 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
532 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
533 velec = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_sub_pd(rinv13,sh_ewald),velec));
534 felec = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
536 cutoff_mask = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
538 /* Update potential sum for this i atom from the interaction with this j atom. */
539 velec = _mm256_and_pd(velec,cutoff_mask);
540 velecsum = _mm256_add_pd(velecsum,velec);
544 fscal = _mm256_and_pd(fscal,cutoff_mask);
546 /* Calculate temporary vectorial force */
547 tx = _mm256_mul_pd(fscal,dx13);
548 ty = _mm256_mul_pd(fscal,dy13);
549 tz = _mm256_mul_pd(fscal,dz13);
551 /* Update vectorial force */
552 fix1 = _mm256_add_pd(fix1,tx);
553 fiy1 = _mm256_add_pd(fiy1,ty);
554 fiz1 = _mm256_add_pd(fiz1,tz);
556 fjx3 = _mm256_add_pd(fjx3,tx);
557 fjy3 = _mm256_add_pd(fjy3,ty);
558 fjz3 = _mm256_add_pd(fjz3,tz);
562 /**************************
563 * CALCULATE INTERACTIONS *
564 **************************/
566 if (gmx_mm256_any_lt(rsq21,rcutoff2))
569 r21 = _mm256_mul_pd(rsq21,rinv21);
571 /* EWALD ELECTROSTATICS */
573 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
574 ewrt = _mm256_mul_pd(r21,ewtabscale);
575 ewitab = _mm256_cvttpd_epi32(ewrt);
576 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
577 ewitab = _mm_slli_epi32(ewitab,2);
578 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
579 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
580 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
581 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
582 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
583 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
584 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
585 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
586 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
588 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
590 /* Update potential sum for this i atom from the interaction with this j atom. */
591 velec = _mm256_and_pd(velec,cutoff_mask);
592 velecsum = _mm256_add_pd(velecsum,velec);
596 fscal = _mm256_and_pd(fscal,cutoff_mask);
598 /* Calculate temporary vectorial force */
599 tx = _mm256_mul_pd(fscal,dx21);
600 ty = _mm256_mul_pd(fscal,dy21);
601 tz = _mm256_mul_pd(fscal,dz21);
603 /* Update vectorial force */
604 fix2 = _mm256_add_pd(fix2,tx);
605 fiy2 = _mm256_add_pd(fiy2,ty);
606 fiz2 = _mm256_add_pd(fiz2,tz);
608 fjx1 = _mm256_add_pd(fjx1,tx);
609 fjy1 = _mm256_add_pd(fjy1,ty);
610 fjz1 = _mm256_add_pd(fjz1,tz);
614 /**************************
615 * CALCULATE INTERACTIONS *
616 **************************/
618 if (gmx_mm256_any_lt(rsq22,rcutoff2))
621 r22 = _mm256_mul_pd(rsq22,rinv22);
623 /* EWALD ELECTROSTATICS */
625 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
626 ewrt = _mm256_mul_pd(r22,ewtabscale);
627 ewitab = _mm256_cvttpd_epi32(ewrt);
628 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
629 ewitab = _mm_slli_epi32(ewitab,2);
630 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
631 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
632 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
633 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
634 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
635 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
636 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
637 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
638 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
640 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
642 /* Update potential sum for this i atom from the interaction with this j atom. */
643 velec = _mm256_and_pd(velec,cutoff_mask);
644 velecsum = _mm256_add_pd(velecsum,velec);
648 fscal = _mm256_and_pd(fscal,cutoff_mask);
650 /* Calculate temporary vectorial force */
651 tx = _mm256_mul_pd(fscal,dx22);
652 ty = _mm256_mul_pd(fscal,dy22);
653 tz = _mm256_mul_pd(fscal,dz22);
655 /* Update vectorial force */
656 fix2 = _mm256_add_pd(fix2,tx);
657 fiy2 = _mm256_add_pd(fiy2,ty);
658 fiz2 = _mm256_add_pd(fiz2,tz);
660 fjx2 = _mm256_add_pd(fjx2,tx);
661 fjy2 = _mm256_add_pd(fjy2,ty);
662 fjz2 = _mm256_add_pd(fjz2,tz);
666 /**************************
667 * CALCULATE INTERACTIONS *
668 **************************/
670 if (gmx_mm256_any_lt(rsq23,rcutoff2))
673 r23 = _mm256_mul_pd(rsq23,rinv23);
675 /* EWALD ELECTROSTATICS */
677 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
678 ewrt = _mm256_mul_pd(r23,ewtabscale);
679 ewitab = _mm256_cvttpd_epi32(ewrt);
680 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
681 ewitab = _mm_slli_epi32(ewitab,2);
682 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
683 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
684 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
685 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
686 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
687 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
688 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
689 velec = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_sub_pd(rinv23,sh_ewald),velec));
690 felec = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
692 cutoff_mask = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
694 /* Update potential sum for this i atom from the interaction with this j atom. */
695 velec = _mm256_and_pd(velec,cutoff_mask);
696 velecsum = _mm256_add_pd(velecsum,velec);
700 fscal = _mm256_and_pd(fscal,cutoff_mask);
702 /* Calculate temporary vectorial force */
703 tx = _mm256_mul_pd(fscal,dx23);
704 ty = _mm256_mul_pd(fscal,dy23);
705 tz = _mm256_mul_pd(fscal,dz23);
707 /* Update vectorial force */
708 fix2 = _mm256_add_pd(fix2,tx);
709 fiy2 = _mm256_add_pd(fiy2,ty);
710 fiz2 = _mm256_add_pd(fiz2,tz);
712 fjx3 = _mm256_add_pd(fjx3,tx);
713 fjy3 = _mm256_add_pd(fjy3,ty);
714 fjz3 = _mm256_add_pd(fjz3,tz);
718 /**************************
719 * CALCULATE INTERACTIONS *
720 **************************/
722 if (gmx_mm256_any_lt(rsq31,rcutoff2))
725 r31 = _mm256_mul_pd(rsq31,rinv31);
727 /* EWALD ELECTROSTATICS */
729 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
730 ewrt = _mm256_mul_pd(r31,ewtabscale);
731 ewitab = _mm256_cvttpd_epi32(ewrt);
732 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
733 ewitab = _mm_slli_epi32(ewitab,2);
734 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
735 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
736 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
737 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
738 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
739 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
740 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
741 velec = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_sub_pd(rinv31,sh_ewald),velec));
742 felec = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
744 cutoff_mask = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
746 /* Update potential sum for this i atom from the interaction with this j atom. */
747 velec = _mm256_and_pd(velec,cutoff_mask);
748 velecsum = _mm256_add_pd(velecsum,velec);
752 fscal = _mm256_and_pd(fscal,cutoff_mask);
754 /* Calculate temporary vectorial force */
755 tx = _mm256_mul_pd(fscal,dx31);
756 ty = _mm256_mul_pd(fscal,dy31);
757 tz = _mm256_mul_pd(fscal,dz31);
759 /* Update vectorial force */
760 fix3 = _mm256_add_pd(fix3,tx);
761 fiy3 = _mm256_add_pd(fiy3,ty);
762 fiz3 = _mm256_add_pd(fiz3,tz);
764 fjx1 = _mm256_add_pd(fjx1,tx);
765 fjy1 = _mm256_add_pd(fjy1,ty);
766 fjz1 = _mm256_add_pd(fjz1,tz);
770 /**************************
771 * CALCULATE INTERACTIONS *
772 **************************/
774 if (gmx_mm256_any_lt(rsq32,rcutoff2))
777 r32 = _mm256_mul_pd(rsq32,rinv32);
779 /* EWALD ELECTROSTATICS */
781 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
782 ewrt = _mm256_mul_pd(r32,ewtabscale);
783 ewitab = _mm256_cvttpd_epi32(ewrt);
784 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
785 ewitab = _mm_slli_epi32(ewitab,2);
786 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
787 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
788 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
789 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
790 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
791 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
792 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
793 velec = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_sub_pd(rinv32,sh_ewald),velec));
794 felec = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
796 cutoff_mask = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
798 /* Update potential sum for this i atom from the interaction with this j atom. */
799 velec = _mm256_and_pd(velec,cutoff_mask);
800 velecsum = _mm256_add_pd(velecsum,velec);
804 fscal = _mm256_and_pd(fscal,cutoff_mask);
806 /* Calculate temporary vectorial force */
807 tx = _mm256_mul_pd(fscal,dx32);
808 ty = _mm256_mul_pd(fscal,dy32);
809 tz = _mm256_mul_pd(fscal,dz32);
811 /* Update vectorial force */
812 fix3 = _mm256_add_pd(fix3,tx);
813 fiy3 = _mm256_add_pd(fiy3,ty);
814 fiz3 = _mm256_add_pd(fiz3,tz);
816 fjx2 = _mm256_add_pd(fjx2,tx);
817 fjy2 = _mm256_add_pd(fjy2,ty);
818 fjz2 = _mm256_add_pd(fjz2,tz);
822 /**************************
823 * CALCULATE INTERACTIONS *
824 **************************/
826 if (gmx_mm256_any_lt(rsq33,rcutoff2))
829 r33 = _mm256_mul_pd(rsq33,rinv33);
831 /* EWALD ELECTROSTATICS */
833 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
834 ewrt = _mm256_mul_pd(r33,ewtabscale);
835 ewitab = _mm256_cvttpd_epi32(ewrt);
836 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
837 ewitab = _mm_slli_epi32(ewitab,2);
838 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
839 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
840 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
841 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
842 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
843 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
844 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
845 velec = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_sub_pd(rinv33,sh_ewald),velec));
846 felec = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
848 cutoff_mask = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
850 /* Update potential sum for this i atom from the interaction with this j atom. */
851 velec = _mm256_and_pd(velec,cutoff_mask);
852 velecsum = _mm256_add_pd(velecsum,velec);
856 fscal = _mm256_and_pd(fscal,cutoff_mask);
858 /* Calculate temporary vectorial force */
859 tx = _mm256_mul_pd(fscal,dx33);
860 ty = _mm256_mul_pd(fscal,dy33);
861 tz = _mm256_mul_pd(fscal,dz33);
863 /* Update vectorial force */
864 fix3 = _mm256_add_pd(fix3,tx);
865 fiy3 = _mm256_add_pd(fiy3,ty);
866 fiz3 = _mm256_add_pd(fiz3,tz);
868 fjx3 = _mm256_add_pd(fjx3,tx);
869 fjy3 = _mm256_add_pd(fjy3,ty);
870 fjz3 = _mm256_add_pd(fjz3,tz);
874 fjptrA = f+j_coord_offsetA;
875 fjptrB = f+j_coord_offsetB;
876 fjptrC = f+j_coord_offsetC;
877 fjptrD = f+j_coord_offsetD;
879 gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
880 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
881 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
883 /* Inner loop uses 479 flops */
889 /* Get j neighbor index, and coordinate index */
890 jnrlistA = jjnr[jidx];
891 jnrlistB = jjnr[jidx+1];
892 jnrlistC = jjnr[jidx+2];
893 jnrlistD = jjnr[jidx+3];
894 /* Sign of each element will be negative for non-real atoms.
895 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
896              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
898 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
900 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
901 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
902 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
904 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
905 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
906 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
907 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
908 j_coord_offsetA = DIM*jnrA;
909 j_coord_offsetB = DIM*jnrB;
910 j_coord_offsetC = DIM*jnrC;
911 j_coord_offsetD = DIM*jnrD;
913 /* load j atom coordinates */
914 gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
915 x+j_coord_offsetC,x+j_coord_offsetD,
916 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
917 &jy2,&jz2,&jx3,&jy3,&jz3);
919 /* Calculate displacement vector */
920 dx00 = _mm256_sub_pd(ix0,jx0);
921 dy00 = _mm256_sub_pd(iy0,jy0);
922 dz00 = _mm256_sub_pd(iz0,jz0);
923 dx11 = _mm256_sub_pd(ix1,jx1);
924 dy11 = _mm256_sub_pd(iy1,jy1);
925 dz11 = _mm256_sub_pd(iz1,jz1);
926 dx12 = _mm256_sub_pd(ix1,jx2);
927 dy12 = _mm256_sub_pd(iy1,jy2);
928 dz12 = _mm256_sub_pd(iz1,jz2);
929 dx13 = _mm256_sub_pd(ix1,jx3);
930 dy13 = _mm256_sub_pd(iy1,jy3);
931 dz13 = _mm256_sub_pd(iz1,jz3);
932 dx21 = _mm256_sub_pd(ix2,jx1);
933 dy21 = _mm256_sub_pd(iy2,jy1);
934 dz21 = _mm256_sub_pd(iz2,jz1);
935 dx22 = _mm256_sub_pd(ix2,jx2);
936 dy22 = _mm256_sub_pd(iy2,jy2);
937 dz22 = _mm256_sub_pd(iz2,jz2);
938 dx23 = _mm256_sub_pd(ix2,jx3);
939 dy23 = _mm256_sub_pd(iy2,jy3);
940 dz23 = _mm256_sub_pd(iz2,jz3);
941 dx31 = _mm256_sub_pd(ix3,jx1);
942 dy31 = _mm256_sub_pd(iy3,jy1);
943 dz31 = _mm256_sub_pd(iz3,jz1);
944 dx32 = _mm256_sub_pd(ix3,jx2);
945 dy32 = _mm256_sub_pd(iy3,jy2);
946 dz32 = _mm256_sub_pd(iz3,jz2);
947 dx33 = _mm256_sub_pd(ix3,jx3);
948 dy33 = _mm256_sub_pd(iy3,jy3);
949 dz33 = _mm256_sub_pd(iz3,jz3);
951 /* Calculate squared distance and things based on it */
952 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
953 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
954 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
955 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
956 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
957 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
958 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
959 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
960 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
961 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
963 rinv00 = avx256_invsqrt_d(rsq00);
964 rinv11 = avx256_invsqrt_d(rsq11);
965 rinv12 = avx256_invsqrt_d(rsq12);
966 rinv13 = avx256_invsqrt_d(rsq13);
967 rinv21 = avx256_invsqrt_d(rsq21);
968 rinv22 = avx256_invsqrt_d(rsq22);
969 rinv23 = avx256_invsqrt_d(rsq23);
970 rinv31 = avx256_invsqrt_d(rsq31);
971 rinv32 = avx256_invsqrt_d(rsq32);
972 rinv33 = avx256_invsqrt_d(rsq33);
974 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
975 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
976 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
977 rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
978 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
979 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
980 rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
981 rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
982 rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
983 rinvsq33 = _mm256_mul_pd(rinv33,rinv33);
985 fjx0 = _mm256_setzero_pd();
986 fjy0 = _mm256_setzero_pd();
987 fjz0 = _mm256_setzero_pd();
988 fjx1 = _mm256_setzero_pd();
989 fjy1 = _mm256_setzero_pd();
990 fjz1 = _mm256_setzero_pd();
991 fjx2 = _mm256_setzero_pd();
992 fjy2 = _mm256_setzero_pd();
993 fjz2 = _mm256_setzero_pd();
994 fjx3 = _mm256_setzero_pd();
995 fjy3 = _mm256_setzero_pd();
996 fjz3 = _mm256_setzero_pd();
998 /**************************
999 * CALCULATE INTERACTIONS *
1000 **************************/
1002 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1005 r00 = _mm256_mul_pd(rsq00,rinv00);
1006 r00 = _mm256_andnot_pd(dummy_mask,r00);
1008 /* Analytical LJ-PME */
1009 rinvsix = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1010 ewcljrsq = _mm256_mul_pd(ewclj2,rsq00);
1011 ewclj6 = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
1012 exponent = avx256_exp_d(ewcljrsq);
1013 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1014 poly = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
1015 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
1016 vvdw6 = _mm256_mul_pd(_mm256_sub_pd(c6_00,_mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly))),rinvsix);
1017 vvdw12 = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
1018 vvdw = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
1019 _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_add_pd(_mm256_mul_pd(c6_00,sh_vdw_invrcut6),_mm256_mul_pd(c6grid_00,sh_lj_ewald))),one_sixth));
1020 /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
1021 fvdw = _mm256_mul_pd(_mm256_sub_pd(vvdw12,_mm256_sub_pd(vvdw6,_mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6)))),rinvsq00);
1023 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1025 /* Update potential sum for this i atom from the interaction with this j atom. */
1026 vvdw = _mm256_and_pd(vvdw,cutoff_mask);
1027 vvdw = _mm256_andnot_pd(dummy_mask,vvdw);
1028 vvdwsum = _mm256_add_pd(vvdwsum,vvdw);
1032 fscal = _mm256_and_pd(fscal,cutoff_mask);
1034 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1036 /* Calculate temporary vectorial force */
1037 tx = _mm256_mul_pd(fscal,dx00);
1038 ty = _mm256_mul_pd(fscal,dy00);
1039 tz = _mm256_mul_pd(fscal,dz00);
1041 /* Update vectorial force */
1042 fix0 = _mm256_add_pd(fix0,tx);
1043 fiy0 = _mm256_add_pd(fiy0,ty);
1044 fiz0 = _mm256_add_pd(fiz0,tz);
1046 fjx0 = _mm256_add_pd(fjx0,tx);
1047 fjy0 = _mm256_add_pd(fjy0,ty);
1048 fjz0 = _mm256_add_pd(fjz0,tz);
1052 /**************************
1053 * CALCULATE INTERACTIONS *
1054 **************************/
1056 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1059 r11 = _mm256_mul_pd(rsq11,rinv11);
1060 r11 = _mm256_andnot_pd(dummy_mask,r11);
1062 /* EWALD ELECTROSTATICS */
1064 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1065 ewrt = _mm256_mul_pd(r11,ewtabscale);
1066 ewitab = _mm256_cvttpd_epi32(ewrt);
1067 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1068 ewitab = _mm_slli_epi32(ewitab,2);
1069 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1070 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1071 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1072 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1073 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1074 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1075 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1076 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
1077 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
1079 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1081 /* Update potential sum for this i atom from the interaction with this j atom. */
1082 velec = _mm256_and_pd(velec,cutoff_mask);
1083 velec = _mm256_andnot_pd(dummy_mask,velec);
1084 velecsum = _mm256_add_pd(velecsum,velec);
1088 fscal = _mm256_and_pd(fscal,cutoff_mask);
1090 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1092 /* Calculate temporary vectorial force */
1093 tx = _mm256_mul_pd(fscal,dx11);
1094 ty = _mm256_mul_pd(fscal,dy11);
1095 tz = _mm256_mul_pd(fscal,dz11);
1097 /* Update vectorial force */
1098 fix1 = _mm256_add_pd(fix1,tx);
1099 fiy1 = _mm256_add_pd(fiy1,ty);
1100 fiz1 = _mm256_add_pd(fiz1,tz);
1102 fjx1 = _mm256_add_pd(fjx1,tx);
1103 fjy1 = _mm256_add_pd(fjy1,ty);
1104 fjz1 = _mm256_add_pd(fjz1,tz);
1108 /**************************
1109 * CALCULATE INTERACTIONS *
1110 **************************/
1112 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1115 r12 = _mm256_mul_pd(rsq12,rinv12);
1116 r12 = _mm256_andnot_pd(dummy_mask,r12);
1118 /* EWALD ELECTROSTATICS */
1120 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1121 ewrt = _mm256_mul_pd(r12,ewtabscale);
1122 ewitab = _mm256_cvttpd_epi32(ewrt);
1123 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1124 ewitab = _mm_slli_epi32(ewitab,2);
1125 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1126 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1127 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1128 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1129 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1130 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1131 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1132 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
1133 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
1135 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1137 /* Update potential sum for this i atom from the interaction with this j atom. */
1138 velec = _mm256_and_pd(velec,cutoff_mask);
1139 velec = _mm256_andnot_pd(dummy_mask,velec);
1140 velecsum = _mm256_add_pd(velecsum,velec);
1144 fscal = _mm256_and_pd(fscal,cutoff_mask);
1146 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1148 /* Calculate temporary vectorial force */
1149 tx = _mm256_mul_pd(fscal,dx12);
1150 ty = _mm256_mul_pd(fscal,dy12);
1151 tz = _mm256_mul_pd(fscal,dz12);
1153 /* Update vectorial force */
1154 fix1 = _mm256_add_pd(fix1,tx);
1155 fiy1 = _mm256_add_pd(fiy1,ty);
1156 fiz1 = _mm256_add_pd(fiz1,tz);
1158 fjx2 = _mm256_add_pd(fjx2,tx);
1159 fjy2 = _mm256_add_pd(fjy2,ty);
1160 fjz2 = _mm256_add_pd(fjz2,tz);
1164 /**************************
1165 * CALCULATE INTERACTIONS *
1166 **************************/
1168 if (gmx_mm256_any_lt(rsq13,rcutoff2))
1171 r13 = _mm256_mul_pd(rsq13,rinv13);
1172 r13 = _mm256_andnot_pd(dummy_mask,r13);
1174 /* EWALD ELECTROSTATICS */
1176 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1177 ewrt = _mm256_mul_pd(r13,ewtabscale);
1178 ewitab = _mm256_cvttpd_epi32(ewrt);
1179 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1180 ewitab = _mm_slli_epi32(ewitab,2);
1181 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1182 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1183 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1184 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1185 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1186 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1187 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1188 velec = _mm256_mul_pd(qq13,_mm256_sub_pd(_mm256_sub_pd(rinv13,sh_ewald),velec));
1189 felec = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
1191 cutoff_mask = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
1193 /* Update potential sum for this i atom from the interaction with this j atom. */
1194 velec = _mm256_and_pd(velec,cutoff_mask);
1195 velec = _mm256_andnot_pd(dummy_mask,velec);
1196 velecsum = _mm256_add_pd(velecsum,velec);
1200 fscal = _mm256_and_pd(fscal,cutoff_mask);
1202 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1204 /* Calculate temporary vectorial force */
1205 tx = _mm256_mul_pd(fscal,dx13);
1206 ty = _mm256_mul_pd(fscal,dy13);
1207 tz = _mm256_mul_pd(fscal,dz13);
1209 /* Update vectorial force */
1210 fix1 = _mm256_add_pd(fix1,tx);
1211 fiy1 = _mm256_add_pd(fiy1,ty);
1212 fiz1 = _mm256_add_pd(fiz1,tz);
1214 fjx3 = _mm256_add_pd(fjx3,tx);
1215 fjy3 = _mm256_add_pd(fjy3,ty);
1216 fjz3 = _mm256_add_pd(fjz3,tz);
1220 /**************************
1221 * CALCULATE INTERACTIONS *
1222 **************************/
1224 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1227 r21 = _mm256_mul_pd(rsq21,rinv21);
1228 r21 = _mm256_andnot_pd(dummy_mask,r21);
1230 /* EWALD ELECTROSTATICS */
1232 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1233 ewrt = _mm256_mul_pd(r21,ewtabscale);
1234 ewitab = _mm256_cvttpd_epi32(ewrt);
1235 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1236 ewitab = _mm_slli_epi32(ewitab,2);
1237 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1238 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1239 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1240 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1241 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1242 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1243 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1244 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
1245 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
1247 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1249 /* Update potential sum for this i atom from the interaction with this j atom. */
1250 velec = _mm256_and_pd(velec,cutoff_mask);
1251 velec = _mm256_andnot_pd(dummy_mask,velec);
1252 velecsum = _mm256_add_pd(velecsum,velec);
1256 fscal = _mm256_and_pd(fscal,cutoff_mask);
1258 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1260 /* Calculate temporary vectorial force */
1261 tx = _mm256_mul_pd(fscal,dx21);
1262 ty = _mm256_mul_pd(fscal,dy21);
1263 tz = _mm256_mul_pd(fscal,dz21);
1265 /* Update vectorial force */
1266 fix2 = _mm256_add_pd(fix2,tx);
1267 fiy2 = _mm256_add_pd(fiy2,ty);
1268 fiz2 = _mm256_add_pd(fiz2,tz);
1270 fjx1 = _mm256_add_pd(fjx1,tx);
1271 fjy1 = _mm256_add_pd(fjy1,ty);
1272 fjz1 = _mm256_add_pd(fjz1,tz);
1276 /**************************
1277 * CALCULATE INTERACTIONS *
1278 **************************/
1280 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1283 r22 = _mm256_mul_pd(rsq22,rinv22);
1284 r22 = _mm256_andnot_pd(dummy_mask,r22);
1286 /* EWALD ELECTROSTATICS */
1288 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1289 ewrt = _mm256_mul_pd(r22,ewtabscale);
1290 ewitab = _mm256_cvttpd_epi32(ewrt);
1291 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1292 ewitab = _mm_slli_epi32(ewitab,2);
1293 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1294 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1295 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1296 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1297 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1298 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1299 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1300 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
1301 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
1303 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1305 /* Update potential sum for this i atom from the interaction with this j atom. */
1306 velec = _mm256_and_pd(velec,cutoff_mask);
1307 velec = _mm256_andnot_pd(dummy_mask,velec);
1308 velecsum = _mm256_add_pd(velecsum,velec);
1312 fscal = _mm256_and_pd(fscal,cutoff_mask);
1314 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1316 /* Calculate temporary vectorial force */
1317 tx = _mm256_mul_pd(fscal,dx22);
1318 ty = _mm256_mul_pd(fscal,dy22);
1319 tz = _mm256_mul_pd(fscal,dz22);
1321 /* Update vectorial force */
1322 fix2 = _mm256_add_pd(fix2,tx);
1323 fiy2 = _mm256_add_pd(fiy2,ty);
1324 fiz2 = _mm256_add_pd(fiz2,tz);
1326 fjx2 = _mm256_add_pd(fjx2,tx);
1327 fjy2 = _mm256_add_pd(fjy2,ty);
1328 fjz2 = _mm256_add_pd(fjz2,tz);
1332 /**************************
1333 * CALCULATE INTERACTIONS *
1334 **************************/
1336 if (gmx_mm256_any_lt(rsq23,rcutoff2))
1339 r23 = _mm256_mul_pd(rsq23,rinv23);
1340 r23 = _mm256_andnot_pd(dummy_mask,r23);
1342 /* EWALD ELECTROSTATICS */
1344 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1345 ewrt = _mm256_mul_pd(r23,ewtabscale);
1346 ewitab = _mm256_cvttpd_epi32(ewrt);
1347 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1348 ewitab = _mm_slli_epi32(ewitab,2);
1349 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1350 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1351 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1352 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1353 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1354 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1355 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1356 velec = _mm256_mul_pd(qq23,_mm256_sub_pd(_mm256_sub_pd(rinv23,sh_ewald),velec));
1357 felec = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
1359 cutoff_mask = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
1361 /* Update potential sum for this i atom from the interaction with this j atom. */
1362 velec = _mm256_and_pd(velec,cutoff_mask);
1363 velec = _mm256_andnot_pd(dummy_mask,velec);
1364 velecsum = _mm256_add_pd(velecsum,velec);
1368 fscal = _mm256_and_pd(fscal,cutoff_mask);
1370 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1372 /* Calculate temporary vectorial force */
1373 tx = _mm256_mul_pd(fscal,dx23);
1374 ty = _mm256_mul_pd(fscal,dy23);
1375 tz = _mm256_mul_pd(fscal,dz23);
1377 /* Update vectorial force */
1378 fix2 = _mm256_add_pd(fix2,tx);
1379 fiy2 = _mm256_add_pd(fiy2,ty);
1380 fiz2 = _mm256_add_pd(fiz2,tz);
1382 fjx3 = _mm256_add_pd(fjx3,tx);
1383 fjy3 = _mm256_add_pd(fjy3,ty);
1384 fjz3 = _mm256_add_pd(fjz3,tz);
1388 /**************************
1389 * CALCULATE INTERACTIONS *
1390 **************************/
1392 if (gmx_mm256_any_lt(rsq31,rcutoff2))
1395 r31 = _mm256_mul_pd(rsq31,rinv31);
1396 r31 = _mm256_andnot_pd(dummy_mask,r31);
1398 /* EWALD ELECTROSTATICS */
1400 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1401 ewrt = _mm256_mul_pd(r31,ewtabscale);
1402 ewitab = _mm256_cvttpd_epi32(ewrt);
1403 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1404 ewitab = _mm_slli_epi32(ewitab,2);
1405 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1406 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1407 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1408 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1409 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1410 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1411 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1412 velec = _mm256_mul_pd(qq31,_mm256_sub_pd(_mm256_sub_pd(rinv31,sh_ewald),velec));
1413 felec = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
1415 cutoff_mask = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
1417 /* Update potential sum for this i atom from the interaction with this j atom. */
1418 velec = _mm256_and_pd(velec,cutoff_mask);
1419 velec = _mm256_andnot_pd(dummy_mask,velec);
1420 velecsum = _mm256_add_pd(velecsum,velec);
1424 fscal = _mm256_and_pd(fscal,cutoff_mask);
1426 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1428 /* Calculate temporary vectorial force */
1429 tx = _mm256_mul_pd(fscal,dx31);
1430 ty = _mm256_mul_pd(fscal,dy31);
1431 tz = _mm256_mul_pd(fscal,dz31);
1433 /* Update vectorial force */
1434 fix3 = _mm256_add_pd(fix3,tx);
1435 fiy3 = _mm256_add_pd(fiy3,ty);
1436 fiz3 = _mm256_add_pd(fiz3,tz);
1438 fjx1 = _mm256_add_pd(fjx1,tx);
1439 fjy1 = _mm256_add_pd(fjy1,ty);
1440 fjz1 = _mm256_add_pd(fjz1,tz);
1444 /**************************
1445 * CALCULATE INTERACTIONS *
1446 **************************/
1448 if (gmx_mm256_any_lt(rsq32,rcutoff2))
1451 r32 = _mm256_mul_pd(rsq32,rinv32);
1452 r32 = _mm256_andnot_pd(dummy_mask,r32);
1454 /* EWALD ELECTROSTATICS */
1456 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1457 ewrt = _mm256_mul_pd(r32,ewtabscale);
1458 ewitab = _mm256_cvttpd_epi32(ewrt);
1459 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1460 ewitab = _mm_slli_epi32(ewitab,2);
1461 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1462 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1463 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1464 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1465 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1466 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1467 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1468 velec = _mm256_mul_pd(qq32,_mm256_sub_pd(_mm256_sub_pd(rinv32,sh_ewald),velec));
1469 felec = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
1471 cutoff_mask = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
1473 /* Update potential sum for this i atom from the interaction with this j atom. */
1474 velec = _mm256_and_pd(velec,cutoff_mask);
1475 velec = _mm256_andnot_pd(dummy_mask,velec);
1476 velecsum = _mm256_add_pd(velecsum,velec);
1480 fscal = _mm256_and_pd(fscal,cutoff_mask);
1482 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1484 /* Calculate temporary vectorial force */
1485 tx = _mm256_mul_pd(fscal,dx32);
1486 ty = _mm256_mul_pd(fscal,dy32);
1487 tz = _mm256_mul_pd(fscal,dz32);
1489 /* Update vectorial force */
1490 fix3 = _mm256_add_pd(fix3,tx);
1491 fiy3 = _mm256_add_pd(fiy3,ty);
1492 fiz3 = _mm256_add_pd(fiz3,tz);
1494 fjx2 = _mm256_add_pd(fjx2,tx);
1495 fjy2 = _mm256_add_pd(fjy2,ty);
1496 fjz2 = _mm256_add_pd(fjz2,tz);
1500 /**************************
1501 * CALCULATE INTERACTIONS *
1502 **************************/
1504 if (gmx_mm256_any_lt(rsq33,rcutoff2))
1507 r33 = _mm256_mul_pd(rsq33,rinv33);
1508 r33 = _mm256_andnot_pd(dummy_mask,r33);
1510 /* EWALD ELECTROSTATICS */
1512 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1513 ewrt = _mm256_mul_pd(r33,ewtabscale);
1514 ewitab = _mm256_cvttpd_epi32(ewrt);
1515 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1516 ewitab = _mm_slli_epi32(ewitab,2);
1517 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1518 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1519 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1520 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1521 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1522 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1523 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1524 velec = _mm256_mul_pd(qq33,_mm256_sub_pd(_mm256_sub_pd(rinv33,sh_ewald),velec));
1525 felec = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
1527 cutoff_mask = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
1529 /* Update potential sum for this i atom from the interaction with this j atom. */
1530 velec = _mm256_and_pd(velec,cutoff_mask);
1531 velec = _mm256_andnot_pd(dummy_mask,velec);
1532 velecsum = _mm256_add_pd(velecsum,velec);
1536 fscal = _mm256_and_pd(fscal,cutoff_mask);
1538 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1540 /* Calculate temporary vectorial force */
1541 tx = _mm256_mul_pd(fscal,dx33);
1542 ty = _mm256_mul_pd(fscal,dy33);
1543 tz = _mm256_mul_pd(fscal,dz33);
1545 /* Update vectorial force */
1546 fix3 = _mm256_add_pd(fix3,tx);
1547 fiy3 = _mm256_add_pd(fiy3,ty);
1548 fiz3 = _mm256_add_pd(fiz3,tz);
1550 fjx3 = _mm256_add_pd(fjx3,tx);
1551 fjy3 = _mm256_add_pd(fjy3,ty);
1552 fjz3 = _mm256_add_pd(fjz3,tz);
1556 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1557 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1558 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1559 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1561 gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1562 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1563 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1565 /* Inner loop uses 489 flops */
1568 /* End of innermost loop */
1570 gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1571 f+i_coord_offset,fshift+i_shift_offset);
1574 /* Update potential energies */
1575 gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1576 gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
1578 /* Increment number of inner iterations */
1579 inneriter += j_index_end - j_index_start;
1581 /* Outer loop uses 26 flops */
1584 /* Increment number of outer iterations */
1587 /* Update outer/inner flops */
1589 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*489);
1592 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_F_avx_256_double
1593 * Electrostatics interaction: Ewald
1594 * VdW interaction: LJEwald
1595 * Geometry: Water4-Water4
1596 * Calculate force/pot: Force
1599 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_F_avx_256_double
1600 (t_nblist * gmx_restrict nlist,
1601 rvec * gmx_restrict xx,
1602 rvec * gmx_restrict ff,
1603 struct t_forcerec * gmx_restrict fr,
1604 t_mdatoms * gmx_restrict mdatoms,
1605 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1606 t_nrnb * gmx_restrict nrnb)
1608 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1609 * just 0 for non-waters.
1610 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1611 * jnr indices corresponding to data put in the four positions in the SIMD register.
1613 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1614 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1615 int jnrA,jnrB,jnrC,jnrD;
1616 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1617 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1618 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1619 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1620 real rcutoff_scalar;
1621 real *shiftvec,*fshift,*x,*f;
1622 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1623 real scratch[4*DIM];
1624 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1625 real * vdwioffsetptr0;
1626 real * vdwgridioffsetptr0;
1627 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1628 real * vdwioffsetptr1;
1629 real * vdwgridioffsetptr1;
1630 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1631 real * vdwioffsetptr2;
1632 real * vdwgridioffsetptr2;
1633 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1634 real * vdwioffsetptr3;
1635 real * vdwgridioffsetptr3;
1636 __m256d ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1637 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1638 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1639 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1640 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1641 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1642 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1643 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
1644 __m256d jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1645 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1646 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1647 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1648 __m256d dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1649 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1650 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1651 __m256d dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1652 __m256d dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1653 __m256d dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1654 __m256d dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1655 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
1658 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1661 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
1662 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
1674 __m256d ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
1675 __m256d one_half = _mm256_set1_pd(0.5);
1676 __m256d minus_one = _mm256_set1_pd(-1.0);
1678 __m256d ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1679 __m256d beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1681 __m256d dummy_mask,cutoff_mask;
1682 __m128 tmpmask0,tmpmask1;
1683 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1684 __m256d one = _mm256_set1_pd(1.0);
1685 __m256d two = _mm256_set1_pd(2.0);
1691 jindex = nlist->jindex;
1693 shiftidx = nlist->shift;
1695 shiftvec = fr->shift_vec[0];
1696 fshift = fr->fshift[0];
1697 facel = _mm256_set1_pd(fr->ic->epsfac);
1698 charge = mdatoms->chargeA;
1699 nvdwtype = fr->ntype;
1700 vdwparam = fr->nbfp;
1701 vdwtype = mdatoms->typeA;
1702 vdwgridparam = fr->ljpme_c6grid;
1703 sh_lj_ewald = _mm256_set1_pd(fr->ic->sh_lj_ewald);
1704 ewclj = _mm256_set1_pd(fr->ic->ewaldcoeff_lj);
1705 ewclj2 = _mm256_mul_pd(minus_one,_mm256_mul_pd(ewclj,ewclj));
1707 sh_ewald = _mm256_set1_pd(fr->ic->sh_ewald);
1708 beta = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
1709 beta2 = _mm256_mul_pd(beta,beta);
1710 beta3 = _mm256_mul_pd(beta,beta2);
1712 ewtab = fr->ic->tabq_coul_F;
1713 ewtabscale = _mm256_set1_pd(fr->ic->tabq_scale);
1714 ewtabhalfspace = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
1716 /* Setup water-specific parameters */
1717 inr = nlist->iinr[0];
1718 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1719 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1720 iq3 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
1721 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1722 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
1724 jq1 = _mm256_set1_pd(charge[inr+1]);
1725 jq2 = _mm256_set1_pd(charge[inr+2]);
1726 jq3 = _mm256_set1_pd(charge[inr+3]);
1727 vdwjidx0A = 2*vdwtype[inr+0];
1728 c6_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1729 c12_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1730 c6grid_00 = _mm256_set1_pd(vdwgridioffsetptr0[vdwjidx0A]);
1731 qq11 = _mm256_mul_pd(iq1,jq1);
1732 qq12 = _mm256_mul_pd(iq1,jq2);
1733 qq13 = _mm256_mul_pd(iq1,jq3);
1734 qq21 = _mm256_mul_pd(iq2,jq1);
1735 qq22 = _mm256_mul_pd(iq2,jq2);
1736 qq23 = _mm256_mul_pd(iq2,jq3);
1737 qq31 = _mm256_mul_pd(iq3,jq1);
1738 qq32 = _mm256_mul_pd(iq3,jq2);
1739 qq33 = _mm256_mul_pd(iq3,jq3);
1741 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1742 rcutoff_scalar = fr->ic->rcoulomb;
1743 rcutoff = _mm256_set1_pd(rcutoff_scalar);
1744 rcutoff2 = _mm256_mul_pd(rcutoff,rcutoff);
1746 sh_vdw_invrcut6 = _mm256_set1_pd(fr->ic->sh_invrc6);
1747 rvdw = _mm256_set1_pd(fr->ic->rvdw);
1749 /* Avoid stupid compiler warnings */
1750 jnrA = jnrB = jnrC = jnrD = 0;
1751 j_coord_offsetA = 0;
1752 j_coord_offsetB = 0;
1753 j_coord_offsetC = 0;
1754 j_coord_offsetD = 0;
1759 for(iidx=0;iidx<4*DIM;iidx++)
1761 scratch[iidx] = 0.0;
1764 /* Start outer loop over neighborlists */
1765 for(iidx=0; iidx<nri; iidx++)
1767 /* Load shift vector for this list */
1768 i_shift_offset = DIM*shiftidx[iidx];
1770 /* Load limits for loop over neighbors */
1771 j_index_start = jindex[iidx];
1772 j_index_end = jindex[iidx+1];
1774 /* Get outer coordinate index */
1776 i_coord_offset = DIM*inr;
1778 /* Load i particle coords and add shift vector */
1779 gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1780 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1782 fix0 = _mm256_setzero_pd();
1783 fiy0 = _mm256_setzero_pd();
1784 fiz0 = _mm256_setzero_pd();
1785 fix1 = _mm256_setzero_pd();
1786 fiy1 = _mm256_setzero_pd();
1787 fiz1 = _mm256_setzero_pd();
1788 fix2 = _mm256_setzero_pd();
1789 fiy2 = _mm256_setzero_pd();
1790 fiz2 = _mm256_setzero_pd();
1791 fix3 = _mm256_setzero_pd();
1792 fiy3 = _mm256_setzero_pd();
1793 fiz3 = _mm256_setzero_pd();
1795 /* Start inner kernel loop */
1796 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1799 /* Get j neighbor index, and coordinate index */
1801 jnrB = jjnr[jidx+1];
1802 jnrC = jjnr[jidx+2];
1803 jnrD = jjnr[jidx+3];
1804 j_coord_offsetA = DIM*jnrA;
1805 j_coord_offsetB = DIM*jnrB;
1806 j_coord_offsetC = DIM*jnrC;
1807 j_coord_offsetD = DIM*jnrD;
1809 /* load j atom coordinates */
1810 gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1811 x+j_coord_offsetC,x+j_coord_offsetD,
1812 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1813 &jy2,&jz2,&jx3,&jy3,&jz3);
1815 /* Calculate displacement vector */
1816 dx00 = _mm256_sub_pd(ix0,jx0);
1817 dy00 = _mm256_sub_pd(iy0,jy0);
1818 dz00 = _mm256_sub_pd(iz0,jz0);
1819 dx11 = _mm256_sub_pd(ix1,jx1);
1820 dy11 = _mm256_sub_pd(iy1,jy1);
1821 dz11 = _mm256_sub_pd(iz1,jz1);
1822 dx12 = _mm256_sub_pd(ix1,jx2);
1823 dy12 = _mm256_sub_pd(iy1,jy2);
1824 dz12 = _mm256_sub_pd(iz1,jz2);
1825 dx13 = _mm256_sub_pd(ix1,jx3);
1826 dy13 = _mm256_sub_pd(iy1,jy3);
1827 dz13 = _mm256_sub_pd(iz1,jz3);
1828 dx21 = _mm256_sub_pd(ix2,jx1);
1829 dy21 = _mm256_sub_pd(iy2,jy1);
1830 dz21 = _mm256_sub_pd(iz2,jz1);
1831 dx22 = _mm256_sub_pd(ix2,jx2);
1832 dy22 = _mm256_sub_pd(iy2,jy2);
1833 dz22 = _mm256_sub_pd(iz2,jz2);
1834 dx23 = _mm256_sub_pd(ix2,jx3);
1835 dy23 = _mm256_sub_pd(iy2,jy3);
1836 dz23 = _mm256_sub_pd(iz2,jz3);
1837 dx31 = _mm256_sub_pd(ix3,jx1);
1838 dy31 = _mm256_sub_pd(iy3,jy1);
1839 dz31 = _mm256_sub_pd(iz3,jz1);
1840 dx32 = _mm256_sub_pd(ix3,jx2);
1841 dy32 = _mm256_sub_pd(iy3,jy2);
1842 dz32 = _mm256_sub_pd(iz3,jz2);
1843 dx33 = _mm256_sub_pd(ix3,jx3);
1844 dy33 = _mm256_sub_pd(iy3,jy3);
1845 dz33 = _mm256_sub_pd(iz3,jz3);
1847 /* Calculate squared distance and things based on it */
1848 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1849 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1850 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1851 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1852 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1853 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1854 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1855 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1856 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1857 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1859 rinv00 = avx256_invsqrt_d(rsq00);
1860 rinv11 = avx256_invsqrt_d(rsq11);
1861 rinv12 = avx256_invsqrt_d(rsq12);
1862 rinv13 = avx256_invsqrt_d(rsq13);
1863 rinv21 = avx256_invsqrt_d(rsq21);
1864 rinv22 = avx256_invsqrt_d(rsq22);
1865 rinv23 = avx256_invsqrt_d(rsq23);
1866 rinv31 = avx256_invsqrt_d(rsq31);
1867 rinv32 = avx256_invsqrt_d(rsq32);
1868 rinv33 = avx256_invsqrt_d(rsq33);
1870 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
1871 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1872 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1873 rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
1874 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1875 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1876 rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
1877 rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
1878 rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
1879 rinvsq33 = _mm256_mul_pd(rinv33,rinv33);
1881 fjx0 = _mm256_setzero_pd();
1882 fjy0 = _mm256_setzero_pd();
1883 fjz0 = _mm256_setzero_pd();
1884 fjx1 = _mm256_setzero_pd();
1885 fjy1 = _mm256_setzero_pd();
1886 fjz1 = _mm256_setzero_pd();
1887 fjx2 = _mm256_setzero_pd();
1888 fjy2 = _mm256_setzero_pd();
1889 fjz2 = _mm256_setzero_pd();
1890 fjx3 = _mm256_setzero_pd();
1891 fjy3 = _mm256_setzero_pd();
1892 fjz3 = _mm256_setzero_pd();
1894 /**************************
1895 * CALCULATE INTERACTIONS *
1896 **************************/
1898 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1901 r00 = _mm256_mul_pd(rsq00,rinv00);
1903 /* Analytical LJ-PME */
1904 rinvsix = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1905 ewcljrsq = _mm256_mul_pd(ewclj2,rsq00);
1906 ewclj6 = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
1907 exponent = avx256_exp_d(ewcljrsq);
1908 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1909 poly = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
1910 /* f6A = 6 * C6grid * (1 - poly) */
1911 f6A = _mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly));
1912 /* f6B = C6grid * exponent * beta^6 */
1913 f6B = _mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6));
1914 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1915 fvdw = _mm256_mul_pd(_mm256_add_pd(_mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),_mm256_sub_pd(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1917 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1921 fscal = _mm256_and_pd(fscal,cutoff_mask);
1923 /* Calculate temporary vectorial force */
1924 tx = _mm256_mul_pd(fscal,dx00);
1925 ty = _mm256_mul_pd(fscal,dy00);
1926 tz = _mm256_mul_pd(fscal,dz00);
1928 /* Update vectorial force */
1929 fix0 = _mm256_add_pd(fix0,tx);
1930 fiy0 = _mm256_add_pd(fiy0,ty);
1931 fiz0 = _mm256_add_pd(fiz0,tz);
1933 fjx0 = _mm256_add_pd(fjx0,tx);
1934 fjy0 = _mm256_add_pd(fjy0,ty);
1935 fjz0 = _mm256_add_pd(fjz0,tz);
1939 /**************************
1940 * CALCULATE INTERACTIONS *
1941 **************************/
1943 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1946 r11 = _mm256_mul_pd(rsq11,rinv11);
1948 /* EWALD ELECTROSTATICS */
1950 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1951 ewrt = _mm256_mul_pd(r11,ewtabscale);
1952 ewitab = _mm256_cvttpd_epi32(ewrt);
1953 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1954 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1955 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1957 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1958 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
1960 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1964 fscal = _mm256_and_pd(fscal,cutoff_mask);
1966 /* Calculate temporary vectorial force */
1967 tx = _mm256_mul_pd(fscal,dx11);
1968 ty = _mm256_mul_pd(fscal,dy11);
1969 tz = _mm256_mul_pd(fscal,dz11);
1971 /* Update vectorial force */
1972 fix1 = _mm256_add_pd(fix1,tx);
1973 fiy1 = _mm256_add_pd(fiy1,ty);
1974 fiz1 = _mm256_add_pd(fiz1,tz);
1976 fjx1 = _mm256_add_pd(fjx1,tx);
1977 fjy1 = _mm256_add_pd(fjy1,ty);
1978 fjz1 = _mm256_add_pd(fjz1,tz);
1982 /**************************
1983 * CALCULATE INTERACTIONS *
1984 **************************/
1986 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1989 r12 = _mm256_mul_pd(rsq12,rinv12);
1991 /* EWALD ELECTROSTATICS */
1993 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1994 ewrt = _mm256_mul_pd(r12,ewtabscale);
1995 ewitab = _mm256_cvttpd_epi32(ewrt);
1996 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1997 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1998 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2000 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2001 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2003 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2007 fscal = _mm256_and_pd(fscal,cutoff_mask);
2009 /* Calculate temporary vectorial force */
2010 tx = _mm256_mul_pd(fscal,dx12);
2011 ty = _mm256_mul_pd(fscal,dy12);
2012 tz = _mm256_mul_pd(fscal,dz12);
2014 /* Update vectorial force */
2015 fix1 = _mm256_add_pd(fix1,tx);
2016 fiy1 = _mm256_add_pd(fiy1,ty);
2017 fiz1 = _mm256_add_pd(fiz1,tz);
2019 fjx2 = _mm256_add_pd(fjx2,tx);
2020 fjy2 = _mm256_add_pd(fjy2,ty);
2021 fjz2 = _mm256_add_pd(fjz2,tz);
2025 /**************************
2026 * CALCULATE INTERACTIONS *
2027 **************************/
2029 if (gmx_mm256_any_lt(rsq13,rcutoff2))
2032 r13 = _mm256_mul_pd(rsq13,rinv13);
2034 /* EWALD ELECTROSTATICS */
2036 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2037 ewrt = _mm256_mul_pd(r13,ewtabscale);
2038 ewitab = _mm256_cvttpd_epi32(ewrt);
2039 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2040 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2041 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2043 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2044 felec = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
2046 cutoff_mask = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
2050 fscal = _mm256_and_pd(fscal,cutoff_mask);
2052 /* Calculate temporary vectorial force */
2053 tx = _mm256_mul_pd(fscal,dx13);
2054 ty = _mm256_mul_pd(fscal,dy13);
2055 tz = _mm256_mul_pd(fscal,dz13);
2057 /* Update vectorial force */
2058 fix1 = _mm256_add_pd(fix1,tx);
2059 fiy1 = _mm256_add_pd(fiy1,ty);
2060 fiz1 = _mm256_add_pd(fiz1,tz);
2062 fjx3 = _mm256_add_pd(fjx3,tx);
2063 fjy3 = _mm256_add_pd(fjy3,ty);
2064 fjz3 = _mm256_add_pd(fjz3,tz);
2068 /**************************
2069 * CALCULATE INTERACTIONS *
2070 **************************/
2072 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2075 r21 = _mm256_mul_pd(rsq21,rinv21);
2077 /* EWALD ELECTROSTATICS */
2079 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2080 ewrt = _mm256_mul_pd(r21,ewtabscale);
2081 ewitab = _mm256_cvttpd_epi32(ewrt);
2082 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2083 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2084 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2086 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2087 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2089 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2093 fscal = _mm256_and_pd(fscal,cutoff_mask);
2095 /* Calculate temporary vectorial force */
2096 tx = _mm256_mul_pd(fscal,dx21);
2097 ty = _mm256_mul_pd(fscal,dy21);
2098 tz = _mm256_mul_pd(fscal,dz21);
2100 /* Update vectorial force */
2101 fix2 = _mm256_add_pd(fix2,tx);
2102 fiy2 = _mm256_add_pd(fiy2,ty);
2103 fiz2 = _mm256_add_pd(fiz2,tz);
2105 fjx1 = _mm256_add_pd(fjx1,tx);
2106 fjy1 = _mm256_add_pd(fjy1,ty);
2107 fjz1 = _mm256_add_pd(fjz1,tz);
2111 /**************************
2112 * CALCULATE INTERACTIONS *
2113 **************************/
2115 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2118 r22 = _mm256_mul_pd(rsq22,rinv22);
2120 /* EWALD ELECTROSTATICS */
2122 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2123 ewrt = _mm256_mul_pd(r22,ewtabscale);
2124 ewitab = _mm256_cvttpd_epi32(ewrt);
2125 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2126 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2127 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2129 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2130 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
2132 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2136 fscal = _mm256_and_pd(fscal,cutoff_mask);
2138 /* Calculate temporary vectorial force */
2139 tx = _mm256_mul_pd(fscal,dx22);
2140 ty = _mm256_mul_pd(fscal,dy22);
2141 tz = _mm256_mul_pd(fscal,dz22);
2143 /* Update vectorial force */
2144 fix2 = _mm256_add_pd(fix2,tx);
2145 fiy2 = _mm256_add_pd(fiy2,ty);
2146 fiz2 = _mm256_add_pd(fiz2,tz);
2148 fjx2 = _mm256_add_pd(fjx2,tx);
2149 fjy2 = _mm256_add_pd(fjy2,ty);
2150 fjz2 = _mm256_add_pd(fjz2,tz);
2154 /**************************
2155 * CALCULATE INTERACTIONS *
2156 **************************/
2158 if (gmx_mm256_any_lt(rsq23,rcutoff2))
2161 r23 = _mm256_mul_pd(rsq23,rinv23);
2163 /* EWALD ELECTROSTATICS */
2165 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2166 ewrt = _mm256_mul_pd(r23,ewtabscale);
2167 ewitab = _mm256_cvttpd_epi32(ewrt);
2168 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2169 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2170 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2172 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2173 felec = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
2175 cutoff_mask = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
2179 fscal = _mm256_and_pd(fscal,cutoff_mask);
2181 /* Calculate temporary vectorial force */
2182 tx = _mm256_mul_pd(fscal,dx23);
2183 ty = _mm256_mul_pd(fscal,dy23);
2184 tz = _mm256_mul_pd(fscal,dz23);
2186 /* Update vectorial force */
2187 fix2 = _mm256_add_pd(fix2,tx);
2188 fiy2 = _mm256_add_pd(fiy2,ty);
2189 fiz2 = _mm256_add_pd(fiz2,tz);
2191 fjx3 = _mm256_add_pd(fjx3,tx);
2192 fjy3 = _mm256_add_pd(fjy3,ty);
2193 fjz3 = _mm256_add_pd(fjz3,tz);
2197 /**************************
2198 * CALCULATE INTERACTIONS *
2199 **************************/
2201 if (gmx_mm256_any_lt(rsq31,rcutoff2))
2204 r31 = _mm256_mul_pd(rsq31,rinv31);
2206 /* EWALD ELECTROSTATICS */
2208 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2209 ewrt = _mm256_mul_pd(r31,ewtabscale);
2210 ewitab = _mm256_cvttpd_epi32(ewrt);
2211 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2212 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2213 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2215 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2216 felec = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
2218 cutoff_mask = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
2222 fscal = _mm256_and_pd(fscal,cutoff_mask);
2224 /* Calculate temporary vectorial force */
2225 tx = _mm256_mul_pd(fscal,dx31);
2226 ty = _mm256_mul_pd(fscal,dy31);
2227 tz = _mm256_mul_pd(fscal,dz31);
2229 /* Update vectorial force */
2230 fix3 = _mm256_add_pd(fix3,tx);
2231 fiy3 = _mm256_add_pd(fiy3,ty);
2232 fiz3 = _mm256_add_pd(fiz3,tz);
2234 fjx1 = _mm256_add_pd(fjx1,tx);
2235 fjy1 = _mm256_add_pd(fjy1,ty);
2236 fjz1 = _mm256_add_pd(fjz1,tz);
2240 /**************************
2241 * CALCULATE INTERACTIONS *
2242 **************************/
2244 if (gmx_mm256_any_lt(rsq32,rcutoff2))
2247 r32 = _mm256_mul_pd(rsq32,rinv32);
2249 /* EWALD ELECTROSTATICS */
2251 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2252 ewrt = _mm256_mul_pd(r32,ewtabscale);
2253 ewitab = _mm256_cvttpd_epi32(ewrt);
2254 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2255 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2256 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2258 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2259 felec = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
2261 cutoff_mask = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
2265 fscal = _mm256_and_pd(fscal,cutoff_mask);
2267 /* Calculate temporary vectorial force */
2268 tx = _mm256_mul_pd(fscal,dx32);
2269 ty = _mm256_mul_pd(fscal,dy32);
2270 tz = _mm256_mul_pd(fscal,dz32);
2272 /* Update vectorial force */
2273 fix3 = _mm256_add_pd(fix3,tx);
2274 fiy3 = _mm256_add_pd(fiy3,ty);
2275 fiz3 = _mm256_add_pd(fiz3,tz);
2277 fjx2 = _mm256_add_pd(fjx2,tx);
2278 fjy2 = _mm256_add_pd(fjy2,ty);
2279 fjz2 = _mm256_add_pd(fjz2,tz);
2283 /**************************
2284 * CALCULATE INTERACTIONS *
2285 **************************/
2287 if (gmx_mm256_any_lt(rsq33,rcutoff2))
2290 r33 = _mm256_mul_pd(rsq33,rinv33);
2292 /* EWALD ELECTROSTATICS */
2294 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2295 ewrt = _mm256_mul_pd(r33,ewtabscale);
2296 ewitab = _mm256_cvttpd_epi32(ewrt);
2297 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2298 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2299 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2301 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2302 felec = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
2304 cutoff_mask = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
2308 fscal = _mm256_and_pd(fscal,cutoff_mask);
2310 /* Calculate temporary vectorial force */
2311 tx = _mm256_mul_pd(fscal,dx33);
2312 ty = _mm256_mul_pd(fscal,dy33);
2313 tz = _mm256_mul_pd(fscal,dz33);
2315 /* Update vectorial force */
2316 fix3 = _mm256_add_pd(fix3,tx);
2317 fiy3 = _mm256_add_pd(fiy3,ty);
2318 fiz3 = _mm256_add_pd(fiz3,tz);
2320 fjx3 = _mm256_add_pd(fjx3,tx);
2321 fjy3 = _mm256_add_pd(fjy3,ty);
2322 fjz3 = _mm256_add_pd(fjz3,tz);
2326 fjptrA = f+j_coord_offsetA;
2327 fjptrB = f+j_coord_offsetB;
2328 fjptrC = f+j_coord_offsetC;
2329 fjptrD = f+j_coord_offsetD;
2331 gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2332 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2333 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2335 /* Inner loop uses 403 flops */
2338 if(jidx<j_index_end)
2341 /* Get j neighbor index, and coordinate index */
2342 jnrlistA = jjnr[jidx];
2343 jnrlistB = jjnr[jidx+1];
2344 jnrlistC = jjnr[jidx+2];
2345 jnrlistD = jjnr[jidx+3];
2346 /* Sign of each element will be negative for non-real atoms.
2347 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2348 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
2350 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
2352 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
2353 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
2354 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
2356 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
2357 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
2358 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
2359 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
2360 j_coord_offsetA = DIM*jnrA;
2361 j_coord_offsetB = DIM*jnrB;
2362 j_coord_offsetC = DIM*jnrC;
2363 j_coord_offsetD = DIM*jnrD;
2365 /* load j atom coordinates */
2366 gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
2367 x+j_coord_offsetC,x+j_coord_offsetD,
2368 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
2369 &jy2,&jz2,&jx3,&jy3,&jz3);
2371 /* Calculate displacement vector */
2372 dx00 = _mm256_sub_pd(ix0,jx0);
2373 dy00 = _mm256_sub_pd(iy0,jy0);
2374 dz00 = _mm256_sub_pd(iz0,jz0);
2375 dx11 = _mm256_sub_pd(ix1,jx1);
2376 dy11 = _mm256_sub_pd(iy1,jy1);
2377 dz11 = _mm256_sub_pd(iz1,jz1);
2378 dx12 = _mm256_sub_pd(ix1,jx2);
2379 dy12 = _mm256_sub_pd(iy1,jy2);
2380 dz12 = _mm256_sub_pd(iz1,jz2);
2381 dx13 = _mm256_sub_pd(ix1,jx3);
2382 dy13 = _mm256_sub_pd(iy1,jy3);
2383 dz13 = _mm256_sub_pd(iz1,jz3);
2384 dx21 = _mm256_sub_pd(ix2,jx1);
2385 dy21 = _mm256_sub_pd(iy2,jy1);
2386 dz21 = _mm256_sub_pd(iz2,jz1);
2387 dx22 = _mm256_sub_pd(ix2,jx2);
2388 dy22 = _mm256_sub_pd(iy2,jy2);
2389 dz22 = _mm256_sub_pd(iz2,jz2);
2390 dx23 = _mm256_sub_pd(ix2,jx3);
2391 dy23 = _mm256_sub_pd(iy2,jy3);
2392 dz23 = _mm256_sub_pd(iz2,jz3);
2393 dx31 = _mm256_sub_pd(ix3,jx1);
2394 dy31 = _mm256_sub_pd(iy3,jy1);
2395 dz31 = _mm256_sub_pd(iz3,jz1);
2396 dx32 = _mm256_sub_pd(ix3,jx2);
2397 dy32 = _mm256_sub_pd(iy3,jy2);
2398 dz32 = _mm256_sub_pd(iz3,jz2);
2399 dx33 = _mm256_sub_pd(ix3,jx3);
2400 dy33 = _mm256_sub_pd(iy3,jy3);
2401 dz33 = _mm256_sub_pd(iz3,jz3);
2403 /* Calculate squared distance and things based on it */
2404 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
2405 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
2406 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
2407 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
2408 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
2409 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
2410 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
2411 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
2412 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
2413 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
2415 rinv00 = avx256_invsqrt_d(rsq00);
2416 rinv11 = avx256_invsqrt_d(rsq11);
2417 rinv12 = avx256_invsqrt_d(rsq12);
2418 rinv13 = avx256_invsqrt_d(rsq13);
2419 rinv21 = avx256_invsqrt_d(rsq21);
2420 rinv22 = avx256_invsqrt_d(rsq22);
2421 rinv23 = avx256_invsqrt_d(rsq23);
2422 rinv31 = avx256_invsqrt_d(rsq31);
2423 rinv32 = avx256_invsqrt_d(rsq32);
2424 rinv33 = avx256_invsqrt_d(rsq33);
2426 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
2427 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
2428 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
2429 rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
2430 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
2431 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
2432 rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
2433 rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
2434 rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
2435 rinvsq33 = _mm256_mul_pd(rinv33,rinv33);
2437 fjx0 = _mm256_setzero_pd();
2438 fjy0 = _mm256_setzero_pd();
2439 fjz0 = _mm256_setzero_pd();
2440 fjx1 = _mm256_setzero_pd();
2441 fjy1 = _mm256_setzero_pd();
2442 fjz1 = _mm256_setzero_pd();
2443 fjx2 = _mm256_setzero_pd();
2444 fjy2 = _mm256_setzero_pd();
2445 fjz2 = _mm256_setzero_pd();
2446 fjx3 = _mm256_setzero_pd();
2447 fjy3 = _mm256_setzero_pd();
2448 fjz3 = _mm256_setzero_pd();
2450 /**************************
2451 * CALCULATE INTERACTIONS *
2452 **************************/
2454 if (gmx_mm256_any_lt(rsq00,rcutoff2))
2457 r00 = _mm256_mul_pd(rsq00,rinv00);
2458 r00 = _mm256_andnot_pd(dummy_mask,r00);
2460 /* Analytical LJ-PME */
2461 rinvsix = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
2462 ewcljrsq = _mm256_mul_pd(ewclj2,rsq00);
2463 ewclj6 = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
2464 exponent = avx256_exp_d(ewcljrsq);
2465 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
2466 poly = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
2467 /* f6A = 6 * C6grid * (1 - poly) */
2468 f6A = _mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly));
2469 /* f6B = C6grid * exponent * beta^6 */
2470 f6B = _mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6));
2471 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
2472 fvdw = _mm256_mul_pd(_mm256_add_pd(_mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),_mm256_sub_pd(c6_00,f6A)),rinvsix),f6B),rinvsq00);
2474 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
2478 fscal = _mm256_and_pd(fscal,cutoff_mask);
2480 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2482 /* Calculate temporary vectorial force */
2483 tx = _mm256_mul_pd(fscal,dx00);
2484 ty = _mm256_mul_pd(fscal,dy00);
2485 tz = _mm256_mul_pd(fscal,dz00);
2487 /* Update vectorial force */
2488 fix0 = _mm256_add_pd(fix0,tx);
2489 fiy0 = _mm256_add_pd(fiy0,ty);
2490 fiz0 = _mm256_add_pd(fiz0,tz);
2492 fjx0 = _mm256_add_pd(fjx0,tx);
2493 fjy0 = _mm256_add_pd(fjy0,ty);
2494 fjz0 = _mm256_add_pd(fjz0,tz);
2498 /**************************
2499 * CALCULATE INTERACTIONS *
2500 **************************/
2502 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2505 r11 = _mm256_mul_pd(rsq11,rinv11);
2506 r11 = _mm256_andnot_pd(dummy_mask,r11);
2508 /* EWALD ELECTROSTATICS */
2510 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2511 ewrt = _mm256_mul_pd(r11,ewtabscale);
2512 ewitab = _mm256_cvttpd_epi32(ewrt);
2513 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2514 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2515 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2517 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2518 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
2520 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2524 fscal = _mm256_and_pd(fscal,cutoff_mask);
2526 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2528 /* Calculate temporary vectorial force */
2529 tx = _mm256_mul_pd(fscal,dx11);
2530 ty = _mm256_mul_pd(fscal,dy11);
2531 tz = _mm256_mul_pd(fscal,dz11);
2533 /* Update vectorial force */
2534 fix1 = _mm256_add_pd(fix1,tx);
2535 fiy1 = _mm256_add_pd(fiy1,ty);
2536 fiz1 = _mm256_add_pd(fiz1,tz);
2538 fjx1 = _mm256_add_pd(fjx1,tx);
2539 fjy1 = _mm256_add_pd(fjy1,ty);
2540 fjz1 = _mm256_add_pd(fjz1,tz);
2544 /**************************
2545 * CALCULATE INTERACTIONS *
2546 **************************/
2548 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2551 r12 = _mm256_mul_pd(rsq12,rinv12);
2552 r12 = _mm256_andnot_pd(dummy_mask,r12);
2554 /* EWALD ELECTROSTATICS */
2556 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2557 ewrt = _mm256_mul_pd(r12,ewtabscale);
2558 ewitab = _mm256_cvttpd_epi32(ewrt);
2559 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2560 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2561 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2563 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2564 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2566 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2570 fscal = _mm256_and_pd(fscal,cutoff_mask);
2572 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2574 /* Calculate temporary vectorial force */
2575 tx = _mm256_mul_pd(fscal,dx12);
2576 ty = _mm256_mul_pd(fscal,dy12);
2577 tz = _mm256_mul_pd(fscal,dz12);
2579 /* Update vectorial force */
2580 fix1 = _mm256_add_pd(fix1,tx);
2581 fiy1 = _mm256_add_pd(fiy1,ty);
2582 fiz1 = _mm256_add_pd(fiz1,tz);
2584 fjx2 = _mm256_add_pd(fjx2,tx);
2585 fjy2 = _mm256_add_pd(fjy2,ty);
2586 fjz2 = _mm256_add_pd(fjz2,tz);
2590 /**************************
2591 * CALCULATE INTERACTIONS *
2592 **************************/
2594 if (gmx_mm256_any_lt(rsq13,rcutoff2))
2597 r13 = _mm256_mul_pd(rsq13,rinv13);
2598 r13 = _mm256_andnot_pd(dummy_mask,r13);
2600 /* EWALD ELECTROSTATICS */
2602 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2603 ewrt = _mm256_mul_pd(r13,ewtabscale);
2604 ewitab = _mm256_cvttpd_epi32(ewrt);
2605 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2606 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2607 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2609 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2610 felec = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
2612 cutoff_mask = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
2616 fscal = _mm256_and_pd(fscal,cutoff_mask);
2618 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2620 /* Calculate temporary vectorial force */
2621 tx = _mm256_mul_pd(fscal,dx13);
2622 ty = _mm256_mul_pd(fscal,dy13);
2623 tz = _mm256_mul_pd(fscal,dz13);
2625 /* Update vectorial force */
2626 fix1 = _mm256_add_pd(fix1,tx);
2627 fiy1 = _mm256_add_pd(fiy1,ty);
2628 fiz1 = _mm256_add_pd(fiz1,tz);
2630 fjx3 = _mm256_add_pd(fjx3,tx);
2631 fjy3 = _mm256_add_pd(fjy3,ty);
2632 fjz3 = _mm256_add_pd(fjz3,tz);
2636 /**************************
2637 * CALCULATE INTERACTIONS *
2638 **************************/
2640 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2643 r21 = _mm256_mul_pd(rsq21,rinv21);
2644 r21 = _mm256_andnot_pd(dummy_mask,r21);
2646 /* EWALD ELECTROSTATICS */
2648 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2649 ewrt = _mm256_mul_pd(r21,ewtabscale);
2650 ewitab = _mm256_cvttpd_epi32(ewrt);
2651 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2652 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2653 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2655 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2656 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2658 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2662 fscal = _mm256_and_pd(fscal,cutoff_mask);
2664 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2666 /* Calculate temporary vectorial force */
2667 tx = _mm256_mul_pd(fscal,dx21);
2668 ty = _mm256_mul_pd(fscal,dy21);
2669 tz = _mm256_mul_pd(fscal,dz21);
2671 /* Update vectorial force */
2672 fix2 = _mm256_add_pd(fix2,tx);
2673 fiy2 = _mm256_add_pd(fiy2,ty);
2674 fiz2 = _mm256_add_pd(fiz2,tz);
2676 fjx1 = _mm256_add_pd(fjx1,tx);
2677 fjy1 = _mm256_add_pd(fjy1,ty);
2678 fjz1 = _mm256_add_pd(fjz1,tz);
2682 /**************************
2683 * CALCULATE INTERACTIONS *
2684 **************************/
2686 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2689 r22 = _mm256_mul_pd(rsq22,rinv22);
2690 r22 = _mm256_andnot_pd(dummy_mask,r22);
2692 /* EWALD ELECTROSTATICS */
2694 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2695 ewrt = _mm256_mul_pd(r22,ewtabscale);
2696 ewitab = _mm256_cvttpd_epi32(ewrt);
/*
 * NOTE(review): this span is the interior of the generated kernel
 * nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_VF_avx_256_double (see file
 * header).  The enclosing function begins before and ends after this
 * excerpt, so only comments are added here; no code is altered.
 *
 * Each "CALCULATE INTERACTIONS" section below evaluates the real-space
 * Ewald electrostatic force for one i-j atom pair of a 4-site-water /
 * 4-site-water interaction (pairs 2-2, 2-3, 3-1, 3-2, 3-3 visible here),
 * using a tabulated lookup with linear interpolation, then accumulates
 * the resulting force into the per-atom i and j force accumulators.
 *
 * NOTE(review): the integer at the start of each line is the original
 * file's line number fused into the text by extraction.  The numbering
 * has gaps (e.g. 2700 and 2706 are absent in each section), which is why
 * the trailing arguments of gmx_mm256_load_4pair_swizzle_pd and the
 * assignment that initializes fscal from felec are not visible -- confirm
 * against the full generated file before relying on this excerpt.
 */
/* eweps: fractional position within the table interval (ewrt - floor(ewrt)) */
2697 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
/* Gather per-lane Ewald force-table entries at the four indices held in ewitab
 * (output arguments of this call are on a line missing from this excerpt) */
2698 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2699 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
/* Linear interpolation between adjacent table values F and Fn, then form the
 * scalar electrostatic force prefactor: qq*rinv*(rinvsq - interpolated term) */
2701 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2702 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
/* Zero the force for SIMD lanes whose pair distance is beyond the cutoff */
2704 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
/* NOTE(review): fscal is presumably assigned from felec on the missing
 * original line 2706 -- verify against the full file */
2708 fscal = _mm256_and_pd(fscal,cutoff_mask);
/* dummy_mask presumably flags padded/inactive lanes of the j-list; andnot
 * clears their contribution -- TODO confirm its setup earlier in the function */
2710 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2712 /* Calculate temporary vectorial force */
2713 tx = _mm256_mul_pd(fscal,dx22);
2714 ty = _mm256_mul_pd(fscal,dy22);
2715 tz = _mm256_mul_pd(fscal,dz22);
2717 /* Update vectorial force */
/* Accumulate onto i-atom 2 and j-atom 2 (Newton's third law: the j force
 * accumulators are decremented later in one batched scatter) */
2718 fix2 = _mm256_add_pd(fix2,tx);
2719 fiy2 = _mm256_add_pd(fiy2,ty);
2720 fiz2 = _mm256_add_pd(fiz2,tz);
2722 fjx2 = _mm256_add_pd(fjx2,tx);
2723 fjy2 = _mm256_add_pd(fjy2,ty);
2724 fjz2 = _mm256_add_pd(fjz2,tz);
2728 /**************************
2729 * CALCULATE INTERACTIONS *
2730 **************************/
/* Pair i2-j3: identical Ewald force evaluation as the section above,
 * guarded so it is skipped when all four lanes are beyond the cutoff */
2732 if (gmx_mm256_any_lt(rsq23,rcutoff2))
/* r = rsq * (1/r); dummy_mask clears padded lanes before the table lookup */
2735 r23 = _mm256_mul_pd(rsq23,rinv23);
2736 r23 = _mm256_andnot_pd(dummy_mask,r23);
2738 /* EWALD ELECTROSTATICS */
2740 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2741 ewrt = _mm256_mul_pd(r23,ewtabscale);
2742 ewitab = _mm256_cvttpd_epi32(ewrt);
2743 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2744 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2745 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2747 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2748 felec = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
2750 cutoff_mask = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
2754 fscal = _mm256_and_pd(fscal,cutoff_mask);
2756 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2758 /* Calculate temporary vectorial force */
2759 tx = _mm256_mul_pd(fscal,dx23);
2760 ty = _mm256_mul_pd(fscal,dy23);
2761 tz = _mm256_mul_pd(fscal,dz23);
2763 /* Update vectorial force */
2764 fix2 = _mm256_add_pd(fix2,tx);
2765 fiy2 = _mm256_add_pd(fiy2,ty);
2766 fiz2 = _mm256_add_pd(fiz2,tz);
2768 fjx3 = _mm256_add_pd(fjx3,tx);
2769 fjy3 = _mm256_add_pd(fjy3,ty);
2770 fjz3 = _mm256_add_pd(fjz3,tz);
2774 /**************************
2775 * CALCULATE INTERACTIONS *
2776 **************************/
/* Pair i3-j1: same pattern, with the i3/j1 distance, charge product
 * (qq31) and force accumulators */
2778 if (gmx_mm256_any_lt(rsq31,rcutoff2))
2781 r31 = _mm256_mul_pd(rsq31,rinv31);
2782 r31 = _mm256_andnot_pd(dummy_mask,r31);
2784 /* EWALD ELECTROSTATICS */
2786 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2787 ewrt = _mm256_mul_pd(r31,ewtabscale);
2788 ewitab = _mm256_cvttpd_epi32(ewrt);
2789 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2790 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2791 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2793 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2794 felec = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
2796 cutoff_mask = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
2800 fscal = _mm256_and_pd(fscal,cutoff_mask);
2802 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2804 /* Calculate temporary vectorial force */
2805 tx = _mm256_mul_pd(fscal,dx31);
2806 ty = _mm256_mul_pd(fscal,dy31);
2807 tz = _mm256_mul_pd(fscal,dz31);
2809 /* Update vectorial force */
2810 fix3 = _mm256_add_pd(fix3,tx);
2811 fiy3 = _mm256_add_pd(fiy3,ty);
2812 fiz3 = _mm256_add_pd(fiz3,tz);
2814 fjx1 = _mm256_add_pd(fjx1,tx);
2815 fjy1 = _mm256_add_pd(fjy1,ty);
2816 fjz1 = _mm256_add_pd(fjz1,tz);
2820 /**************************
2821 * CALCULATE INTERACTIONS *
2822 **************************/
/* Pair i3-j2: same pattern with the 3-2 quantities */
2824 if (gmx_mm256_any_lt(rsq32,rcutoff2))
2827 r32 = _mm256_mul_pd(rsq32,rinv32);
2828 r32 = _mm256_andnot_pd(dummy_mask,r32);
2830 /* EWALD ELECTROSTATICS */
2832 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2833 ewrt = _mm256_mul_pd(r32,ewtabscale);
2834 ewitab = _mm256_cvttpd_epi32(ewrt);
2835 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2836 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2837 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2839 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2840 felec = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
2842 cutoff_mask = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
2846 fscal = _mm256_and_pd(fscal,cutoff_mask);
2848 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2850 /* Calculate temporary vectorial force */
2851 tx = _mm256_mul_pd(fscal,dx32);
2852 ty = _mm256_mul_pd(fscal,dy32);
2853 tz = _mm256_mul_pd(fscal,dz32);
2855 /* Update vectorial force */
2856 fix3 = _mm256_add_pd(fix3,tx);
2857 fiy3 = _mm256_add_pd(fiy3,ty);
2858 fiz3 = _mm256_add_pd(fiz3,tz);
2860 fjx2 = _mm256_add_pd(fjx2,tx);
2861 fjy2 = _mm256_add_pd(fjy2,ty);
2862 fjz2 = _mm256_add_pd(fjz2,tz);
2866 /**************************
2867 * CALCULATE INTERACTIONS *
2868 **************************/
/* Pair i3-j3: last pair of this inner-loop iteration, same pattern */
2870 if (gmx_mm256_any_lt(rsq33,rcutoff2))
2873 r33 = _mm256_mul_pd(rsq33,rinv33);
2874 r33 = _mm256_andnot_pd(dummy_mask,r33);
2876 /* EWALD ELECTROSTATICS */
2878 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2879 ewrt = _mm256_mul_pd(r33,ewtabscale);
2880 ewitab = _mm256_cvttpd_epi32(ewrt);
2881 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2882 gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2883 ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2885 felec = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2886 felec = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
2888 cutoff_mask = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
2892 fscal = _mm256_and_pd(fscal,cutoff_mask);
2894 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2896 /* Calculate temporary vectorial force */
2897 tx = _mm256_mul_pd(fscal,dx33);
2898 ty = _mm256_mul_pd(fscal,dy33);
2899 tz = _mm256_mul_pd(fscal,dz33);
2901 /* Update vectorial force */
2902 fix3 = _mm256_add_pd(fix3,tx);
2903 fiy3 = _mm256_add_pd(fiy3,ty);
2904 fiz3 = _mm256_add_pd(fiz3,tz);
2906 fjx3 = _mm256_add_pd(fjx3,tx);
2907 fjy3 = _mm256_add_pd(fjy3,ty);
2908 fjz3 = _mm256_add_pd(fjz3,tz);
/* Padded j-entries (jnrlist < 0) have their force output redirected to a
 * scratch buffer so the batched decrement below stays branch-free */
2912 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2913 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2914 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2915 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
/* Scatter: subtract the accumulated forces for all four j-water sites in
 * the four SIMD lanes in one swizzled store */
2917 gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2918 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2919 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2921 /* Inner loop uses 413 flops */
2924 /* End of innermost loop */
/* Reduce the four-lane i-force accumulators and add them to the i-atom
 * forces and the shift-force array */
2926 gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
2927 f+i_coord_offset,fshift+i_shift_offset);
2929 /* Increment number of inner iterations */
2930 inneriter += j_index_end - j_index_start;
2932 /* Outer loop uses 24 flops */
2935 /* Increment number of outer iterations */
2938 /* Update outer/inner flops */
/* Flop accounting for the run statistics: 24 flops per outer iteration,
 * 413 per inner iteration (the counts stated in the comments above) */
2940 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*413);