2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
47 #include "kernelutil_x86_avx_256_single.h"
50 * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJEw_GeomW3W3_VF_avx_256_single
51 * Electrostatics interaction: Ewald
52 * VdW interaction: LJEwald
53 * Geometry: Water3-Water3
54 * Calculate force/pot: PotentialAndForce
57 nb_kernel_ElecEw_VdwLJEw_GeomW3W3_VF_avx_256_single
58 (t_nblist * gmx_restrict nlist,
59 rvec * gmx_restrict xx,
60 rvec * gmx_restrict ff,
61 struct t_forcerec * gmx_restrict fr,
62 t_mdatoms * gmx_restrict mdatoms,
63 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64 t_nrnb * gmx_restrict nrnb)
66 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
67 * just 0 for non-waters.
68 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
69 * jnr indices corresponding to data put in the eight positions in the SIMD register.
71 int i_shift_offset,i_coord_offset,outeriter,inneriter;
72 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73 int jnrA,jnrB,jnrC,jnrD;
74 int jnrE,jnrF,jnrG,jnrH;
75 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
79 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
81 real *shiftvec,*fshift,*x,*f;
82 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
84 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
85 real * vdwioffsetptr0;
86 real * vdwgridioffsetptr0;
87 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88 real * vdwioffsetptr1;
89 real * vdwgridioffsetptr1;
90 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
91 real * vdwioffsetptr2;
92 real * vdwgridioffsetptr2;
93 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
94 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
95 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
96 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
97 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
98 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
99 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
100 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
101 __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
102 __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
103 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
104 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
105 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
106 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
107 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
108 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
109 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
112 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
115 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
116 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
127 __m256 ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
128 __m256 one_half = _mm256_set1_ps(0.5);
129 __m256 minus_one = _mm256_set1_ps(-1.0);
131 __m128i ewitab_lo,ewitab_hi;
132 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
133 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
135 __m256 dummy_mask,cutoff_mask;
136 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
137 __m256 one = _mm256_set1_ps(1.0);
138 __m256 two = _mm256_set1_ps(2.0);
144 jindex = nlist->jindex;
146 shiftidx = nlist->shift;
148 shiftvec = fr->shift_vec[0];
149 fshift = fr->fshift[0];
150 facel = _mm256_set1_ps(fr->ic->epsfac);
151 charge = mdatoms->chargeA;
152 nvdwtype = fr->ntype;
154 vdwtype = mdatoms->typeA;
155 vdwgridparam = fr->ljpme_c6grid;
156 sh_lj_ewald = _mm256_set1_ps(fr->ic->sh_lj_ewald);
157 ewclj = _mm256_set1_ps(fr->ic->ewaldcoeff_lj);
158 ewclj2 = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
160 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
161 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
162 beta2 = _mm256_mul_ps(beta,beta);
163 beta3 = _mm256_mul_ps(beta,beta2);
165 ewtab = fr->ic->tabq_coul_FDV0;
166 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
167 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
169 /* Setup water-specific parameters */
170 inr = nlist->iinr[0];
171 iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
172 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
173 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
174 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
175 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
177 jq0 = _mm256_set1_ps(charge[inr+0]);
178 jq1 = _mm256_set1_ps(charge[inr+1]);
179 jq2 = _mm256_set1_ps(charge[inr+2]);
180 vdwjidx0A = 2*vdwtype[inr+0];
181 qq00 = _mm256_mul_ps(iq0,jq0);
182 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
183 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
184 c6grid_00 = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
185 qq01 = _mm256_mul_ps(iq0,jq1);
186 qq02 = _mm256_mul_ps(iq0,jq2);
187 qq10 = _mm256_mul_ps(iq1,jq0);
188 qq11 = _mm256_mul_ps(iq1,jq1);
189 qq12 = _mm256_mul_ps(iq1,jq2);
190 qq20 = _mm256_mul_ps(iq2,jq0);
191 qq21 = _mm256_mul_ps(iq2,jq1);
192 qq22 = _mm256_mul_ps(iq2,jq2);
194 /* Avoid stupid compiler warnings */
195 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
208 for(iidx=0;iidx<4*DIM;iidx++)
213 /* Start outer loop over neighborlists */
214 for(iidx=0; iidx<nri; iidx++)
216 /* Load shift vector for this list */
217 i_shift_offset = DIM*shiftidx[iidx];
219 /* Load limits for loop over neighbors */
220 j_index_start = jindex[iidx];
221 j_index_end = jindex[iidx+1];
223 /* Get outer coordinate index */
225 i_coord_offset = DIM*inr;
227 /* Load i particle coords and add shift vector */
228 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
229 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
231 fix0 = _mm256_setzero_ps();
232 fiy0 = _mm256_setzero_ps();
233 fiz0 = _mm256_setzero_ps();
234 fix1 = _mm256_setzero_ps();
235 fiy1 = _mm256_setzero_ps();
236 fiz1 = _mm256_setzero_ps();
237 fix2 = _mm256_setzero_ps();
238 fiy2 = _mm256_setzero_ps();
239 fiz2 = _mm256_setzero_ps();
241 /* Reset potential sums */
242 velecsum = _mm256_setzero_ps();
243 vvdwsum = _mm256_setzero_ps();
245 /* Start inner kernel loop */
246 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
249 /* Get j neighbor index, and coordinate index */
258 j_coord_offsetA = DIM*jnrA;
259 j_coord_offsetB = DIM*jnrB;
260 j_coord_offsetC = DIM*jnrC;
261 j_coord_offsetD = DIM*jnrD;
262 j_coord_offsetE = DIM*jnrE;
263 j_coord_offsetF = DIM*jnrF;
264 j_coord_offsetG = DIM*jnrG;
265 j_coord_offsetH = DIM*jnrH;
267 /* load j atom coordinates */
268 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
269 x+j_coord_offsetC,x+j_coord_offsetD,
270 x+j_coord_offsetE,x+j_coord_offsetF,
271 x+j_coord_offsetG,x+j_coord_offsetH,
272 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
274 /* Calculate displacement vector */
275 dx00 = _mm256_sub_ps(ix0,jx0);
276 dy00 = _mm256_sub_ps(iy0,jy0);
277 dz00 = _mm256_sub_ps(iz0,jz0);
278 dx01 = _mm256_sub_ps(ix0,jx1);
279 dy01 = _mm256_sub_ps(iy0,jy1);
280 dz01 = _mm256_sub_ps(iz0,jz1);
281 dx02 = _mm256_sub_ps(ix0,jx2);
282 dy02 = _mm256_sub_ps(iy0,jy2);
283 dz02 = _mm256_sub_ps(iz0,jz2);
284 dx10 = _mm256_sub_ps(ix1,jx0);
285 dy10 = _mm256_sub_ps(iy1,jy0);
286 dz10 = _mm256_sub_ps(iz1,jz0);
287 dx11 = _mm256_sub_ps(ix1,jx1);
288 dy11 = _mm256_sub_ps(iy1,jy1);
289 dz11 = _mm256_sub_ps(iz1,jz1);
290 dx12 = _mm256_sub_ps(ix1,jx2);
291 dy12 = _mm256_sub_ps(iy1,jy2);
292 dz12 = _mm256_sub_ps(iz1,jz2);
293 dx20 = _mm256_sub_ps(ix2,jx0);
294 dy20 = _mm256_sub_ps(iy2,jy0);
295 dz20 = _mm256_sub_ps(iz2,jz0);
296 dx21 = _mm256_sub_ps(ix2,jx1);
297 dy21 = _mm256_sub_ps(iy2,jy1);
298 dz21 = _mm256_sub_ps(iz2,jz1);
299 dx22 = _mm256_sub_ps(ix2,jx2);
300 dy22 = _mm256_sub_ps(iy2,jy2);
301 dz22 = _mm256_sub_ps(iz2,jz2);
303 /* Calculate squared distance and things based on it */
304 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
305 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
306 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
307 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
308 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
309 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
310 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
311 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
312 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
314 rinv00 = avx256_invsqrt_f(rsq00);
315 rinv01 = avx256_invsqrt_f(rsq01);
316 rinv02 = avx256_invsqrt_f(rsq02);
317 rinv10 = avx256_invsqrt_f(rsq10);
318 rinv11 = avx256_invsqrt_f(rsq11);
319 rinv12 = avx256_invsqrt_f(rsq12);
320 rinv20 = avx256_invsqrt_f(rsq20);
321 rinv21 = avx256_invsqrt_f(rsq21);
322 rinv22 = avx256_invsqrt_f(rsq22);
324 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
325 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
326 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
327 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
328 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
329 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
330 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
331 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
332 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
334 fjx0 = _mm256_setzero_ps();
335 fjy0 = _mm256_setzero_ps();
336 fjz0 = _mm256_setzero_ps();
337 fjx1 = _mm256_setzero_ps();
338 fjy1 = _mm256_setzero_ps();
339 fjz1 = _mm256_setzero_ps();
340 fjx2 = _mm256_setzero_ps();
341 fjy2 = _mm256_setzero_ps();
342 fjz2 = _mm256_setzero_ps();
344 /**************************
345 * CALCULATE INTERACTIONS *
346 **************************/
348 r00 = _mm256_mul_ps(rsq00,rinv00);
350 /* EWALD ELECTROSTATICS */
352 /* Analytical PME correction */
353 zeta2 = _mm256_mul_ps(beta2,rsq00);
354 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
355 pmecorrF = avx256_pmecorrF_f(zeta2);
356 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
357 felec = _mm256_mul_ps(qq00,felec);
358 pmecorrV = avx256_pmecorrV_f(zeta2);
359 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
360 velec = _mm256_sub_ps(rinv00,pmecorrV);
361 velec = _mm256_mul_ps(qq00,velec);
363 /* Analytical LJ-PME */
364 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
365 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
366 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
367 exponent = avx256_exp_f(ewcljrsq);
368 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
369 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
370 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
371 vvdw6 = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
372 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
373 vvdw = _mm256_sub_ps(_mm256_mul_ps(vvdw12,one_twelfth),_mm256_mul_ps(vvdw6,one_sixth));
374 /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
375 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
377 /* Update potential sum for this i atom from the interaction with this j atom. */
378 velecsum = _mm256_add_ps(velecsum,velec);
379 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
381 fscal = _mm256_add_ps(felec,fvdw);
383 /* Calculate temporary vectorial force */
384 tx = _mm256_mul_ps(fscal,dx00);
385 ty = _mm256_mul_ps(fscal,dy00);
386 tz = _mm256_mul_ps(fscal,dz00);
388 /* Update vectorial force */
389 fix0 = _mm256_add_ps(fix0,tx);
390 fiy0 = _mm256_add_ps(fiy0,ty);
391 fiz0 = _mm256_add_ps(fiz0,tz);
393 fjx0 = _mm256_add_ps(fjx0,tx);
394 fjy0 = _mm256_add_ps(fjy0,ty);
395 fjz0 = _mm256_add_ps(fjz0,tz);
397 /**************************
398 * CALCULATE INTERACTIONS *
399 **************************/
401 r01 = _mm256_mul_ps(rsq01,rinv01);
403 /* EWALD ELECTROSTATICS */
405 /* Analytical PME correction */
406 zeta2 = _mm256_mul_ps(beta2,rsq01);
407 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
408 pmecorrF = avx256_pmecorrF_f(zeta2);
409 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
410 felec = _mm256_mul_ps(qq01,felec);
411 pmecorrV = avx256_pmecorrV_f(zeta2);
412 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
413 velec = _mm256_sub_ps(rinv01,pmecorrV);
414 velec = _mm256_mul_ps(qq01,velec);
416 /* Update potential sum for this i atom from the interaction with this j atom. */
417 velecsum = _mm256_add_ps(velecsum,velec);
421 /* Calculate temporary vectorial force */
422 tx = _mm256_mul_ps(fscal,dx01);
423 ty = _mm256_mul_ps(fscal,dy01);
424 tz = _mm256_mul_ps(fscal,dz01);
426 /* Update vectorial force */
427 fix0 = _mm256_add_ps(fix0,tx);
428 fiy0 = _mm256_add_ps(fiy0,ty);
429 fiz0 = _mm256_add_ps(fiz0,tz);
431 fjx1 = _mm256_add_ps(fjx1,tx);
432 fjy1 = _mm256_add_ps(fjy1,ty);
433 fjz1 = _mm256_add_ps(fjz1,tz);
435 /**************************
436 * CALCULATE INTERACTIONS *
437 **************************/
439 r02 = _mm256_mul_ps(rsq02,rinv02);
441 /* EWALD ELECTROSTATICS */
443 /* Analytical PME correction */
444 zeta2 = _mm256_mul_ps(beta2,rsq02);
445 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
446 pmecorrF = avx256_pmecorrF_f(zeta2);
447 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
448 felec = _mm256_mul_ps(qq02,felec);
449 pmecorrV = avx256_pmecorrV_f(zeta2);
450 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
451 velec = _mm256_sub_ps(rinv02,pmecorrV);
452 velec = _mm256_mul_ps(qq02,velec);
454 /* Update potential sum for this i atom from the interaction with this j atom. */
455 velecsum = _mm256_add_ps(velecsum,velec);
459 /* Calculate temporary vectorial force */
460 tx = _mm256_mul_ps(fscal,dx02);
461 ty = _mm256_mul_ps(fscal,dy02);
462 tz = _mm256_mul_ps(fscal,dz02);
464 /* Update vectorial force */
465 fix0 = _mm256_add_ps(fix0,tx);
466 fiy0 = _mm256_add_ps(fiy0,ty);
467 fiz0 = _mm256_add_ps(fiz0,tz);
469 fjx2 = _mm256_add_ps(fjx2,tx);
470 fjy2 = _mm256_add_ps(fjy2,ty);
471 fjz2 = _mm256_add_ps(fjz2,tz);
473 /**************************
474 * CALCULATE INTERACTIONS *
475 **************************/
477 r10 = _mm256_mul_ps(rsq10,rinv10);
479 /* EWALD ELECTROSTATICS */
481 /* Analytical PME correction */
482 zeta2 = _mm256_mul_ps(beta2,rsq10);
483 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
484 pmecorrF = avx256_pmecorrF_f(zeta2);
485 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
486 felec = _mm256_mul_ps(qq10,felec);
487 pmecorrV = avx256_pmecorrV_f(zeta2);
488 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
489 velec = _mm256_sub_ps(rinv10,pmecorrV);
490 velec = _mm256_mul_ps(qq10,velec);
492 /* Update potential sum for this i atom from the interaction with this j atom. */
493 velecsum = _mm256_add_ps(velecsum,velec);
497 /* Calculate temporary vectorial force */
498 tx = _mm256_mul_ps(fscal,dx10);
499 ty = _mm256_mul_ps(fscal,dy10);
500 tz = _mm256_mul_ps(fscal,dz10);
502 /* Update vectorial force */
503 fix1 = _mm256_add_ps(fix1,tx);
504 fiy1 = _mm256_add_ps(fiy1,ty);
505 fiz1 = _mm256_add_ps(fiz1,tz);
507 fjx0 = _mm256_add_ps(fjx0,tx);
508 fjy0 = _mm256_add_ps(fjy0,ty);
509 fjz0 = _mm256_add_ps(fjz0,tz);
511 /**************************
512 * CALCULATE INTERACTIONS *
513 **************************/
515 r11 = _mm256_mul_ps(rsq11,rinv11);
517 /* EWALD ELECTROSTATICS */
519 /* Analytical PME correction */
520 zeta2 = _mm256_mul_ps(beta2,rsq11);
521 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
522 pmecorrF = avx256_pmecorrF_f(zeta2);
523 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
524 felec = _mm256_mul_ps(qq11,felec);
525 pmecorrV = avx256_pmecorrV_f(zeta2);
526 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
527 velec = _mm256_sub_ps(rinv11,pmecorrV);
528 velec = _mm256_mul_ps(qq11,velec);
530 /* Update potential sum for this i atom from the interaction with this j atom. */
531 velecsum = _mm256_add_ps(velecsum,velec);
535 /* Calculate temporary vectorial force */
536 tx = _mm256_mul_ps(fscal,dx11);
537 ty = _mm256_mul_ps(fscal,dy11);
538 tz = _mm256_mul_ps(fscal,dz11);
540 /* Update vectorial force */
541 fix1 = _mm256_add_ps(fix1,tx);
542 fiy1 = _mm256_add_ps(fiy1,ty);
543 fiz1 = _mm256_add_ps(fiz1,tz);
545 fjx1 = _mm256_add_ps(fjx1,tx);
546 fjy1 = _mm256_add_ps(fjy1,ty);
547 fjz1 = _mm256_add_ps(fjz1,tz);
549 /**************************
550 * CALCULATE INTERACTIONS *
551 **************************/
553 r12 = _mm256_mul_ps(rsq12,rinv12);
555 /* EWALD ELECTROSTATICS */
557 /* Analytical PME correction */
558 zeta2 = _mm256_mul_ps(beta2,rsq12);
559 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
560 pmecorrF = avx256_pmecorrF_f(zeta2);
561 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
562 felec = _mm256_mul_ps(qq12,felec);
563 pmecorrV = avx256_pmecorrV_f(zeta2);
564 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
565 velec = _mm256_sub_ps(rinv12,pmecorrV);
566 velec = _mm256_mul_ps(qq12,velec);
568 /* Update potential sum for this i atom from the interaction with this j atom. */
569 velecsum = _mm256_add_ps(velecsum,velec);
573 /* Calculate temporary vectorial force */
574 tx = _mm256_mul_ps(fscal,dx12);
575 ty = _mm256_mul_ps(fscal,dy12);
576 tz = _mm256_mul_ps(fscal,dz12);
578 /* Update vectorial force */
579 fix1 = _mm256_add_ps(fix1,tx);
580 fiy1 = _mm256_add_ps(fiy1,ty);
581 fiz1 = _mm256_add_ps(fiz1,tz);
583 fjx2 = _mm256_add_ps(fjx2,tx);
584 fjy2 = _mm256_add_ps(fjy2,ty);
585 fjz2 = _mm256_add_ps(fjz2,tz);
587 /**************************
588 * CALCULATE INTERACTIONS *
589 **************************/
591 r20 = _mm256_mul_ps(rsq20,rinv20);
593 /* EWALD ELECTROSTATICS */
595 /* Analytical PME correction */
596 zeta2 = _mm256_mul_ps(beta2,rsq20);
597 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
598 pmecorrF = avx256_pmecorrF_f(zeta2);
599 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
600 felec = _mm256_mul_ps(qq20,felec);
601 pmecorrV = avx256_pmecorrV_f(zeta2);
602 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
603 velec = _mm256_sub_ps(rinv20,pmecorrV);
604 velec = _mm256_mul_ps(qq20,velec);
606 /* Update potential sum for this i atom from the interaction with this j atom. */
607 velecsum = _mm256_add_ps(velecsum,velec);
611 /* Calculate temporary vectorial force */
612 tx = _mm256_mul_ps(fscal,dx20);
613 ty = _mm256_mul_ps(fscal,dy20);
614 tz = _mm256_mul_ps(fscal,dz20);
616 /* Update vectorial force */
617 fix2 = _mm256_add_ps(fix2,tx);
618 fiy2 = _mm256_add_ps(fiy2,ty);
619 fiz2 = _mm256_add_ps(fiz2,tz);
621 fjx0 = _mm256_add_ps(fjx0,tx);
622 fjy0 = _mm256_add_ps(fjy0,ty);
623 fjz0 = _mm256_add_ps(fjz0,tz);
625 /**************************
626 * CALCULATE INTERACTIONS *
627 **************************/
629 r21 = _mm256_mul_ps(rsq21,rinv21);
631 /* EWALD ELECTROSTATICS */
633 /* Analytical PME correction */
634 zeta2 = _mm256_mul_ps(beta2,rsq21);
635 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
636 pmecorrF = avx256_pmecorrF_f(zeta2);
637 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
638 felec = _mm256_mul_ps(qq21,felec);
639 pmecorrV = avx256_pmecorrV_f(zeta2);
640 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
641 velec = _mm256_sub_ps(rinv21,pmecorrV);
642 velec = _mm256_mul_ps(qq21,velec);
644 /* Update potential sum for this i atom from the interaction with this j atom. */
645 velecsum = _mm256_add_ps(velecsum,velec);
649 /* Calculate temporary vectorial force */
650 tx = _mm256_mul_ps(fscal,dx21);
651 ty = _mm256_mul_ps(fscal,dy21);
652 tz = _mm256_mul_ps(fscal,dz21);
654 /* Update vectorial force */
655 fix2 = _mm256_add_ps(fix2,tx);
656 fiy2 = _mm256_add_ps(fiy2,ty);
657 fiz2 = _mm256_add_ps(fiz2,tz);
659 fjx1 = _mm256_add_ps(fjx1,tx);
660 fjy1 = _mm256_add_ps(fjy1,ty);
661 fjz1 = _mm256_add_ps(fjz1,tz);
663 /**************************
664 * CALCULATE INTERACTIONS *
665 **************************/
667 r22 = _mm256_mul_ps(rsq22,rinv22);
669 /* EWALD ELECTROSTATICS */
671 /* Analytical PME correction */
672 zeta2 = _mm256_mul_ps(beta2,rsq22);
673 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
674 pmecorrF = avx256_pmecorrF_f(zeta2);
675 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
676 felec = _mm256_mul_ps(qq22,felec);
677 pmecorrV = avx256_pmecorrV_f(zeta2);
678 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
679 velec = _mm256_sub_ps(rinv22,pmecorrV);
680 velec = _mm256_mul_ps(qq22,velec);
682 /* Update potential sum for this i atom from the interaction with this j atom. */
683 velecsum = _mm256_add_ps(velecsum,velec);
687 /* Calculate temporary vectorial force */
688 tx = _mm256_mul_ps(fscal,dx22);
689 ty = _mm256_mul_ps(fscal,dy22);
690 tz = _mm256_mul_ps(fscal,dz22);
692 /* Update vectorial force */
693 fix2 = _mm256_add_ps(fix2,tx);
694 fiy2 = _mm256_add_ps(fiy2,ty);
695 fiz2 = _mm256_add_ps(fiz2,tz);
697 fjx2 = _mm256_add_ps(fjx2,tx);
698 fjy2 = _mm256_add_ps(fjy2,ty);
699 fjz2 = _mm256_add_ps(fjz2,tz);
701 fjptrA = f+j_coord_offsetA;
702 fjptrB = f+j_coord_offsetB;
703 fjptrC = f+j_coord_offsetC;
704 fjptrD = f+j_coord_offsetD;
705 fjptrE = f+j_coord_offsetE;
706 fjptrF = f+j_coord_offsetF;
707 fjptrG = f+j_coord_offsetG;
708 fjptrH = f+j_coord_offsetH;
710 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
711 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
713 /* Inner loop uses 784 flops */
719 /* Get j neighbor index, and coordinate index */
720 jnrlistA = jjnr[jidx];
721 jnrlistB = jjnr[jidx+1];
722 jnrlistC = jjnr[jidx+2];
723 jnrlistD = jjnr[jidx+3];
724 jnrlistE = jjnr[jidx+4];
725 jnrlistF = jjnr[jidx+5];
726 jnrlistG = jjnr[jidx+6];
727 jnrlistH = jjnr[jidx+7];
728 /* Sign of each element will be negative for non-real atoms.
729 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
730 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
732 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
733 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
735 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
736 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
737 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
738 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
739 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
740 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
741 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
742 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
743 j_coord_offsetA = DIM*jnrA;
744 j_coord_offsetB = DIM*jnrB;
745 j_coord_offsetC = DIM*jnrC;
746 j_coord_offsetD = DIM*jnrD;
747 j_coord_offsetE = DIM*jnrE;
748 j_coord_offsetF = DIM*jnrF;
749 j_coord_offsetG = DIM*jnrG;
750 j_coord_offsetH = DIM*jnrH;
752 /* load j atom coordinates */
753 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
754 x+j_coord_offsetC,x+j_coord_offsetD,
755 x+j_coord_offsetE,x+j_coord_offsetF,
756 x+j_coord_offsetG,x+j_coord_offsetH,
757 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
759 /* Calculate displacement vector */
760 dx00 = _mm256_sub_ps(ix0,jx0);
761 dy00 = _mm256_sub_ps(iy0,jy0);
762 dz00 = _mm256_sub_ps(iz0,jz0);
763 dx01 = _mm256_sub_ps(ix0,jx1);
764 dy01 = _mm256_sub_ps(iy0,jy1);
765 dz01 = _mm256_sub_ps(iz0,jz1);
766 dx02 = _mm256_sub_ps(ix0,jx2);
767 dy02 = _mm256_sub_ps(iy0,jy2);
768 dz02 = _mm256_sub_ps(iz0,jz2);
769 dx10 = _mm256_sub_ps(ix1,jx0);
770 dy10 = _mm256_sub_ps(iy1,jy0);
771 dz10 = _mm256_sub_ps(iz1,jz0);
772 dx11 = _mm256_sub_ps(ix1,jx1);
773 dy11 = _mm256_sub_ps(iy1,jy1);
774 dz11 = _mm256_sub_ps(iz1,jz1);
775 dx12 = _mm256_sub_ps(ix1,jx2);
776 dy12 = _mm256_sub_ps(iy1,jy2);
777 dz12 = _mm256_sub_ps(iz1,jz2);
778 dx20 = _mm256_sub_ps(ix2,jx0);
779 dy20 = _mm256_sub_ps(iy2,jy0);
780 dz20 = _mm256_sub_ps(iz2,jz0);
781 dx21 = _mm256_sub_ps(ix2,jx1);
782 dy21 = _mm256_sub_ps(iy2,jy1);
783 dz21 = _mm256_sub_ps(iz2,jz1);
784 dx22 = _mm256_sub_ps(ix2,jx2);
785 dy22 = _mm256_sub_ps(iy2,jy2);
786 dz22 = _mm256_sub_ps(iz2,jz2);
788 /* Calculate squared distance and things based on it */
789 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
790 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
791 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
792 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
793 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
794 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
795 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
796 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
797 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
799 rinv00 = avx256_invsqrt_f(rsq00);
800 rinv01 = avx256_invsqrt_f(rsq01);
801 rinv02 = avx256_invsqrt_f(rsq02);
802 rinv10 = avx256_invsqrt_f(rsq10);
803 rinv11 = avx256_invsqrt_f(rsq11);
804 rinv12 = avx256_invsqrt_f(rsq12);
805 rinv20 = avx256_invsqrt_f(rsq20);
806 rinv21 = avx256_invsqrt_f(rsq21);
807 rinv22 = avx256_invsqrt_f(rsq22);
809 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
810 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
811 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
812 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
813 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
814 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
815 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
816 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
817 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
819 fjx0 = _mm256_setzero_ps();
820 fjy0 = _mm256_setzero_ps();
821 fjz0 = _mm256_setzero_ps();
822 fjx1 = _mm256_setzero_ps();
823 fjy1 = _mm256_setzero_ps();
824 fjz1 = _mm256_setzero_ps();
825 fjx2 = _mm256_setzero_ps();
826 fjy2 = _mm256_setzero_ps();
827 fjz2 = _mm256_setzero_ps();
829 /**************************
830 * CALCULATE INTERACTIONS *
831 **************************/
833 r00 = _mm256_mul_ps(rsq00,rinv00);
834 r00 = _mm256_andnot_ps(dummy_mask,r00);
836 /* EWALD ELECTROSTATICS */
838 /* Analytical PME correction */
839 zeta2 = _mm256_mul_ps(beta2,rsq00);
840 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
841 pmecorrF = avx256_pmecorrF_f(zeta2);
842 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
843 felec = _mm256_mul_ps(qq00,felec);
844 pmecorrV = avx256_pmecorrV_f(zeta2);
845 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
846 velec = _mm256_sub_ps(rinv00,pmecorrV);
847 velec = _mm256_mul_ps(qq00,velec);
849 /* Analytical LJ-PME */
850 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
851 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
852 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
853 exponent = avx256_exp_f(ewcljrsq);
854 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
855 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
856 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
857 vvdw6 = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
858 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
859 vvdw = _mm256_sub_ps(_mm256_mul_ps(vvdw12,one_twelfth),_mm256_mul_ps(vvdw6,one_sixth));
860 /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
861 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
863 /* Update potential sum for this i atom from the interaction with this j atom. */
864 velec = _mm256_andnot_ps(dummy_mask,velec);
865 velecsum = _mm256_add_ps(velecsum,velec);
866 vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
867 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
869 fscal = _mm256_add_ps(felec,fvdw);
871 fscal = _mm256_andnot_ps(dummy_mask,fscal);
873 /* Calculate temporary vectorial force */
874 tx = _mm256_mul_ps(fscal,dx00);
875 ty = _mm256_mul_ps(fscal,dy00);
876 tz = _mm256_mul_ps(fscal,dz00);
878 /* Update vectorial force */
879 fix0 = _mm256_add_ps(fix0,tx);
880 fiy0 = _mm256_add_ps(fiy0,ty);
881 fiz0 = _mm256_add_ps(fiz0,tz);
883 fjx0 = _mm256_add_ps(fjx0,tx);
884 fjy0 = _mm256_add_ps(fjy0,ty);
885 fjz0 = _mm256_add_ps(fjz0,tz);
887 /**************************
888 * CALCULATE INTERACTIONS *
889 **************************/
891 r01 = _mm256_mul_ps(rsq01,rinv01);
892 r01 = _mm256_andnot_ps(dummy_mask,r01);
894 /* EWALD ELECTROSTATICS */
896 /* Analytical PME correction */
897 zeta2 = _mm256_mul_ps(beta2,rsq01);
898 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
899 pmecorrF = avx256_pmecorrF_f(zeta2);
900 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
901 felec = _mm256_mul_ps(qq01,felec);
902 pmecorrV = avx256_pmecorrV_f(zeta2);
903 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
904 velec = _mm256_sub_ps(rinv01,pmecorrV);
905 velec = _mm256_mul_ps(qq01,velec);
907 /* Update potential sum for this i atom from the interaction with this j atom. */
908 velec = _mm256_andnot_ps(dummy_mask,velec);
909 velecsum = _mm256_add_ps(velecsum,velec);
913 fscal = _mm256_andnot_ps(dummy_mask,fscal);
915 /* Calculate temporary vectorial force */
916 tx = _mm256_mul_ps(fscal,dx01);
917 ty = _mm256_mul_ps(fscal,dy01);
918 tz = _mm256_mul_ps(fscal,dz01);
920 /* Update vectorial force */
921 fix0 = _mm256_add_ps(fix0,tx);
922 fiy0 = _mm256_add_ps(fiy0,ty);
923 fiz0 = _mm256_add_ps(fiz0,tz);
925 fjx1 = _mm256_add_ps(fjx1,tx);
926 fjy1 = _mm256_add_ps(fjy1,ty);
927 fjz1 = _mm256_add_ps(fjz1,tz);
929 /**************************
930 * CALCULATE INTERACTIONS *
931 **************************/
933 r02 = _mm256_mul_ps(rsq02,rinv02);
934 r02 = _mm256_andnot_ps(dummy_mask,r02);
936 /* EWALD ELECTROSTATICS */
938 /* Analytical PME correction */
939 zeta2 = _mm256_mul_ps(beta2,rsq02);
940 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
941 pmecorrF = avx256_pmecorrF_f(zeta2);
942 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
943 felec = _mm256_mul_ps(qq02,felec);
944 pmecorrV = avx256_pmecorrV_f(zeta2);
945 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
946 velec = _mm256_sub_ps(rinv02,pmecorrV);
947 velec = _mm256_mul_ps(qq02,velec);
949 /* Update potential sum for this i atom from the interaction with this j atom. */
950 velec = _mm256_andnot_ps(dummy_mask,velec);
951 velecsum = _mm256_add_ps(velecsum,velec);
955 fscal = _mm256_andnot_ps(dummy_mask,fscal);
957 /* Calculate temporary vectorial force */
958 tx = _mm256_mul_ps(fscal,dx02);
959 ty = _mm256_mul_ps(fscal,dy02);
960 tz = _mm256_mul_ps(fscal,dz02);
962 /* Update vectorial force */
963 fix0 = _mm256_add_ps(fix0,tx);
964 fiy0 = _mm256_add_ps(fiy0,ty);
965 fiz0 = _mm256_add_ps(fiz0,tz);
967 fjx2 = _mm256_add_ps(fjx2,tx);
968 fjy2 = _mm256_add_ps(fjy2,ty);
969 fjz2 = _mm256_add_ps(fjz2,tz);
971 /**************************
972 * CALCULATE INTERACTIONS *
973 **************************/
975 r10 = _mm256_mul_ps(rsq10,rinv10);
976 r10 = _mm256_andnot_ps(dummy_mask,r10);
978 /* EWALD ELECTROSTATICS */
980 /* Analytical PME correction */
981 zeta2 = _mm256_mul_ps(beta2,rsq10);
982 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
983 pmecorrF = avx256_pmecorrF_f(zeta2);
984 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
985 felec = _mm256_mul_ps(qq10,felec);
986 pmecorrV = avx256_pmecorrV_f(zeta2);
987 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
988 velec = _mm256_sub_ps(rinv10,pmecorrV);
989 velec = _mm256_mul_ps(qq10,velec);
991 /* Update potential sum for this i atom from the interaction with this j atom. */
992 velec = _mm256_andnot_ps(dummy_mask,velec);
993 velecsum = _mm256_add_ps(velecsum,velec);
997 fscal = _mm256_andnot_ps(dummy_mask,fscal);
999 /* Calculate temporary vectorial force */
1000 tx = _mm256_mul_ps(fscal,dx10);
1001 ty = _mm256_mul_ps(fscal,dy10);
1002 tz = _mm256_mul_ps(fscal,dz10);
1004 /* Update vectorial force */
1005 fix1 = _mm256_add_ps(fix1,tx);
1006 fiy1 = _mm256_add_ps(fiy1,ty);
1007 fiz1 = _mm256_add_ps(fiz1,tz);
1009 fjx0 = _mm256_add_ps(fjx0,tx);
1010 fjy0 = _mm256_add_ps(fjy0,ty);
1011 fjz0 = _mm256_add_ps(fjz0,tz);
1013 /**************************
1014 * CALCULATE INTERACTIONS *
1015 **************************/
1017 r11 = _mm256_mul_ps(rsq11,rinv11);
1018 r11 = _mm256_andnot_ps(dummy_mask,r11);
1020 /* EWALD ELECTROSTATICS */
1022 /* Analytical PME correction */
1023 zeta2 = _mm256_mul_ps(beta2,rsq11);
1024 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1025 pmecorrF = avx256_pmecorrF_f(zeta2);
1026 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1027 felec = _mm256_mul_ps(qq11,felec);
1028 pmecorrV = avx256_pmecorrV_f(zeta2);
1029 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1030 velec = _mm256_sub_ps(rinv11,pmecorrV);
1031 velec = _mm256_mul_ps(qq11,velec);
1033 /* Update potential sum for this i atom from the interaction with this j atom. */
1034 velec = _mm256_andnot_ps(dummy_mask,velec);
1035 velecsum = _mm256_add_ps(velecsum,velec);
1039 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1041 /* Calculate temporary vectorial force */
1042 tx = _mm256_mul_ps(fscal,dx11);
1043 ty = _mm256_mul_ps(fscal,dy11);
1044 tz = _mm256_mul_ps(fscal,dz11);
1046 /* Update vectorial force */
1047 fix1 = _mm256_add_ps(fix1,tx);
1048 fiy1 = _mm256_add_ps(fiy1,ty);
1049 fiz1 = _mm256_add_ps(fiz1,tz);
1051 fjx1 = _mm256_add_ps(fjx1,tx);
1052 fjy1 = _mm256_add_ps(fjy1,ty);
1053 fjz1 = _mm256_add_ps(fjz1,tz);
1055 /**************************
1056 * CALCULATE INTERACTIONS *
1057 **************************/
1059 r12 = _mm256_mul_ps(rsq12,rinv12);
1060 r12 = _mm256_andnot_ps(dummy_mask,r12);
1062 /* EWALD ELECTROSTATICS */
1064 /* Analytical PME correction */
1065 zeta2 = _mm256_mul_ps(beta2,rsq12);
1066 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1067 pmecorrF = avx256_pmecorrF_f(zeta2);
1068 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1069 felec = _mm256_mul_ps(qq12,felec);
1070 pmecorrV = avx256_pmecorrV_f(zeta2);
1071 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1072 velec = _mm256_sub_ps(rinv12,pmecorrV);
1073 velec = _mm256_mul_ps(qq12,velec);
1075 /* Update potential sum for this i atom from the interaction with this j atom. */
1076 velec = _mm256_andnot_ps(dummy_mask,velec);
1077 velecsum = _mm256_add_ps(velecsum,velec);
1081 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1083 /* Calculate temporary vectorial force */
1084 tx = _mm256_mul_ps(fscal,dx12);
1085 ty = _mm256_mul_ps(fscal,dy12);
1086 tz = _mm256_mul_ps(fscal,dz12);
1088 /* Update vectorial force */
1089 fix1 = _mm256_add_ps(fix1,tx);
1090 fiy1 = _mm256_add_ps(fiy1,ty);
1091 fiz1 = _mm256_add_ps(fiz1,tz);
1093 fjx2 = _mm256_add_ps(fjx2,tx);
1094 fjy2 = _mm256_add_ps(fjy2,ty);
1095 fjz2 = _mm256_add_ps(fjz2,tz);
1097 /**************************
1098 * CALCULATE INTERACTIONS *
1099 **************************/
1101 r20 = _mm256_mul_ps(rsq20,rinv20);
1102 r20 = _mm256_andnot_ps(dummy_mask,r20);
1104 /* EWALD ELECTROSTATICS */
1106 /* Analytical PME correction */
1107 zeta2 = _mm256_mul_ps(beta2,rsq20);
1108 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1109 pmecorrF = avx256_pmecorrF_f(zeta2);
1110 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1111 felec = _mm256_mul_ps(qq20,felec);
1112 pmecorrV = avx256_pmecorrV_f(zeta2);
1113 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1114 velec = _mm256_sub_ps(rinv20,pmecorrV);
1115 velec = _mm256_mul_ps(qq20,velec);
1117 /* Update potential sum for this i atom from the interaction with this j atom. */
1118 velec = _mm256_andnot_ps(dummy_mask,velec);
1119 velecsum = _mm256_add_ps(velecsum,velec);
1123 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1125 /* Calculate temporary vectorial force */
1126 tx = _mm256_mul_ps(fscal,dx20);
1127 ty = _mm256_mul_ps(fscal,dy20);
1128 tz = _mm256_mul_ps(fscal,dz20);
1130 /* Update vectorial force */
1131 fix2 = _mm256_add_ps(fix2,tx);
1132 fiy2 = _mm256_add_ps(fiy2,ty);
1133 fiz2 = _mm256_add_ps(fiz2,tz);
1135 fjx0 = _mm256_add_ps(fjx0,tx);
1136 fjy0 = _mm256_add_ps(fjy0,ty);
1137 fjz0 = _mm256_add_ps(fjz0,tz);
1139 /**************************
1140 * CALCULATE INTERACTIONS *
1141 **************************/
1143 r21 = _mm256_mul_ps(rsq21,rinv21);
1144 r21 = _mm256_andnot_ps(dummy_mask,r21);
1146 /* EWALD ELECTROSTATICS */
1148 /* Analytical PME correction */
1149 zeta2 = _mm256_mul_ps(beta2,rsq21);
1150 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
1151 pmecorrF = avx256_pmecorrF_f(zeta2);
1152 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1153 felec = _mm256_mul_ps(qq21,felec);
1154 pmecorrV = avx256_pmecorrV_f(zeta2);
1155 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1156 velec = _mm256_sub_ps(rinv21,pmecorrV);
1157 velec = _mm256_mul_ps(qq21,velec);
1159 /* Update potential sum for this i atom from the interaction with this j atom. */
1160 velec = _mm256_andnot_ps(dummy_mask,velec);
1161 velecsum = _mm256_add_ps(velecsum,velec);
1165 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1167 /* Calculate temporary vectorial force */
1168 tx = _mm256_mul_ps(fscal,dx21);
1169 ty = _mm256_mul_ps(fscal,dy21);
1170 tz = _mm256_mul_ps(fscal,dz21);
1172 /* Update vectorial force */
1173 fix2 = _mm256_add_ps(fix2,tx);
1174 fiy2 = _mm256_add_ps(fiy2,ty);
1175 fiz2 = _mm256_add_ps(fiz2,tz);
1177 fjx1 = _mm256_add_ps(fjx1,tx);
1178 fjy1 = _mm256_add_ps(fjy1,ty);
1179 fjz1 = _mm256_add_ps(fjz1,tz);
1181 /**************************
1182 * CALCULATE INTERACTIONS *
1183 **************************/
1185 r22 = _mm256_mul_ps(rsq22,rinv22);
1186 r22 = _mm256_andnot_ps(dummy_mask,r22);
1188 /* EWALD ELECTROSTATICS */
1190 /* Analytical PME correction */
1191 zeta2 = _mm256_mul_ps(beta2,rsq22);
1192 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
1193 pmecorrF = avx256_pmecorrF_f(zeta2);
1194 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1195 felec = _mm256_mul_ps(qq22,felec);
1196 pmecorrV = avx256_pmecorrV_f(zeta2);
1197 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1198 velec = _mm256_sub_ps(rinv22,pmecorrV);
1199 velec = _mm256_mul_ps(qq22,velec);
1201 /* Update potential sum for this i atom from the interaction with this j atom. */
1202 velec = _mm256_andnot_ps(dummy_mask,velec);
1203 velecsum = _mm256_add_ps(velecsum,velec);
1207 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1209 /* Calculate temporary vectorial force */
1210 tx = _mm256_mul_ps(fscal,dx22);
1211 ty = _mm256_mul_ps(fscal,dy22);
1212 tz = _mm256_mul_ps(fscal,dz22);
1214 /* Update vectorial force */
1215 fix2 = _mm256_add_ps(fix2,tx);
1216 fiy2 = _mm256_add_ps(fiy2,ty);
1217 fiz2 = _mm256_add_ps(fiz2,tz);
1219 fjx2 = _mm256_add_ps(fjx2,tx);
1220 fjy2 = _mm256_add_ps(fjy2,ty);
1221 fjz2 = _mm256_add_ps(fjz2,tz);
1223 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1224 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1225 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1226 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1227 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1228 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1229 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1230 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1232 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1233 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1235 /* Inner loop uses 793 flops */
1238 /* End of innermost loop */
1240 gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1241 f+i_coord_offset,fshift+i_shift_offset);
1244 /* Update potential energies */
1245 gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1246 gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
1248 /* Increment number of inner iterations */
1249 inneriter += j_index_end - j_index_start;
1251 /* Outer loop uses 20 flops */
1254 /* Increment number of outer iterations */
1257 /* Update outer/inner flops */
1259 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*793);
1262 * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwLJEw_GeomW3W3_F_avx_256_single
1263 * Electrostatics interaction: Ewald
1264 * VdW interaction: LJEwald
1265 * Geometry: Water3-Water3
1266 * Calculate force/pot: Force
1269 nb_kernel_ElecEw_VdwLJEw_GeomW3W3_F_avx_256_single
1270 (t_nblist * gmx_restrict nlist,
1271 rvec * gmx_restrict xx,
1272 rvec * gmx_restrict ff,
1273 struct t_forcerec * gmx_restrict fr,
1274 t_mdatoms * gmx_restrict mdatoms,
1275 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1276 t_nrnb * gmx_restrict nrnb)
1278 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1279 * just 0 for non-waters.
1280 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1281 * jnr indices corresponding to data put in the four positions in the SIMD register.
1283 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1284 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1285 int jnrA,jnrB,jnrC,jnrD;
1286 int jnrE,jnrF,jnrG,jnrH;
1287 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1288 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1289 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1290 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1291 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1292 real rcutoff_scalar;
1293 real *shiftvec,*fshift,*x,*f;
1294 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1295 real scratch[4*DIM];
1296 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1297 real * vdwioffsetptr0;
1298 real * vdwgridioffsetptr0;
1299 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1300 real * vdwioffsetptr1;
1301 real * vdwgridioffsetptr1;
1302 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1303 real * vdwioffsetptr2;
1304 real * vdwgridioffsetptr2;
1305 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1306 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1307 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1308 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1309 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1310 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1311 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1312 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1313 __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1314 __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1315 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1316 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1317 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1318 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1319 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1320 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1321 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
1324 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1327 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
1328 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
1339 __m256 ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
1340 __m256 one_half = _mm256_set1_ps(0.5);
1341 __m256 minus_one = _mm256_set1_ps(-1.0);
1343 __m128i ewitab_lo,ewitab_hi;
1344 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1345 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1347 __m256 dummy_mask,cutoff_mask;
1348 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1349 __m256 one = _mm256_set1_ps(1.0);
1350 __m256 two = _mm256_set1_ps(2.0);
1356 jindex = nlist->jindex;
1358 shiftidx = nlist->shift;
1360 shiftvec = fr->shift_vec[0];
1361 fshift = fr->fshift[0];
1362 facel = _mm256_set1_ps(fr->ic->epsfac);
1363 charge = mdatoms->chargeA;
1364 nvdwtype = fr->ntype;
1365 vdwparam = fr->nbfp;
1366 vdwtype = mdatoms->typeA;
1367 vdwgridparam = fr->ljpme_c6grid;
1368 sh_lj_ewald = _mm256_set1_ps(fr->ic->sh_lj_ewald);
1369 ewclj = _mm256_set1_ps(fr->ic->ewaldcoeff_lj);
1370 ewclj2 = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
1372 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
1373 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1374 beta2 = _mm256_mul_ps(beta,beta);
1375 beta3 = _mm256_mul_ps(beta,beta2);
1377 ewtab = fr->ic->tabq_coul_F;
1378 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
1379 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1381 /* Setup water-specific parameters */
1382 inr = nlist->iinr[0];
1383 iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1384 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1385 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1386 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1387 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
1389 jq0 = _mm256_set1_ps(charge[inr+0]);
1390 jq1 = _mm256_set1_ps(charge[inr+1]);
1391 jq2 = _mm256_set1_ps(charge[inr+2]);
1392 vdwjidx0A = 2*vdwtype[inr+0];
1393 qq00 = _mm256_mul_ps(iq0,jq0);
1394 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1395 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1396 c6grid_00 = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
1397 qq01 = _mm256_mul_ps(iq0,jq1);
1398 qq02 = _mm256_mul_ps(iq0,jq2);
1399 qq10 = _mm256_mul_ps(iq1,jq0);
1400 qq11 = _mm256_mul_ps(iq1,jq1);
1401 qq12 = _mm256_mul_ps(iq1,jq2);
1402 qq20 = _mm256_mul_ps(iq2,jq0);
1403 qq21 = _mm256_mul_ps(iq2,jq1);
1404 qq22 = _mm256_mul_ps(iq2,jq2);
1406 /* Avoid stupid compiler warnings */
1407 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1408 j_coord_offsetA = 0;
1409 j_coord_offsetB = 0;
1410 j_coord_offsetC = 0;
1411 j_coord_offsetD = 0;
1412 j_coord_offsetE = 0;
1413 j_coord_offsetF = 0;
1414 j_coord_offsetG = 0;
1415 j_coord_offsetH = 0;
1420 for(iidx=0;iidx<4*DIM;iidx++)
1422 scratch[iidx] = 0.0;
1425 /* Start outer loop over neighborlists */
1426 for(iidx=0; iidx<nri; iidx++)
1428 /* Load shift vector for this list */
1429 i_shift_offset = DIM*shiftidx[iidx];
1431 /* Load limits for loop over neighbors */
1432 j_index_start = jindex[iidx];
1433 j_index_end = jindex[iidx+1];
1435 /* Get outer coordinate index */
1437 i_coord_offset = DIM*inr;
1439 /* Load i particle coords and add shift vector */
1440 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1441 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1443 fix0 = _mm256_setzero_ps();
1444 fiy0 = _mm256_setzero_ps();
1445 fiz0 = _mm256_setzero_ps();
1446 fix1 = _mm256_setzero_ps();
1447 fiy1 = _mm256_setzero_ps();
1448 fiz1 = _mm256_setzero_ps();
1449 fix2 = _mm256_setzero_ps();
1450 fiy2 = _mm256_setzero_ps();
1451 fiz2 = _mm256_setzero_ps();
1453 /* Start inner kernel loop */
1454 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1457 /* Get j neighbor index, and coordinate index */
1459 jnrB = jjnr[jidx+1];
1460 jnrC = jjnr[jidx+2];
1461 jnrD = jjnr[jidx+3];
1462 jnrE = jjnr[jidx+4];
1463 jnrF = jjnr[jidx+5];
1464 jnrG = jjnr[jidx+6];
1465 jnrH = jjnr[jidx+7];
1466 j_coord_offsetA = DIM*jnrA;
1467 j_coord_offsetB = DIM*jnrB;
1468 j_coord_offsetC = DIM*jnrC;
1469 j_coord_offsetD = DIM*jnrD;
1470 j_coord_offsetE = DIM*jnrE;
1471 j_coord_offsetF = DIM*jnrF;
1472 j_coord_offsetG = DIM*jnrG;
1473 j_coord_offsetH = DIM*jnrH;
1475 /* load j atom coordinates */
1476 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1477 x+j_coord_offsetC,x+j_coord_offsetD,
1478 x+j_coord_offsetE,x+j_coord_offsetF,
1479 x+j_coord_offsetG,x+j_coord_offsetH,
1480 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1482 /* Calculate displacement vector */
1483 dx00 = _mm256_sub_ps(ix0,jx0);
1484 dy00 = _mm256_sub_ps(iy0,jy0);
1485 dz00 = _mm256_sub_ps(iz0,jz0);
1486 dx01 = _mm256_sub_ps(ix0,jx1);
1487 dy01 = _mm256_sub_ps(iy0,jy1);
1488 dz01 = _mm256_sub_ps(iz0,jz1);
1489 dx02 = _mm256_sub_ps(ix0,jx2);
1490 dy02 = _mm256_sub_ps(iy0,jy2);
1491 dz02 = _mm256_sub_ps(iz0,jz2);
1492 dx10 = _mm256_sub_ps(ix1,jx0);
1493 dy10 = _mm256_sub_ps(iy1,jy0);
1494 dz10 = _mm256_sub_ps(iz1,jz0);
1495 dx11 = _mm256_sub_ps(ix1,jx1);
1496 dy11 = _mm256_sub_ps(iy1,jy1);
1497 dz11 = _mm256_sub_ps(iz1,jz1);
1498 dx12 = _mm256_sub_ps(ix1,jx2);
1499 dy12 = _mm256_sub_ps(iy1,jy2);
1500 dz12 = _mm256_sub_ps(iz1,jz2);
1501 dx20 = _mm256_sub_ps(ix2,jx0);
1502 dy20 = _mm256_sub_ps(iy2,jy0);
1503 dz20 = _mm256_sub_ps(iz2,jz0);
1504 dx21 = _mm256_sub_ps(ix2,jx1);
1505 dy21 = _mm256_sub_ps(iy2,jy1);
1506 dz21 = _mm256_sub_ps(iz2,jz1);
1507 dx22 = _mm256_sub_ps(ix2,jx2);
1508 dy22 = _mm256_sub_ps(iy2,jy2);
1509 dz22 = _mm256_sub_ps(iz2,jz2);
1511 /* Calculate squared distance and things based on it */
1512 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1513 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1514 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1515 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1516 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1517 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1518 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1519 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1520 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1522 rinv00 = avx256_invsqrt_f(rsq00);
1523 rinv01 = avx256_invsqrt_f(rsq01);
1524 rinv02 = avx256_invsqrt_f(rsq02);
1525 rinv10 = avx256_invsqrt_f(rsq10);
1526 rinv11 = avx256_invsqrt_f(rsq11);
1527 rinv12 = avx256_invsqrt_f(rsq12);
1528 rinv20 = avx256_invsqrt_f(rsq20);
1529 rinv21 = avx256_invsqrt_f(rsq21);
1530 rinv22 = avx256_invsqrt_f(rsq22);
1532 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1533 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
1534 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
1535 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
1536 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
1537 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
1538 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
1539 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
1540 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
1542 fjx0 = _mm256_setzero_ps();
1543 fjy0 = _mm256_setzero_ps();
1544 fjz0 = _mm256_setzero_ps();
1545 fjx1 = _mm256_setzero_ps();
1546 fjy1 = _mm256_setzero_ps();
1547 fjz1 = _mm256_setzero_ps();
1548 fjx2 = _mm256_setzero_ps();
1549 fjy2 = _mm256_setzero_ps();
1550 fjz2 = _mm256_setzero_ps();
1552 /**************************
1553 * CALCULATE INTERACTIONS *
1554 **************************/
1556 r00 = _mm256_mul_ps(rsq00,rinv00);
1558 /* EWALD ELECTROSTATICS */
1560 /* Analytical PME correction */
1561 zeta2 = _mm256_mul_ps(beta2,rsq00);
1562 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
1563 pmecorrF = avx256_pmecorrF_f(zeta2);
1564 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1565 felec = _mm256_mul_ps(qq00,felec);
1567 /* Analytical LJ-PME */
1568 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1569 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
1570 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1571 exponent = avx256_exp_f(ewcljrsq);
1572 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1573 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1574 /* f6A = 6 * C6grid * (1 - poly) */
1575 f6A = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1576 /* f6B = C6grid * exponent * beta^6 */
1577 f6B = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1578 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1579 fvdw = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1581 fscal = _mm256_add_ps(felec,fvdw);
1583 /* Calculate temporary vectorial force */
1584 tx = _mm256_mul_ps(fscal,dx00);
1585 ty = _mm256_mul_ps(fscal,dy00);
1586 tz = _mm256_mul_ps(fscal,dz00);
1588 /* Update vectorial force */
1589 fix0 = _mm256_add_ps(fix0,tx);
1590 fiy0 = _mm256_add_ps(fiy0,ty);
1591 fiz0 = _mm256_add_ps(fiz0,tz);
1593 fjx0 = _mm256_add_ps(fjx0,tx);
1594 fjy0 = _mm256_add_ps(fjy0,ty);
1595 fjz0 = _mm256_add_ps(fjz0,tz);
1597 /**************************
1598 * CALCULATE INTERACTIONS *
1599 **************************/
1601 r01 = _mm256_mul_ps(rsq01,rinv01);
1603 /* EWALD ELECTROSTATICS */
1605 /* Analytical PME correction */
1606 zeta2 = _mm256_mul_ps(beta2,rsq01);
1607 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
1608 pmecorrF = avx256_pmecorrF_f(zeta2);
1609 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1610 felec = _mm256_mul_ps(qq01,felec);
1614 /* Calculate temporary vectorial force */
1615 tx = _mm256_mul_ps(fscal,dx01);
1616 ty = _mm256_mul_ps(fscal,dy01);
1617 tz = _mm256_mul_ps(fscal,dz01);
1619 /* Update vectorial force */
1620 fix0 = _mm256_add_ps(fix0,tx);
1621 fiy0 = _mm256_add_ps(fiy0,ty);
1622 fiz0 = _mm256_add_ps(fiz0,tz);
1624 fjx1 = _mm256_add_ps(fjx1,tx);
1625 fjy1 = _mm256_add_ps(fjy1,ty);
1626 fjz1 = _mm256_add_ps(fjz1,tz);
1628 /**************************
1629 * CALCULATE INTERACTIONS *
1630 **************************/
1632 r02 = _mm256_mul_ps(rsq02,rinv02);
1634 /* EWALD ELECTROSTATICS */
1636 /* Analytical PME correction */
1637 zeta2 = _mm256_mul_ps(beta2,rsq02);
1638 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
1639 pmecorrF = avx256_pmecorrF_f(zeta2);
1640 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1641 felec = _mm256_mul_ps(qq02,felec);
1645 /* Calculate temporary vectorial force */
1646 tx = _mm256_mul_ps(fscal,dx02);
1647 ty = _mm256_mul_ps(fscal,dy02);
1648 tz = _mm256_mul_ps(fscal,dz02);
1650 /* Update vectorial force */
1651 fix0 = _mm256_add_ps(fix0,tx);
1652 fiy0 = _mm256_add_ps(fiy0,ty);
1653 fiz0 = _mm256_add_ps(fiz0,tz);
1655 fjx2 = _mm256_add_ps(fjx2,tx);
1656 fjy2 = _mm256_add_ps(fjy2,ty);
1657 fjz2 = _mm256_add_ps(fjz2,tz);
1659 /**************************
1660 * CALCULATE INTERACTIONS *
1661 **************************/
1663 r10 = _mm256_mul_ps(rsq10,rinv10);
1665 /* EWALD ELECTROSTATICS */
1667 /* Analytical PME correction */
1668 zeta2 = _mm256_mul_ps(beta2,rsq10);
1669 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1670 pmecorrF = avx256_pmecorrF_f(zeta2);
1671 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1672 felec = _mm256_mul_ps(qq10,felec);
1676 /* Calculate temporary vectorial force */
1677 tx = _mm256_mul_ps(fscal,dx10);
1678 ty = _mm256_mul_ps(fscal,dy10);
1679 tz = _mm256_mul_ps(fscal,dz10);
1681 /* Update vectorial force */
1682 fix1 = _mm256_add_ps(fix1,tx);
1683 fiy1 = _mm256_add_ps(fiy1,ty);
1684 fiz1 = _mm256_add_ps(fiz1,tz);
1686 fjx0 = _mm256_add_ps(fjx0,tx);
1687 fjy0 = _mm256_add_ps(fjy0,ty);
1688 fjz0 = _mm256_add_ps(fjz0,tz);
1690 /**************************
1691 * CALCULATE INTERACTIONS *
1692 **************************/
1694 r11 = _mm256_mul_ps(rsq11,rinv11);
1696 /* EWALD ELECTROSTATICS */
1698 /* Analytical PME correction */
1699 zeta2 = _mm256_mul_ps(beta2,rsq11);
1700 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1701 pmecorrF = avx256_pmecorrF_f(zeta2);
1702 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1703 felec = _mm256_mul_ps(qq11,felec);
1707 /* Calculate temporary vectorial force */
1708 tx = _mm256_mul_ps(fscal,dx11);
1709 ty = _mm256_mul_ps(fscal,dy11);
1710 tz = _mm256_mul_ps(fscal,dz11);
1712 /* Update vectorial force */
1713 fix1 = _mm256_add_ps(fix1,tx);
1714 fiy1 = _mm256_add_ps(fiy1,ty);
1715 fiz1 = _mm256_add_ps(fiz1,tz);
1717 fjx1 = _mm256_add_ps(fjx1,tx);
1718 fjy1 = _mm256_add_ps(fjy1,ty);
1719 fjz1 = _mm256_add_ps(fjz1,tz);
1721 /**************************
1722 * CALCULATE INTERACTIONS *
1723 **************************/
1725 r12 = _mm256_mul_ps(rsq12,rinv12);
1727 /* EWALD ELECTROSTATICS */
1729 /* Analytical PME correction */
1730 zeta2 = _mm256_mul_ps(beta2,rsq12);
1731 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1732 pmecorrF = avx256_pmecorrF_f(zeta2);
1733 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1734 felec = _mm256_mul_ps(qq12,felec);
1738 /* Calculate temporary vectorial force */
1739 tx = _mm256_mul_ps(fscal,dx12);
1740 ty = _mm256_mul_ps(fscal,dy12);
1741 tz = _mm256_mul_ps(fscal,dz12);
1743 /* Update vectorial force */
1744 fix1 = _mm256_add_ps(fix1,tx);
1745 fiy1 = _mm256_add_ps(fiy1,ty);
1746 fiz1 = _mm256_add_ps(fiz1,tz);
1748 fjx2 = _mm256_add_ps(fjx2,tx);
1749 fjy2 = _mm256_add_ps(fjy2,ty);
1750 fjz2 = _mm256_add_ps(fjz2,tz);
1752 /**************************
1753 * CALCULATE INTERACTIONS *
1754 **************************/
1756 r20 = _mm256_mul_ps(rsq20,rinv20);
1758 /* EWALD ELECTROSTATICS */
1760 /* Analytical PME correction */
1761 zeta2 = _mm256_mul_ps(beta2,rsq20);
1762 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1763 pmecorrF = avx256_pmecorrF_f(zeta2);
1764 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1765 felec = _mm256_mul_ps(qq20,felec);
1769 /* Calculate temporary vectorial force */
1770 tx = _mm256_mul_ps(fscal,dx20);
1771 ty = _mm256_mul_ps(fscal,dy20);
1772 tz = _mm256_mul_ps(fscal,dz20);
1774 /* Update vectorial force */
1775 fix2 = _mm256_add_ps(fix2,tx);
1776 fiy2 = _mm256_add_ps(fiy2,ty);
1777 fiz2 = _mm256_add_ps(fiz2,tz);
1779 fjx0 = _mm256_add_ps(fjx0,tx);
1780 fjy0 = _mm256_add_ps(fjy0,ty);
1781 fjz0 = _mm256_add_ps(fjz0,tz);
1783 /**************************
1784 * CALCULATE INTERACTIONS *
1785 **************************/
1787 r21 = _mm256_mul_ps(rsq21,rinv21);
1789 /* EWALD ELECTROSTATICS */
1791 /* Analytical PME correction */
1792 zeta2 = _mm256_mul_ps(beta2,rsq21);
1793 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
1794 pmecorrF = avx256_pmecorrF_f(zeta2);
1795 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1796 felec = _mm256_mul_ps(qq21,felec);
1800 /* Calculate temporary vectorial force */
1801 tx = _mm256_mul_ps(fscal,dx21);
1802 ty = _mm256_mul_ps(fscal,dy21);
1803 tz = _mm256_mul_ps(fscal,dz21);
1805 /* Update vectorial force */
1806 fix2 = _mm256_add_ps(fix2,tx);
1807 fiy2 = _mm256_add_ps(fiy2,ty);
1808 fiz2 = _mm256_add_ps(fiz2,tz);
1810 fjx1 = _mm256_add_ps(fjx1,tx);
1811 fjy1 = _mm256_add_ps(fjy1,ty);
1812 fjz1 = _mm256_add_ps(fjz1,tz);
1814 /**************************
1815 * CALCULATE INTERACTIONS *
1816 **************************/
1818 r22 = _mm256_mul_ps(rsq22,rinv22);
1820 /* EWALD ELECTROSTATICS */
1822 /* Analytical PME correction */
1823 zeta2 = _mm256_mul_ps(beta2,rsq22);
1824 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
1825 pmecorrF = avx256_pmecorrF_f(zeta2);
1826 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1827 felec = _mm256_mul_ps(qq22,felec);
1831 /* Calculate temporary vectorial force */
1832 tx = _mm256_mul_ps(fscal,dx22);
1833 ty = _mm256_mul_ps(fscal,dy22);
1834 tz = _mm256_mul_ps(fscal,dz22);
1836 /* Update vectorial force */
1837 fix2 = _mm256_add_ps(fix2,tx);
1838 fiy2 = _mm256_add_ps(fiy2,ty);
1839 fiz2 = _mm256_add_ps(fiz2,tz);
1841 fjx2 = _mm256_add_ps(fjx2,tx);
1842 fjy2 = _mm256_add_ps(fjy2,ty);
1843 fjz2 = _mm256_add_ps(fjz2,tz);
1845 fjptrA = f+j_coord_offsetA;
1846 fjptrB = f+j_coord_offsetB;
1847 fjptrC = f+j_coord_offsetC;
1848 fjptrD = f+j_coord_offsetD;
1849 fjptrE = f+j_coord_offsetE;
1850 fjptrF = f+j_coord_offsetF;
1851 fjptrG = f+j_coord_offsetG;
1852 fjptrH = f+j_coord_offsetH;
1854 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1855 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1857 /* Inner loop uses 527 flops */
1860 if(jidx<j_index_end)
1863 /* Get j neighbor index, and coordinate index */
1864 jnrlistA = jjnr[jidx];
1865 jnrlistB = jjnr[jidx+1];
1866 jnrlistC = jjnr[jidx+2];
1867 jnrlistD = jjnr[jidx+3];
1868 jnrlistE = jjnr[jidx+4];
1869 jnrlistF = jjnr[jidx+5];
1870 jnrlistG = jjnr[jidx+6];
1871 jnrlistH = jjnr[jidx+7];
1872 /* Sign of each element will be negative for non-real atoms.
1873 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1874 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
1876 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1877 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1879 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1880 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1881 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1882 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1883 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
1884 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
1885 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
1886 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
1887 j_coord_offsetA = DIM*jnrA;
1888 j_coord_offsetB = DIM*jnrB;
            /* Convert the remaining j atom indices to coordinate-array offsets
             * (DIM reals per atom; offsets A/B were computed above this chunk). */
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates: 3 sites (0,1,2) for each of the 8 j molecules,
             * swizzled into per-coordinate AVX registers */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector: dxIJ = iI - jJ for all 3x3 site pairs */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            /* 1/r for every site pair (fast inverse square root) */
            rinv00           = avx256_invsqrt_f(rsq00);
            rinv01           = avx256_invsqrt_f(rsq01);
            rinv02           = avx256_invsqrt_f(rsq02);
            rinv10           = avx256_invsqrt_f(rsq10);
            rinv11           = avx256_invsqrt_f(rsq11);
            rinv12           = avx256_invsqrt_f(rsq12);
            rinv20           = avx256_invsqrt_f(rsq20);
            rinv21           = avx256_invsqrt_f(rsq21);
            rinv22           = avx256_invsqrt_f(rsq22);

            /* 1/r^2 from (1/r)*(1/r) */
            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            /* Zero the per-iteration accumulators for forces on the j sites;
             * they are flushed to memory once, after all 9 pair interactions. */
            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* r = rsq * (1/r); andnot with dummy_mask clears lanes that are
             * padding in this (final, partially filled) j chunk */
            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);

            /* Analytical LJ-PME (site pair 0-0 is the only one with VdW parameters) */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = avx256_exp_f(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* f6A = 6 * C6grid * (1 - poly) */
            f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
            /* f6B = C6grid * exponent * beta^6 */
            f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
            /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
            fvdw             = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);

            /* Total scalar force for this pair: electrostatic + van der Waals */
            fscal            = _mm256_add_ps(felec,fvdw);

            /* Zero the force in padding lanes */
            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);
2021 /**************************
2022 * CALCULATE INTERACTIONS *
2023 **************************/
2025 r01 = _mm256_mul_ps(rsq01,rinv01);
2026 r01 = _mm256_andnot_ps(dummy_mask,r01);
2028 /* EWALD ELECTROSTATICS */
2030 /* Analytical PME correction */
2031 zeta2 = _mm256_mul_ps(beta2,rsq01);
2032 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
2033 pmecorrF = avx256_pmecorrF_f(zeta2);
2034 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2035 felec = _mm256_mul_ps(qq01,felec);
2039 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2041 /* Calculate temporary vectorial force */
2042 tx = _mm256_mul_ps(fscal,dx01);
2043 ty = _mm256_mul_ps(fscal,dy01);
2044 tz = _mm256_mul_ps(fscal,dz01);
2046 /* Update vectorial force */
2047 fix0 = _mm256_add_ps(fix0,tx);
2048 fiy0 = _mm256_add_ps(fiy0,ty);
2049 fiz0 = _mm256_add_ps(fiz0,tz);
2051 fjx1 = _mm256_add_ps(fjx1,tx);
2052 fjy1 = _mm256_add_ps(fjy1,ty);
2053 fjz1 = _mm256_add_ps(fjz1,tz);
2055 /**************************
2056 * CALCULATE INTERACTIONS *
2057 **************************/
2059 r02 = _mm256_mul_ps(rsq02,rinv02);
2060 r02 = _mm256_andnot_ps(dummy_mask,r02);
2062 /* EWALD ELECTROSTATICS */
2064 /* Analytical PME correction */
2065 zeta2 = _mm256_mul_ps(beta2,rsq02);
2066 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
2067 pmecorrF = avx256_pmecorrF_f(zeta2);
2068 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2069 felec = _mm256_mul_ps(qq02,felec);
2073 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2075 /* Calculate temporary vectorial force */
2076 tx = _mm256_mul_ps(fscal,dx02);
2077 ty = _mm256_mul_ps(fscal,dy02);
2078 tz = _mm256_mul_ps(fscal,dz02);
2080 /* Update vectorial force */
2081 fix0 = _mm256_add_ps(fix0,tx);
2082 fiy0 = _mm256_add_ps(fiy0,ty);
2083 fiz0 = _mm256_add_ps(fiz0,tz);
2085 fjx2 = _mm256_add_ps(fjx2,tx);
2086 fjy2 = _mm256_add_ps(fjy2,ty);
2087 fjz2 = _mm256_add_ps(fjz2,tz);
2089 /**************************
2090 * CALCULATE INTERACTIONS *
2091 **************************/
2093 r10 = _mm256_mul_ps(rsq10,rinv10);
2094 r10 = _mm256_andnot_ps(dummy_mask,r10);
2096 /* EWALD ELECTROSTATICS */
2098 /* Analytical PME correction */
2099 zeta2 = _mm256_mul_ps(beta2,rsq10);
2100 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
2101 pmecorrF = avx256_pmecorrF_f(zeta2);
2102 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2103 felec = _mm256_mul_ps(qq10,felec);
2107 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2109 /* Calculate temporary vectorial force */
2110 tx = _mm256_mul_ps(fscal,dx10);
2111 ty = _mm256_mul_ps(fscal,dy10);
2112 tz = _mm256_mul_ps(fscal,dz10);
2114 /* Update vectorial force */
2115 fix1 = _mm256_add_ps(fix1,tx);
2116 fiy1 = _mm256_add_ps(fiy1,ty);
2117 fiz1 = _mm256_add_ps(fiz1,tz);
2119 fjx0 = _mm256_add_ps(fjx0,tx);
2120 fjy0 = _mm256_add_ps(fjy0,ty);
2121 fjz0 = _mm256_add_ps(fjz0,tz);
2123 /**************************
2124 * CALCULATE INTERACTIONS *
2125 **************************/
2127 r11 = _mm256_mul_ps(rsq11,rinv11);
2128 r11 = _mm256_andnot_ps(dummy_mask,r11);
2130 /* EWALD ELECTROSTATICS */
2132 /* Analytical PME correction */
2133 zeta2 = _mm256_mul_ps(beta2,rsq11);
2134 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
2135 pmecorrF = avx256_pmecorrF_f(zeta2);
2136 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2137 felec = _mm256_mul_ps(qq11,felec);
2141 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2143 /* Calculate temporary vectorial force */
2144 tx = _mm256_mul_ps(fscal,dx11);
2145 ty = _mm256_mul_ps(fscal,dy11);
2146 tz = _mm256_mul_ps(fscal,dz11);
2148 /* Update vectorial force */
2149 fix1 = _mm256_add_ps(fix1,tx);
2150 fiy1 = _mm256_add_ps(fiy1,ty);
2151 fiz1 = _mm256_add_ps(fiz1,tz);
2153 fjx1 = _mm256_add_ps(fjx1,tx);
2154 fjy1 = _mm256_add_ps(fjy1,ty);
2155 fjz1 = _mm256_add_ps(fjz1,tz);
2157 /**************************
2158 * CALCULATE INTERACTIONS *
2159 **************************/
2161 r12 = _mm256_mul_ps(rsq12,rinv12);
2162 r12 = _mm256_andnot_ps(dummy_mask,r12);
2164 /* EWALD ELECTROSTATICS */
2166 /* Analytical PME correction */
2167 zeta2 = _mm256_mul_ps(beta2,rsq12);
2168 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
2169 pmecorrF = avx256_pmecorrF_f(zeta2);
2170 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2171 felec = _mm256_mul_ps(qq12,felec);
2175 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2177 /* Calculate temporary vectorial force */
2178 tx = _mm256_mul_ps(fscal,dx12);
2179 ty = _mm256_mul_ps(fscal,dy12);
2180 tz = _mm256_mul_ps(fscal,dz12);
2182 /* Update vectorial force */
2183 fix1 = _mm256_add_ps(fix1,tx);
2184 fiy1 = _mm256_add_ps(fiy1,ty);
2185 fiz1 = _mm256_add_ps(fiz1,tz);
2187 fjx2 = _mm256_add_ps(fjx2,tx);
2188 fjy2 = _mm256_add_ps(fjy2,ty);
2189 fjz2 = _mm256_add_ps(fjz2,tz);
2191 /**************************
2192 * CALCULATE INTERACTIONS *
2193 **************************/
2195 r20 = _mm256_mul_ps(rsq20,rinv20);
2196 r20 = _mm256_andnot_ps(dummy_mask,r20);
2198 /* EWALD ELECTROSTATICS */
2200 /* Analytical PME correction */
2201 zeta2 = _mm256_mul_ps(beta2,rsq20);
2202 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
2203 pmecorrF = avx256_pmecorrF_f(zeta2);
2204 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2205 felec = _mm256_mul_ps(qq20,felec);
2209 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2211 /* Calculate temporary vectorial force */
2212 tx = _mm256_mul_ps(fscal,dx20);
2213 ty = _mm256_mul_ps(fscal,dy20);
2214 tz = _mm256_mul_ps(fscal,dz20);
2216 /* Update vectorial force */
2217 fix2 = _mm256_add_ps(fix2,tx);
2218 fiy2 = _mm256_add_ps(fiy2,ty);
2219 fiz2 = _mm256_add_ps(fiz2,tz);
2221 fjx0 = _mm256_add_ps(fjx0,tx);
2222 fjy0 = _mm256_add_ps(fjy0,ty);
2223 fjz0 = _mm256_add_ps(fjz0,tz);
2225 /**************************
2226 * CALCULATE INTERACTIONS *
2227 **************************/
2229 r21 = _mm256_mul_ps(rsq21,rinv21);
2230 r21 = _mm256_andnot_ps(dummy_mask,r21);
2232 /* EWALD ELECTROSTATICS */
2234 /* Analytical PME correction */
2235 zeta2 = _mm256_mul_ps(beta2,rsq21);
2236 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
2237 pmecorrF = avx256_pmecorrF_f(zeta2);
2238 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2239 felec = _mm256_mul_ps(qq21,felec);
2243 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2245 /* Calculate temporary vectorial force */
2246 tx = _mm256_mul_ps(fscal,dx21);
2247 ty = _mm256_mul_ps(fscal,dy21);
2248 tz = _mm256_mul_ps(fscal,dz21);
2250 /* Update vectorial force */
2251 fix2 = _mm256_add_ps(fix2,tx);
2252 fiy2 = _mm256_add_ps(fiy2,ty);
2253 fiz2 = _mm256_add_ps(fiz2,tz);
2255 fjx1 = _mm256_add_ps(fjx1,tx);
2256 fjy1 = _mm256_add_ps(fjy1,ty);
2257 fjz1 = _mm256_add_ps(fjz1,tz);
2259 /**************************
2260 * CALCULATE INTERACTIONS *
2261 **************************/
2263 r22 = _mm256_mul_ps(rsq22,rinv22);
2264 r22 = _mm256_andnot_ps(dummy_mask,r22);
2266 /* EWALD ELECTROSTATICS */
2268 /* Analytical PME correction */
2269 zeta2 = _mm256_mul_ps(beta2,rsq22);
2270 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
2271 pmecorrF = avx256_pmecorrF_f(zeta2);
2272 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2273 felec = _mm256_mul_ps(qq22,felec);
2277 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2279 /* Calculate temporary vectorial force */
2280 tx = _mm256_mul_ps(fscal,dx22);
2281 ty = _mm256_mul_ps(fscal,dy22);
2282 tz = _mm256_mul_ps(fscal,dz22);
2284 /* Update vectorial force */
2285 fix2 = _mm256_add_ps(fix2,tx);
2286 fiy2 = _mm256_add_ps(fiy2,ty);
2287 fiz2 = _mm256_add_ps(fiz2,tz);
2289 fjx2 = _mm256_add_ps(fjx2,tx);
2290 fjy2 = _mm256_add_ps(fjy2,ty);
2291 fjz2 = _mm256_add_ps(fjz2,tz);
            /* Route each j molecule's force update either to the real force
             * array or to a scratch buffer when the neighbor-list entry is a
             * padding index (jnrlist < 0). */
            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

            /* Subtract the accumulated pair forces from the j sites
             * (equal and opposite to the i-side accumulation above). */
            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2305 /* Inner loop uses 536 flops */
2308 /* End of innermost loop */
2310 gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2311 f+i_coord_offset,fshift+i_shift_offset);
2313 /* Increment number of inner iterations */
2314 inneriter += j_index_end - j_index_start;
2316 /* Outer loop uses 18 flops */
2319 /* Increment number of outer iterations */
2322 /* Update outer/inner flops */
2324 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*536);