2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
47 #include "kernelutil_x86_avx_256_single.h"
50 * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_single
51 * Electrostatics interaction: Ewald
52 * VdW interaction: None
53 * Geometry: Water4-Water4
54 * Calculate force/pot: PotentialAndForce
57 nb_kernel_ElecEw_VdwNone_GeomW4W4_VF_avx_256_single
58 (t_nblist * gmx_restrict nlist,
59 rvec * gmx_restrict xx,
60 rvec * gmx_restrict ff,
61 struct t_forcerec * gmx_restrict fr,
62 t_mdatoms * gmx_restrict mdatoms,
63 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64 t_nrnb * gmx_restrict nrnb)
66 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
67 * just 0 for non-waters.
68 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
69 * jnr indices corresponding to data put in the four positions in the SIMD register.
71 int i_shift_offset,i_coord_offset,outeriter,inneriter;
72 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73 int jnrA,jnrB,jnrC,jnrD;
74 int jnrE,jnrF,jnrG,jnrH;
75 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
79 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
81 real *shiftvec,*fshift,*x,*f;
82 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
84 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
85 real * vdwioffsetptr1;
86 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
87 real * vdwioffsetptr2;
88 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
89 real * vdwioffsetptr3;
90 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
91 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
92 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
93 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
94 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
95 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
96 __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
97 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
98 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
99 __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
100 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
101 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
102 __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
103 __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
104 __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
105 __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
106 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
109 __m128i ewitab_lo,ewitab_hi;
110 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
111 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
113 __m256 dummy_mask,cutoff_mask;
114 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
115 __m256 one = _mm256_set1_ps(1.0);
116 __m256 two = _mm256_set1_ps(2.0);
122 jindex = nlist->jindex;
124 shiftidx = nlist->shift;
126 shiftvec = fr->shift_vec[0];
127 fshift = fr->fshift[0];
128 facel = _mm256_set1_ps(fr->ic->epsfac);
129 charge = mdatoms->chargeA;
131 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
132 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
133 beta2 = _mm256_mul_ps(beta,beta);
134 beta3 = _mm256_mul_ps(beta,beta2);
136 ewtab = fr->ic->tabq_coul_FDV0;
137 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
138 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
140 /* Setup water-specific parameters */
141 inr = nlist->iinr[0];
142 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
143 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
144 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
146 jq1 = _mm256_set1_ps(charge[inr+1]);
147 jq2 = _mm256_set1_ps(charge[inr+2]);
148 jq3 = _mm256_set1_ps(charge[inr+3]);
149 qq11 = _mm256_mul_ps(iq1,jq1);
150 qq12 = _mm256_mul_ps(iq1,jq2);
151 qq13 = _mm256_mul_ps(iq1,jq3);
152 qq21 = _mm256_mul_ps(iq2,jq1);
153 qq22 = _mm256_mul_ps(iq2,jq2);
154 qq23 = _mm256_mul_ps(iq2,jq3);
155 qq31 = _mm256_mul_ps(iq3,jq1);
156 qq32 = _mm256_mul_ps(iq3,jq2);
157 qq33 = _mm256_mul_ps(iq3,jq3);
159 /* Avoid stupid compiler warnings */
160 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
173 for(iidx=0;iidx<4*DIM;iidx++)
178 /* Start outer loop over neighborlists */
179 for(iidx=0; iidx<nri; iidx++)
181 /* Load shift vector for this list */
182 i_shift_offset = DIM*shiftidx[iidx];
184 /* Load limits for loop over neighbors */
185 j_index_start = jindex[iidx];
186 j_index_end = jindex[iidx+1];
188 /* Get outer coordinate index */
190 i_coord_offset = DIM*inr;
192 /* Load i particle coords and add shift vector */
193 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
194 &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
196 fix1 = _mm256_setzero_ps();
197 fiy1 = _mm256_setzero_ps();
198 fiz1 = _mm256_setzero_ps();
199 fix2 = _mm256_setzero_ps();
200 fiy2 = _mm256_setzero_ps();
201 fiz2 = _mm256_setzero_ps();
202 fix3 = _mm256_setzero_ps();
203 fiy3 = _mm256_setzero_ps();
204 fiz3 = _mm256_setzero_ps();
206 /* Reset potential sums */
207 velecsum = _mm256_setzero_ps();
209 /* Start inner kernel loop */
210 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
213 /* Get j neighbor index, and coordinate index */
222 j_coord_offsetA = DIM*jnrA;
223 j_coord_offsetB = DIM*jnrB;
224 j_coord_offsetC = DIM*jnrC;
225 j_coord_offsetD = DIM*jnrD;
226 j_coord_offsetE = DIM*jnrE;
227 j_coord_offsetF = DIM*jnrF;
228 j_coord_offsetG = DIM*jnrG;
229 j_coord_offsetH = DIM*jnrH;
231 /* load j atom coordinates */
232 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
233 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
234 x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
235 x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
236 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
238 /* Calculate displacement vector */
239 dx11 = _mm256_sub_ps(ix1,jx1);
240 dy11 = _mm256_sub_ps(iy1,jy1);
241 dz11 = _mm256_sub_ps(iz1,jz1);
242 dx12 = _mm256_sub_ps(ix1,jx2);
243 dy12 = _mm256_sub_ps(iy1,jy2);
244 dz12 = _mm256_sub_ps(iz1,jz2);
245 dx13 = _mm256_sub_ps(ix1,jx3);
246 dy13 = _mm256_sub_ps(iy1,jy3);
247 dz13 = _mm256_sub_ps(iz1,jz3);
248 dx21 = _mm256_sub_ps(ix2,jx1);
249 dy21 = _mm256_sub_ps(iy2,jy1);
250 dz21 = _mm256_sub_ps(iz2,jz1);
251 dx22 = _mm256_sub_ps(ix2,jx2);
252 dy22 = _mm256_sub_ps(iy2,jy2);
253 dz22 = _mm256_sub_ps(iz2,jz2);
254 dx23 = _mm256_sub_ps(ix2,jx3);
255 dy23 = _mm256_sub_ps(iy2,jy3);
256 dz23 = _mm256_sub_ps(iz2,jz3);
257 dx31 = _mm256_sub_ps(ix3,jx1);
258 dy31 = _mm256_sub_ps(iy3,jy1);
259 dz31 = _mm256_sub_ps(iz3,jz1);
260 dx32 = _mm256_sub_ps(ix3,jx2);
261 dy32 = _mm256_sub_ps(iy3,jy2);
262 dz32 = _mm256_sub_ps(iz3,jz2);
263 dx33 = _mm256_sub_ps(ix3,jx3);
264 dy33 = _mm256_sub_ps(iy3,jy3);
265 dz33 = _mm256_sub_ps(iz3,jz3);
267 /* Calculate squared distance and things based on it */
268 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
269 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
270 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
271 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
272 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
273 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
274 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
275 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
276 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
278 rinv11 = avx256_invsqrt_f(rsq11);
279 rinv12 = avx256_invsqrt_f(rsq12);
280 rinv13 = avx256_invsqrt_f(rsq13);
281 rinv21 = avx256_invsqrt_f(rsq21);
282 rinv22 = avx256_invsqrt_f(rsq22);
283 rinv23 = avx256_invsqrt_f(rsq23);
284 rinv31 = avx256_invsqrt_f(rsq31);
285 rinv32 = avx256_invsqrt_f(rsq32);
286 rinv33 = avx256_invsqrt_f(rsq33);
288 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
289 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
290 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
291 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
292 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
293 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
294 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
295 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
296 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
298 fjx1 = _mm256_setzero_ps();
299 fjy1 = _mm256_setzero_ps();
300 fjz1 = _mm256_setzero_ps();
301 fjx2 = _mm256_setzero_ps();
302 fjy2 = _mm256_setzero_ps();
303 fjz2 = _mm256_setzero_ps();
304 fjx3 = _mm256_setzero_ps();
305 fjy3 = _mm256_setzero_ps();
306 fjz3 = _mm256_setzero_ps();
308 /**************************
309 * CALCULATE INTERACTIONS *
310 **************************/
312 r11 = _mm256_mul_ps(rsq11,rinv11);
314 /* EWALD ELECTROSTATICS */
316 /* Analytical PME correction */
317 zeta2 = _mm256_mul_ps(beta2,rsq11);
318 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
319 pmecorrF = avx256_pmecorrF_f(zeta2);
320 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
321 felec = _mm256_mul_ps(qq11,felec);
322 pmecorrV = avx256_pmecorrV_f(zeta2);
323 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
324 velec = _mm256_sub_ps(rinv11,pmecorrV);
325 velec = _mm256_mul_ps(qq11,velec);
327 /* Update potential sum for this i atom from the interaction with this j atom. */
328 velecsum = _mm256_add_ps(velecsum,velec);
332 /* Calculate temporary vectorial force */
333 tx = _mm256_mul_ps(fscal,dx11);
334 ty = _mm256_mul_ps(fscal,dy11);
335 tz = _mm256_mul_ps(fscal,dz11);
337 /* Update vectorial force */
338 fix1 = _mm256_add_ps(fix1,tx);
339 fiy1 = _mm256_add_ps(fiy1,ty);
340 fiz1 = _mm256_add_ps(fiz1,tz);
342 fjx1 = _mm256_add_ps(fjx1,tx);
343 fjy1 = _mm256_add_ps(fjy1,ty);
344 fjz1 = _mm256_add_ps(fjz1,tz);
346 /**************************
347 * CALCULATE INTERACTIONS *
348 **************************/
350 r12 = _mm256_mul_ps(rsq12,rinv12);
352 /* EWALD ELECTROSTATICS */
354 /* Analytical PME correction */
355 zeta2 = _mm256_mul_ps(beta2,rsq12);
356 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
357 pmecorrF = avx256_pmecorrF_f(zeta2);
358 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
359 felec = _mm256_mul_ps(qq12,felec);
360 pmecorrV = avx256_pmecorrV_f(zeta2);
361 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
362 velec = _mm256_sub_ps(rinv12,pmecorrV);
363 velec = _mm256_mul_ps(qq12,velec);
365 /* Update potential sum for this i atom from the interaction with this j atom. */
366 velecsum = _mm256_add_ps(velecsum,velec);
370 /* Calculate temporary vectorial force */
371 tx = _mm256_mul_ps(fscal,dx12);
372 ty = _mm256_mul_ps(fscal,dy12);
373 tz = _mm256_mul_ps(fscal,dz12);
375 /* Update vectorial force */
376 fix1 = _mm256_add_ps(fix1,tx);
377 fiy1 = _mm256_add_ps(fiy1,ty);
378 fiz1 = _mm256_add_ps(fiz1,tz);
380 fjx2 = _mm256_add_ps(fjx2,tx);
381 fjy2 = _mm256_add_ps(fjy2,ty);
382 fjz2 = _mm256_add_ps(fjz2,tz);
384 /**************************
385 * CALCULATE INTERACTIONS *
386 **************************/
388 r13 = _mm256_mul_ps(rsq13,rinv13);
390 /* EWALD ELECTROSTATICS */
392 /* Analytical PME correction */
393 zeta2 = _mm256_mul_ps(beta2,rsq13);
394 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
395 pmecorrF = avx256_pmecorrF_f(zeta2);
396 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
397 felec = _mm256_mul_ps(qq13,felec);
398 pmecorrV = avx256_pmecorrV_f(zeta2);
399 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
400 velec = _mm256_sub_ps(rinv13,pmecorrV);
401 velec = _mm256_mul_ps(qq13,velec);
403 /* Update potential sum for this i atom from the interaction with this j atom. */
404 velecsum = _mm256_add_ps(velecsum,velec);
408 /* Calculate temporary vectorial force */
409 tx = _mm256_mul_ps(fscal,dx13);
410 ty = _mm256_mul_ps(fscal,dy13);
411 tz = _mm256_mul_ps(fscal,dz13);
413 /* Update vectorial force */
414 fix1 = _mm256_add_ps(fix1,tx);
415 fiy1 = _mm256_add_ps(fiy1,ty);
416 fiz1 = _mm256_add_ps(fiz1,tz);
418 fjx3 = _mm256_add_ps(fjx3,tx);
419 fjy3 = _mm256_add_ps(fjy3,ty);
420 fjz3 = _mm256_add_ps(fjz3,tz);
422 /**************************
423 * CALCULATE INTERACTIONS *
424 **************************/
426 r21 = _mm256_mul_ps(rsq21,rinv21);
428 /* EWALD ELECTROSTATICS */
430 /* Analytical PME correction */
431 zeta2 = _mm256_mul_ps(beta2,rsq21);
432 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
433 pmecorrF = avx256_pmecorrF_f(zeta2);
434 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
435 felec = _mm256_mul_ps(qq21,felec);
436 pmecorrV = avx256_pmecorrV_f(zeta2);
437 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
438 velec = _mm256_sub_ps(rinv21,pmecorrV);
439 velec = _mm256_mul_ps(qq21,velec);
441 /* Update potential sum for this i atom from the interaction with this j atom. */
442 velecsum = _mm256_add_ps(velecsum,velec);
446 /* Calculate temporary vectorial force */
447 tx = _mm256_mul_ps(fscal,dx21);
448 ty = _mm256_mul_ps(fscal,dy21);
449 tz = _mm256_mul_ps(fscal,dz21);
451 /* Update vectorial force */
452 fix2 = _mm256_add_ps(fix2,tx);
453 fiy2 = _mm256_add_ps(fiy2,ty);
454 fiz2 = _mm256_add_ps(fiz2,tz);
456 fjx1 = _mm256_add_ps(fjx1,tx);
457 fjy1 = _mm256_add_ps(fjy1,ty);
458 fjz1 = _mm256_add_ps(fjz1,tz);
460 /**************************
461 * CALCULATE INTERACTIONS *
462 **************************/
464 r22 = _mm256_mul_ps(rsq22,rinv22);
466 /* EWALD ELECTROSTATICS */
468 /* Analytical PME correction */
469 zeta2 = _mm256_mul_ps(beta2,rsq22);
470 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
471 pmecorrF = avx256_pmecorrF_f(zeta2);
472 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
473 felec = _mm256_mul_ps(qq22,felec);
474 pmecorrV = avx256_pmecorrV_f(zeta2);
475 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
476 velec = _mm256_sub_ps(rinv22,pmecorrV);
477 velec = _mm256_mul_ps(qq22,velec);
479 /* Update potential sum for this i atom from the interaction with this j atom. */
480 velecsum = _mm256_add_ps(velecsum,velec);
484 /* Calculate temporary vectorial force */
485 tx = _mm256_mul_ps(fscal,dx22);
486 ty = _mm256_mul_ps(fscal,dy22);
487 tz = _mm256_mul_ps(fscal,dz22);
489 /* Update vectorial force */
490 fix2 = _mm256_add_ps(fix2,tx);
491 fiy2 = _mm256_add_ps(fiy2,ty);
492 fiz2 = _mm256_add_ps(fiz2,tz);
494 fjx2 = _mm256_add_ps(fjx2,tx);
495 fjy2 = _mm256_add_ps(fjy2,ty);
496 fjz2 = _mm256_add_ps(fjz2,tz);
498 /**************************
499 * CALCULATE INTERACTIONS *
500 **************************/
502 r23 = _mm256_mul_ps(rsq23,rinv23);
504 /* EWALD ELECTROSTATICS */
506 /* Analytical PME correction */
507 zeta2 = _mm256_mul_ps(beta2,rsq23);
508 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
509 pmecorrF = avx256_pmecorrF_f(zeta2);
510 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
511 felec = _mm256_mul_ps(qq23,felec);
512 pmecorrV = avx256_pmecorrV_f(zeta2);
513 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
514 velec = _mm256_sub_ps(rinv23,pmecorrV);
515 velec = _mm256_mul_ps(qq23,velec);
517 /* Update potential sum for this i atom from the interaction with this j atom. */
518 velecsum = _mm256_add_ps(velecsum,velec);
522 /* Calculate temporary vectorial force */
523 tx = _mm256_mul_ps(fscal,dx23);
524 ty = _mm256_mul_ps(fscal,dy23);
525 tz = _mm256_mul_ps(fscal,dz23);
527 /* Update vectorial force */
528 fix2 = _mm256_add_ps(fix2,tx);
529 fiy2 = _mm256_add_ps(fiy2,ty);
530 fiz2 = _mm256_add_ps(fiz2,tz);
532 fjx3 = _mm256_add_ps(fjx3,tx);
533 fjy3 = _mm256_add_ps(fjy3,ty);
534 fjz3 = _mm256_add_ps(fjz3,tz);
536 /**************************
537 * CALCULATE INTERACTIONS *
538 **************************/
540 r31 = _mm256_mul_ps(rsq31,rinv31);
542 /* EWALD ELECTROSTATICS */
544 /* Analytical PME correction */
545 zeta2 = _mm256_mul_ps(beta2,rsq31);
546 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
547 pmecorrF = avx256_pmecorrF_f(zeta2);
548 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
549 felec = _mm256_mul_ps(qq31,felec);
550 pmecorrV = avx256_pmecorrV_f(zeta2);
551 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
552 velec = _mm256_sub_ps(rinv31,pmecorrV);
553 velec = _mm256_mul_ps(qq31,velec);
555 /* Update potential sum for this i atom from the interaction with this j atom. */
556 velecsum = _mm256_add_ps(velecsum,velec);
560 /* Calculate temporary vectorial force */
561 tx = _mm256_mul_ps(fscal,dx31);
562 ty = _mm256_mul_ps(fscal,dy31);
563 tz = _mm256_mul_ps(fscal,dz31);
565 /* Update vectorial force */
566 fix3 = _mm256_add_ps(fix3,tx);
567 fiy3 = _mm256_add_ps(fiy3,ty);
568 fiz3 = _mm256_add_ps(fiz3,tz);
570 fjx1 = _mm256_add_ps(fjx1,tx);
571 fjy1 = _mm256_add_ps(fjy1,ty);
572 fjz1 = _mm256_add_ps(fjz1,tz);
574 /**************************
575 * CALCULATE INTERACTIONS *
576 **************************/
578 r32 = _mm256_mul_ps(rsq32,rinv32);
580 /* EWALD ELECTROSTATICS */
582 /* Analytical PME correction */
583 zeta2 = _mm256_mul_ps(beta2,rsq32);
584 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
585 pmecorrF = avx256_pmecorrF_f(zeta2);
586 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
587 felec = _mm256_mul_ps(qq32,felec);
588 pmecorrV = avx256_pmecorrV_f(zeta2);
589 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
590 velec = _mm256_sub_ps(rinv32,pmecorrV);
591 velec = _mm256_mul_ps(qq32,velec);
593 /* Update potential sum for this i atom from the interaction with this j atom. */
594 velecsum = _mm256_add_ps(velecsum,velec);
598 /* Calculate temporary vectorial force */
599 tx = _mm256_mul_ps(fscal,dx32);
600 ty = _mm256_mul_ps(fscal,dy32);
601 tz = _mm256_mul_ps(fscal,dz32);
603 /* Update vectorial force */
604 fix3 = _mm256_add_ps(fix3,tx);
605 fiy3 = _mm256_add_ps(fiy3,ty);
606 fiz3 = _mm256_add_ps(fiz3,tz);
608 fjx2 = _mm256_add_ps(fjx2,tx);
609 fjy2 = _mm256_add_ps(fjy2,ty);
610 fjz2 = _mm256_add_ps(fjz2,tz);
612 /**************************
613 * CALCULATE INTERACTIONS *
614 **************************/
616 r33 = _mm256_mul_ps(rsq33,rinv33);
618 /* EWALD ELECTROSTATICS */
620 /* Analytical PME correction */
621 zeta2 = _mm256_mul_ps(beta2,rsq33);
622 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
623 pmecorrF = avx256_pmecorrF_f(zeta2);
624 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
625 felec = _mm256_mul_ps(qq33,felec);
626 pmecorrV = avx256_pmecorrV_f(zeta2);
627 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
628 velec = _mm256_sub_ps(rinv33,pmecorrV);
629 velec = _mm256_mul_ps(qq33,velec);
631 /* Update potential sum for this i atom from the interaction with this j atom. */
632 velecsum = _mm256_add_ps(velecsum,velec);
636 /* Calculate temporary vectorial force */
637 tx = _mm256_mul_ps(fscal,dx33);
638 ty = _mm256_mul_ps(fscal,dy33);
639 tz = _mm256_mul_ps(fscal,dz33);
641 /* Update vectorial force */
642 fix3 = _mm256_add_ps(fix3,tx);
643 fiy3 = _mm256_add_ps(fiy3,ty);
644 fiz3 = _mm256_add_ps(fiz3,tz);
646 fjx3 = _mm256_add_ps(fjx3,tx);
647 fjy3 = _mm256_add_ps(fjy3,ty);
648 fjz3 = _mm256_add_ps(fjz3,tz);
650 fjptrA = f+j_coord_offsetA;
651 fjptrB = f+j_coord_offsetB;
652 fjptrC = f+j_coord_offsetC;
653 fjptrD = f+j_coord_offsetD;
654 fjptrE = f+j_coord_offsetE;
655 fjptrF = f+j_coord_offsetF;
656 fjptrG = f+j_coord_offsetG;
657 fjptrH = f+j_coord_offsetH;
659 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
660 fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
661 fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
663 /* Inner loop uses 756 flops */
669 /* Get j neighbor index, and coordinate index */
670 jnrlistA = jjnr[jidx];
671 jnrlistB = jjnr[jidx+1];
672 jnrlistC = jjnr[jidx+2];
673 jnrlistD = jjnr[jidx+3];
674 jnrlistE = jjnr[jidx+4];
675 jnrlistF = jjnr[jidx+5];
676 jnrlistG = jjnr[jidx+6];
677 jnrlistH = jjnr[jidx+7];
678 /* Sign of each element will be negative for non-real atoms.
679 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
680 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
682 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
683 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
685 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
686 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
687 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
688 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
689 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
690 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
691 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
692 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
693 j_coord_offsetA = DIM*jnrA;
694 j_coord_offsetB = DIM*jnrB;
695 j_coord_offsetC = DIM*jnrC;
696 j_coord_offsetD = DIM*jnrD;
697 j_coord_offsetE = DIM*jnrE;
698 j_coord_offsetF = DIM*jnrF;
699 j_coord_offsetG = DIM*jnrG;
700 j_coord_offsetH = DIM*jnrH;
702 /* load j atom coordinates */
703 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
704 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
705 x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
706 x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
707 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
709 /* Calculate displacement vector */
710 dx11 = _mm256_sub_ps(ix1,jx1);
711 dy11 = _mm256_sub_ps(iy1,jy1);
712 dz11 = _mm256_sub_ps(iz1,jz1);
713 dx12 = _mm256_sub_ps(ix1,jx2);
714 dy12 = _mm256_sub_ps(iy1,jy2);
715 dz12 = _mm256_sub_ps(iz1,jz2);
716 dx13 = _mm256_sub_ps(ix1,jx3);
717 dy13 = _mm256_sub_ps(iy1,jy3);
718 dz13 = _mm256_sub_ps(iz1,jz3);
719 dx21 = _mm256_sub_ps(ix2,jx1);
720 dy21 = _mm256_sub_ps(iy2,jy1);
721 dz21 = _mm256_sub_ps(iz2,jz1);
722 dx22 = _mm256_sub_ps(ix2,jx2);
723 dy22 = _mm256_sub_ps(iy2,jy2);
724 dz22 = _mm256_sub_ps(iz2,jz2);
725 dx23 = _mm256_sub_ps(ix2,jx3);
726 dy23 = _mm256_sub_ps(iy2,jy3);
727 dz23 = _mm256_sub_ps(iz2,jz3);
728 dx31 = _mm256_sub_ps(ix3,jx1);
729 dy31 = _mm256_sub_ps(iy3,jy1);
730 dz31 = _mm256_sub_ps(iz3,jz1);
731 dx32 = _mm256_sub_ps(ix3,jx2);
732 dy32 = _mm256_sub_ps(iy3,jy2);
733 dz32 = _mm256_sub_ps(iz3,jz2);
734 dx33 = _mm256_sub_ps(ix3,jx3);
735 dy33 = _mm256_sub_ps(iy3,jy3);
736 dz33 = _mm256_sub_ps(iz3,jz3);
738 /* Calculate squared distance and things based on it */
739 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
740 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
741 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
742 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
743 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
744 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
745 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
746 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
747 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
749 rinv11 = avx256_invsqrt_f(rsq11);
750 rinv12 = avx256_invsqrt_f(rsq12);
751 rinv13 = avx256_invsqrt_f(rsq13);
752 rinv21 = avx256_invsqrt_f(rsq21);
753 rinv22 = avx256_invsqrt_f(rsq22);
754 rinv23 = avx256_invsqrt_f(rsq23);
755 rinv31 = avx256_invsqrt_f(rsq31);
756 rinv32 = avx256_invsqrt_f(rsq32);
757 rinv33 = avx256_invsqrt_f(rsq33);
759 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
760 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
761 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
762 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
763 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
764 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
765 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
766 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
767 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
769 fjx1 = _mm256_setzero_ps();
770 fjy1 = _mm256_setzero_ps();
771 fjz1 = _mm256_setzero_ps();
772 fjx2 = _mm256_setzero_ps();
773 fjy2 = _mm256_setzero_ps();
774 fjz2 = _mm256_setzero_ps();
775 fjx3 = _mm256_setzero_ps();
776 fjy3 = _mm256_setzero_ps();
777 fjz3 = _mm256_setzero_ps();
779 /**************************
780 * CALCULATE INTERACTIONS *
781 **************************/
783 r11 = _mm256_mul_ps(rsq11,rinv11);
784 r11 = _mm256_andnot_ps(dummy_mask,r11);
786 /* EWALD ELECTROSTATICS */
788 /* Analytical PME correction */
789 zeta2 = _mm256_mul_ps(beta2,rsq11);
790 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
791 pmecorrF = avx256_pmecorrF_f(zeta2);
792 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
793 felec = _mm256_mul_ps(qq11,felec);
794 pmecorrV = avx256_pmecorrV_f(zeta2);
795 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
796 velec = _mm256_sub_ps(rinv11,pmecorrV);
797 velec = _mm256_mul_ps(qq11,velec);
799 /* Update potential sum for this i atom from the interaction with this j atom. */
800 velec = _mm256_andnot_ps(dummy_mask,velec);
801 velecsum = _mm256_add_ps(velecsum,velec);
805 fscal = _mm256_andnot_ps(dummy_mask,fscal);
807 /* Calculate temporary vectorial force */
808 tx = _mm256_mul_ps(fscal,dx11);
809 ty = _mm256_mul_ps(fscal,dy11);
810 tz = _mm256_mul_ps(fscal,dz11);
812 /* Update vectorial force */
813 fix1 = _mm256_add_ps(fix1,tx);
814 fiy1 = _mm256_add_ps(fiy1,ty);
815 fiz1 = _mm256_add_ps(fiz1,tz);
817 fjx1 = _mm256_add_ps(fjx1,tx);
818 fjy1 = _mm256_add_ps(fjy1,ty);
819 fjz1 = _mm256_add_ps(fjz1,tz);
821 /**************************
822 * CALCULATE INTERACTIONS *
823 **************************/
825 r12 = _mm256_mul_ps(rsq12,rinv12);
826 r12 = _mm256_andnot_ps(dummy_mask,r12);
828 /* EWALD ELECTROSTATICS */
830 /* Analytical PME correction */
831 zeta2 = _mm256_mul_ps(beta2,rsq12);
832 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
833 pmecorrF = avx256_pmecorrF_f(zeta2);
834 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
835 felec = _mm256_mul_ps(qq12,felec);
836 pmecorrV = avx256_pmecorrV_f(zeta2);
837 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
838 velec = _mm256_sub_ps(rinv12,pmecorrV);
839 velec = _mm256_mul_ps(qq12,velec);
841 /* Update potential sum for this i atom from the interaction with this j atom. */
842 velec = _mm256_andnot_ps(dummy_mask,velec);
843 velecsum = _mm256_add_ps(velecsum,velec);
847 fscal = _mm256_andnot_ps(dummy_mask,fscal);
849 /* Calculate temporary vectorial force */
850 tx = _mm256_mul_ps(fscal,dx12);
851 ty = _mm256_mul_ps(fscal,dy12);
852 tz = _mm256_mul_ps(fscal,dz12);
854 /* Update vectorial force */
855 fix1 = _mm256_add_ps(fix1,tx);
856 fiy1 = _mm256_add_ps(fiy1,ty);
857 fiz1 = _mm256_add_ps(fiz1,tz);
859 fjx2 = _mm256_add_ps(fjx2,tx);
860 fjy2 = _mm256_add_ps(fjy2,ty);
861 fjz2 = _mm256_add_ps(fjz2,tz);
863 /**************************
864 * CALCULATE INTERACTIONS *
865 **************************/
867 r13 = _mm256_mul_ps(rsq13,rinv13);
868 r13 = _mm256_andnot_ps(dummy_mask,r13);
870 /* EWALD ELECTROSTATICS */
872 /* Analytical PME correction */
873 zeta2 = _mm256_mul_ps(beta2,rsq13);
874 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
875 pmecorrF = avx256_pmecorrF_f(zeta2);
876 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
877 felec = _mm256_mul_ps(qq13,felec);
878 pmecorrV = avx256_pmecorrV_f(zeta2);
879 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
880 velec = _mm256_sub_ps(rinv13,pmecorrV);
881 velec = _mm256_mul_ps(qq13,velec);
883 /* Update potential sum for this i atom from the interaction with this j atom. */
884 velec = _mm256_andnot_ps(dummy_mask,velec);
885 velecsum = _mm256_add_ps(velecsum,velec);
889 fscal = _mm256_andnot_ps(dummy_mask,fscal);
891 /* Calculate temporary vectorial force */
892 tx = _mm256_mul_ps(fscal,dx13);
893 ty = _mm256_mul_ps(fscal,dy13);
894 tz = _mm256_mul_ps(fscal,dz13);
896 /* Update vectorial force */
897 fix1 = _mm256_add_ps(fix1,tx);
898 fiy1 = _mm256_add_ps(fiy1,ty);
899 fiz1 = _mm256_add_ps(fiz1,tz);
901 fjx3 = _mm256_add_ps(fjx3,tx);
902 fjy3 = _mm256_add_ps(fjy3,ty);
903 fjz3 = _mm256_add_ps(fjz3,tz);
905 /**************************
906 * CALCULATE INTERACTIONS *
907 **************************/
909 r21 = _mm256_mul_ps(rsq21,rinv21);
910 r21 = _mm256_andnot_ps(dummy_mask,r21);
912 /* EWALD ELECTROSTATICS */
914 /* Analytical PME correction */
915 zeta2 = _mm256_mul_ps(beta2,rsq21);
916 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
917 pmecorrF = avx256_pmecorrF_f(zeta2);
918 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
919 felec = _mm256_mul_ps(qq21,felec);
920 pmecorrV = avx256_pmecorrV_f(zeta2);
921 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
922 velec = _mm256_sub_ps(rinv21,pmecorrV);
923 velec = _mm256_mul_ps(qq21,velec);
925 /* Update potential sum for this i atom from the interaction with this j atom. */
926 velec = _mm256_andnot_ps(dummy_mask,velec);
927 velecsum = _mm256_add_ps(velecsum,velec);
931 fscal = _mm256_andnot_ps(dummy_mask,fscal);
933 /* Calculate temporary vectorial force */
934 tx = _mm256_mul_ps(fscal,dx21);
935 ty = _mm256_mul_ps(fscal,dy21);
936 tz = _mm256_mul_ps(fscal,dz21);
938 /* Update vectorial force */
939 fix2 = _mm256_add_ps(fix2,tx);
940 fiy2 = _mm256_add_ps(fiy2,ty);
941 fiz2 = _mm256_add_ps(fiz2,tz);
943 fjx1 = _mm256_add_ps(fjx1,tx);
944 fjy1 = _mm256_add_ps(fjy1,ty);
945 fjz1 = _mm256_add_ps(fjz1,tz);
947 /**************************
948 * CALCULATE INTERACTIONS *
949 **************************/
951 r22 = _mm256_mul_ps(rsq22,rinv22);
952 r22 = _mm256_andnot_ps(dummy_mask,r22);
954 /* EWALD ELECTROSTATICS */
956 /* Analytical PME correction */
957 zeta2 = _mm256_mul_ps(beta2,rsq22);
958 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
959 pmecorrF = avx256_pmecorrF_f(zeta2);
960 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
961 felec = _mm256_mul_ps(qq22,felec);
962 pmecorrV = avx256_pmecorrV_f(zeta2);
963 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
964 velec = _mm256_sub_ps(rinv22,pmecorrV);
965 velec = _mm256_mul_ps(qq22,velec);
967 /* Update potential sum for this i atom from the interaction with this j atom. */
968 velec = _mm256_andnot_ps(dummy_mask,velec);
969 velecsum = _mm256_add_ps(velecsum,velec);
973 fscal = _mm256_andnot_ps(dummy_mask,fscal);
975 /* Calculate temporary vectorial force */
976 tx = _mm256_mul_ps(fscal,dx22);
977 ty = _mm256_mul_ps(fscal,dy22);
978 tz = _mm256_mul_ps(fscal,dz22);
980 /* Update vectorial force */
981 fix2 = _mm256_add_ps(fix2,tx);
982 fiy2 = _mm256_add_ps(fiy2,ty);
983 fiz2 = _mm256_add_ps(fiz2,tz);
985 fjx2 = _mm256_add_ps(fjx2,tx);
986 fjy2 = _mm256_add_ps(fjy2,ty);
987 fjz2 = _mm256_add_ps(fjz2,tz);
989 /**************************
990 * CALCULATE INTERACTIONS *
991 **************************/
993 r23 = _mm256_mul_ps(rsq23,rinv23);
994 r23 = _mm256_andnot_ps(dummy_mask,r23);
996 /* EWALD ELECTROSTATICS */
998 /* Analytical PME correction */
999 zeta2 = _mm256_mul_ps(beta2,rsq23);
1000 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
1001 pmecorrF = avx256_pmecorrF_f(zeta2);
1002 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1003 felec = _mm256_mul_ps(qq23,felec);
1004 pmecorrV = avx256_pmecorrV_f(zeta2);
1005 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1006 velec = _mm256_sub_ps(rinv23,pmecorrV);
1007 velec = _mm256_mul_ps(qq23,velec);
1009 /* Update potential sum for this i atom from the interaction with this j atom. */
1010 velec = _mm256_andnot_ps(dummy_mask,velec);
1011 velecsum = _mm256_add_ps(velecsum,velec);
1015 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1017 /* Calculate temporary vectorial force */
1018 tx = _mm256_mul_ps(fscal,dx23);
1019 ty = _mm256_mul_ps(fscal,dy23);
1020 tz = _mm256_mul_ps(fscal,dz23);
1022 /* Update vectorial force */
1023 fix2 = _mm256_add_ps(fix2,tx);
1024 fiy2 = _mm256_add_ps(fiy2,ty);
1025 fiz2 = _mm256_add_ps(fiz2,tz);
1027 fjx3 = _mm256_add_ps(fjx3,tx);
1028 fjy3 = _mm256_add_ps(fjy3,ty);
1029 fjz3 = _mm256_add_ps(fjz3,tz);
1031 /**************************
1032 * CALCULATE INTERACTIONS *
1033 **************************/
1035 r31 = _mm256_mul_ps(rsq31,rinv31);
1036 r31 = _mm256_andnot_ps(dummy_mask,r31);
1038 /* EWALD ELECTROSTATICS */
1040 /* Analytical PME correction */
1041 zeta2 = _mm256_mul_ps(beta2,rsq31);
1042 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
1043 pmecorrF = avx256_pmecorrF_f(zeta2);
1044 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1045 felec = _mm256_mul_ps(qq31,felec);
1046 pmecorrV = avx256_pmecorrV_f(zeta2);
1047 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1048 velec = _mm256_sub_ps(rinv31,pmecorrV);
1049 velec = _mm256_mul_ps(qq31,velec);
1051 /* Update potential sum for this i atom from the interaction with this j atom. */
1052 velec = _mm256_andnot_ps(dummy_mask,velec);
1053 velecsum = _mm256_add_ps(velecsum,velec);
1057 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1059 /* Calculate temporary vectorial force */
1060 tx = _mm256_mul_ps(fscal,dx31);
1061 ty = _mm256_mul_ps(fscal,dy31);
1062 tz = _mm256_mul_ps(fscal,dz31);
1064 /* Update vectorial force */
1065 fix3 = _mm256_add_ps(fix3,tx);
1066 fiy3 = _mm256_add_ps(fiy3,ty);
1067 fiz3 = _mm256_add_ps(fiz3,tz);
1069 fjx1 = _mm256_add_ps(fjx1,tx);
1070 fjy1 = _mm256_add_ps(fjy1,ty);
1071 fjz1 = _mm256_add_ps(fjz1,tz);
1073 /**************************
1074 * CALCULATE INTERACTIONS *
1075 **************************/
1077 r32 = _mm256_mul_ps(rsq32,rinv32);
1078 r32 = _mm256_andnot_ps(dummy_mask,r32);
1080 /* EWALD ELECTROSTATICS */
1082 /* Analytical PME correction */
1083 zeta2 = _mm256_mul_ps(beta2,rsq32);
1084 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
1085 pmecorrF = avx256_pmecorrF_f(zeta2);
1086 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1087 felec = _mm256_mul_ps(qq32,felec);
1088 pmecorrV = avx256_pmecorrV_f(zeta2);
1089 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1090 velec = _mm256_sub_ps(rinv32,pmecorrV);
1091 velec = _mm256_mul_ps(qq32,velec);
1093 /* Update potential sum for this i atom from the interaction with this j atom. */
1094 velec = _mm256_andnot_ps(dummy_mask,velec);
1095 velecsum = _mm256_add_ps(velecsum,velec);
1099 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1101 /* Calculate temporary vectorial force */
1102 tx = _mm256_mul_ps(fscal,dx32);
1103 ty = _mm256_mul_ps(fscal,dy32);
1104 tz = _mm256_mul_ps(fscal,dz32);
1106 /* Update vectorial force */
1107 fix3 = _mm256_add_ps(fix3,tx);
1108 fiy3 = _mm256_add_ps(fiy3,ty);
1109 fiz3 = _mm256_add_ps(fiz3,tz);
1111 fjx2 = _mm256_add_ps(fjx2,tx);
1112 fjy2 = _mm256_add_ps(fjy2,ty);
1113 fjz2 = _mm256_add_ps(fjz2,tz);
1115 /**************************
1116 * CALCULATE INTERACTIONS *
1117 **************************/
1119 r33 = _mm256_mul_ps(rsq33,rinv33);
1120 r33 = _mm256_andnot_ps(dummy_mask,r33);
1122 /* EWALD ELECTROSTATICS */
1124 /* Analytical PME correction */
1125 zeta2 = _mm256_mul_ps(beta2,rsq33);
1126 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
1127 pmecorrF = avx256_pmecorrF_f(zeta2);
1128 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1129 felec = _mm256_mul_ps(qq33,felec);
1130 pmecorrV = avx256_pmecorrV_f(zeta2);
1131 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1132 velec = _mm256_sub_ps(rinv33,pmecorrV);
1133 velec = _mm256_mul_ps(qq33,velec);
1135 /* Update potential sum for this i atom from the interaction with this j atom. */
1136 velec = _mm256_andnot_ps(dummy_mask,velec);
1137 velecsum = _mm256_add_ps(velecsum,velec);
1141 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1143 /* Calculate temporary vectorial force */
1144 tx = _mm256_mul_ps(fscal,dx33);
1145 ty = _mm256_mul_ps(fscal,dy33);
1146 tz = _mm256_mul_ps(fscal,dz33);
1148 /* Update vectorial force */
1149 fix3 = _mm256_add_ps(fix3,tx);
1150 fiy3 = _mm256_add_ps(fiy3,ty);
1151 fiz3 = _mm256_add_ps(fiz3,tz);
1153 fjx3 = _mm256_add_ps(fjx3,tx);
1154 fjy3 = _mm256_add_ps(fjy3,ty);
1155 fjz3 = _mm256_add_ps(fjz3,tz);
1157 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1158 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1159 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1160 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1161 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1162 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1163 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1164 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1166 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
1167 fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
1168 fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1170 /* Inner loop uses 765 flops */
1173 /* End of innermost loop */
1175 gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1176 f+i_coord_offset+DIM,fshift+i_shift_offset);
1179 /* Update potential energies */
1180 gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1182 /* Increment number of inner iterations */
1183 inneriter += j_index_end - j_index_start;
1185 /* Outer loop uses 19 flops */
1188 /* Increment number of outer iterations */
1191 /* Update outer/inner flops */
1193 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*765);
1196 * Gromacs nonbonded kernel: nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_single
1197 * Electrostatics interaction: Ewald
1198 * VdW interaction: None
1199 * Geometry: Water4-Water4
1200 * Calculate force/pot: Force
1203 nb_kernel_ElecEw_VdwNone_GeomW4W4_F_avx_256_single
1204 (t_nblist * gmx_restrict nlist,
1205 rvec * gmx_restrict xx,
1206 rvec * gmx_restrict ff,
1207 struct t_forcerec * gmx_restrict fr,
1208 t_mdatoms * gmx_restrict mdatoms,
1209 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1210 t_nrnb * gmx_restrict nrnb)
1212 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1213 * just 0 for non-waters.
1214 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1215 * jnr indices corresponding to data put in the four positions in the SIMD register.
1217 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1218 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1219 int jnrA,jnrB,jnrC,jnrD;
1220 int jnrE,jnrF,jnrG,jnrH;
1221 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1222 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1223 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1224 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1225 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1226 real rcutoff_scalar;
1227 real *shiftvec,*fshift,*x,*f;
1228 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1229 real scratch[4*DIM];
1230 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1231 real * vdwioffsetptr1;
1232 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1233 real * vdwioffsetptr2;
1234 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1235 real * vdwioffsetptr3;
1236 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1237 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1238 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1239 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1240 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1241 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
1242 __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1243 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1244 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1245 __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1246 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1247 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1248 __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1249 __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1250 __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1251 __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1252 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
1255 __m128i ewitab_lo,ewitab_hi;
1256 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1257 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1259 __m256 dummy_mask,cutoff_mask;
1260 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1261 __m256 one = _mm256_set1_ps(1.0);
1262 __m256 two = _mm256_set1_ps(2.0);
1268 jindex = nlist->jindex;
1270 shiftidx = nlist->shift;
1272 shiftvec = fr->shift_vec[0];
1273 fshift = fr->fshift[0];
1274 facel = _mm256_set1_ps(fr->ic->epsfac);
1275 charge = mdatoms->chargeA;
1277 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
1278 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1279 beta2 = _mm256_mul_ps(beta,beta);
1280 beta3 = _mm256_mul_ps(beta,beta2);
1282 ewtab = fr->ic->tabq_coul_F;
1283 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
1284 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1286 /* Setup water-specific parameters */
1287 inr = nlist->iinr[0];
1288 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1289 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1290 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1292 jq1 = _mm256_set1_ps(charge[inr+1]);
1293 jq2 = _mm256_set1_ps(charge[inr+2]);
1294 jq3 = _mm256_set1_ps(charge[inr+3]);
1295 qq11 = _mm256_mul_ps(iq1,jq1);
1296 qq12 = _mm256_mul_ps(iq1,jq2);
1297 qq13 = _mm256_mul_ps(iq1,jq3);
1298 qq21 = _mm256_mul_ps(iq2,jq1);
1299 qq22 = _mm256_mul_ps(iq2,jq2);
1300 qq23 = _mm256_mul_ps(iq2,jq3);
1301 qq31 = _mm256_mul_ps(iq3,jq1);
1302 qq32 = _mm256_mul_ps(iq3,jq2);
1303 qq33 = _mm256_mul_ps(iq3,jq3);
1305 /* Avoid stupid compiler warnings */
1306 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1307 j_coord_offsetA = 0;
1308 j_coord_offsetB = 0;
1309 j_coord_offsetC = 0;
1310 j_coord_offsetD = 0;
1311 j_coord_offsetE = 0;
1312 j_coord_offsetF = 0;
1313 j_coord_offsetG = 0;
1314 j_coord_offsetH = 0;
1319 for(iidx=0;iidx<4*DIM;iidx++)
1321 scratch[iidx] = 0.0;
1324 /* Start outer loop over neighborlists */
1325 for(iidx=0; iidx<nri; iidx++)
1327 /* Load shift vector for this list */
1328 i_shift_offset = DIM*shiftidx[iidx];
1330 /* Load limits for loop over neighbors */
1331 j_index_start = jindex[iidx];
1332 j_index_end = jindex[iidx+1];
1334 /* Get outer coordinate index */
1336 i_coord_offset = DIM*inr;
1338 /* Load i particle coords and add shift vector */
1339 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
1340 &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1342 fix1 = _mm256_setzero_ps();
1343 fiy1 = _mm256_setzero_ps();
1344 fiz1 = _mm256_setzero_ps();
1345 fix2 = _mm256_setzero_ps();
1346 fiy2 = _mm256_setzero_ps();
1347 fiz2 = _mm256_setzero_ps();
1348 fix3 = _mm256_setzero_ps();
1349 fiy3 = _mm256_setzero_ps();
1350 fiz3 = _mm256_setzero_ps();
1352 /* Start inner kernel loop */
1353 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1356 /* Get j neighbor index, and coordinate index */
1358 jnrB = jjnr[jidx+1];
1359 jnrC = jjnr[jidx+2];
1360 jnrD = jjnr[jidx+3];
1361 jnrE = jjnr[jidx+4];
1362 jnrF = jjnr[jidx+5];
1363 jnrG = jjnr[jidx+6];
1364 jnrH = jjnr[jidx+7];
1365 j_coord_offsetA = DIM*jnrA;
1366 j_coord_offsetB = DIM*jnrB;
1367 j_coord_offsetC = DIM*jnrC;
1368 j_coord_offsetD = DIM*jnrD;
1369 j_coord_offsetE = DIM*jnrE;
1370 j_coord_offsetF = DIM*jnrF;
1371 j_coord_offsetG = DIM*jnrG;
1372 j_coord_offsetH = DIM*jnrH;
1374 /* load j atom coordinates */
1375 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
1376 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
1377 x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
1378 x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
1379 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
1381 /* Calculate displacement vector */
1382 dx11 = _mm256_sub_ps(ix1,jx1);
1383 dy11 = _mm256_sub_ps(iy1,jy1);
1384 dz11 = _mm256_sub_ps(iz1,jz1);
1385 dx12 = _mm256_sub_ps(ix1,jx2);
1386 dy12 = _mm256_sub_ps(iy1,jy2);
1387 dz12 = _mm256_sub_ps(iz1,jz2);
1388 dx13 = _mm256_sub_ps(ix1,jx3);
1389 dy13 = _mm256_sub_ps(iy1,jy3);
1390 dz13 = _mm256_sub_ps(iz1,jz3);
1391 dx21 = _mm256_sub_ps(ix2,jx1);
1392 dy21 = _mm256_sub_ps(iy2,jy1);
1393 dz21 = _mm256_sub_ps(iz2,jz1);
1394 dx22 = _mm256_sub_ps(ix2,jx2);
1395 dy22 = _mm256_sub_ps(iy2,jy2);
1396 dz22 = _mm256_sub_ps(iz2,jz2);
1397 dx23 = _mm256_sub_ps(ix2,jx3);
1398 dy23 = _mm256_sub_ps(iy2,jy3);
1399 dz23 = _mm256_sub_ps(iz2,jz3);
1400 dx31 = _mm256_sub_ps(ix3,jx1);
1401 dy31 = _mm256_sub_ps(iy3,jy1);
1402 dz31 = _mm256_sub_ps(iz3,jz1);
1403 dx32 = _mm256_sub_ps(ix3,jx2);
1404 dy32 = _mm256_sub_ps(iy3,jy2);
1405 dz32 = _mm256_sub_ps(iz3,jz2);
1406 dx33 = _mm256_sub_ps(ix3,jx3);
1407 dy33 = _mm256_sub_ps(iy3,jy3);
1408 dz33 = _mm256_sub_ps(iz3,jz3);
1410 /* Calculate squared distance and things based on it */
1411 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1412 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1413 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
1414 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1415 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1416 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
1417 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
1418 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
1419 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
1421 rinv11 = avx256_invsqrt_f(rsq11);
1422 rinv12 = avx256_invsqrt_f(rsq12);
1423 rinv13 = avx256_invsqrt_f(rsq13);
1424 rinv21 = avx256_invsqrt_f(rsq21);
1425 rinv22 = avx256_invsqrt_f(rsq22);
1426 rinv23 = avx256_invsqrt_f(rsq23);
1427 rinv31 = avx256_invsqrt_f(rsq31);
1428 rinv32 = avx256_invsqrt_f(rsq32);
1429 rinv33 = avx256_invsqrt_f(rsq33);
1431 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
1432 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
1433 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
1434 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
1435 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
1436 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
1437 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
1438 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
1439 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
1441 fjx1 = _mm256_setzero_ps();
1442 fjy1 = _mm256_setzero_ps();
1443 fjz1 = _mm256_setzero_ps();
1444 fjx2 = _mm256_setzero_ps();
1445 fjy2 = _mm256_setzero_ps();
1446 fjz2 = _mm256_setzero_ps();
1447 fjx3 = _mm256_setzero_ps();
1448 fjy3 = _mm256_setzero_ps();
1449 fjz3 = _mm256_setzero_ps();
1451 /**************************
1452 * CALCULATE INTERACTIONS *
1453 **************************/
1455 r11 = _mm256_mul_ps(rsq11,rinv11);
1457 /* EWALD ELECTROSTATICS */
1459 /* Analytical PME correction */
1460 zeta2 = _mm256_mul_ps(beta2,rsq11);
1461 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1462 pmecorrF = avx256_pmecorrF_f(zeta2);
1463 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1464 felec = _mm256_mul_ps(qq11,felec);
1468 /* Calculate temporary vectorial force */
1469 tx = _mm256_mul_ps(fscal,dx11);
1470 ty = _mm256_mul_ps(fscal,dy11);
1471 tz = _mm256_mul_ps(fscal,dz11);
1473 /* Update vectorial force */
1474 fix1 = _mm256_add_ps(fix1,tx);
1475 fiy1 = _mm256_add_ps(fiy1,ty);
1476 fiz1 = _mm256_add_ps(fiz1,tz);
1478 fjx1 = _mm256_add_ps(fjx1,tx);
1479 fjy1 = _mm256_add_ps(fjy1,ty);
1480 fjz1 = _mm256_add_ps(fjz1,tz);
1482 /**************************
1483 * CALCULATE INTERACTIONS *
1484 **************************/
1486 r12 = _mm256_mul_ps(rsq12,rinv12);
1488 /* EWALD ELECTROSTATICS */
1490 /* Analytical PME correction */
1491 zeta2 = _mm256_mul_ps(beta2,rsq12);
1492 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1493 pmecorrF = avx256_pmecorrF_f(zeta2);
1494 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1495 felec = _mm256_mul_ps(qq12,felec);
1499 /* Calculate temporary vectorial force */
1500 tx = _mm256_mul_ps(fscal,dx12);
1501 ty = _mm256_mul_ps(fscal,dy12);
1502 tz = _mm256_mul_ps(fscal,dz12);
1504 /* Update vectorial force */
1505 fix1 = _mm256_add_ps(fix1,tx);
1506 fiy1 = _mm256_add_ps(fiy1,ty);
1507 fiz1 = _mm256_add_ps(fiz1,tz);
1509 fjx2 = _mm256_add_ps(fjx2,tx);
1510 fjy2 = _mm256_add_ps(fjy2,ty);
1511 fjz2 = _mm256_add_ps(fjz2,tz);
1513 /**************************
1514 * CALCULATE INTERACTIONS *
1515 **************************/
1517 r13 = _mm256_mul_ps(rsq13,rinv13);
1519 /* EWALD ELECTROSTATICS */
1521 /* Analytical PME correction */
1522 zeta2 = _mm256_mul_ps(beta2,rsq13);
1523 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
1524 pmecorrF = avx256_pmecorrF_f(zeta2);
1525 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1526 felec = _mm256_mul_ps(qq13,felec);
1530 /* Calculate temporary vectorial force */
1531 tx = _mm256_mul_ps(fscal,dx13);
1532 ty = _mm256_mul_ps(fscal,dy13);
1533 tz = _mm256_mul_ps(fscal,dz13);
1535 /* Update vectorial force */
1536 fix1 = _mm256_add_ps(fix1,tx);
1537 fiy1 = _mm256_add_ps(fiy1,ty);
1538 fiz1 = _mm256_add_ps(fiz1,tz);
1540 fjx3 = _mm256_add_ps(fjx3,tx);
1541 fjy3 = _mm256_add_ps(fjy3,ty);
1542 fjz3 = _mm256_add_ps(fjz3,tz);
1544 /**************************
1545 * CALCULATE INTERACTIONS *
1546 **************************/
1548 r21 = _mm256_mul_ps(rsq21,rinv21);
1550 /* EWALD ELECTROSTATICS */
1552 /* Analytical PME correction */
1553 zeta2 = _mm256_mul_ps(beta2,rsq21);
1554 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
1555 pmecorrF = avx256_pmecorrF_f(zeta2);
1556 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1557 felec = _mm256_mul_ps(qq21,felec);
1561 /* Calculate temporary vectorial force */
1562 tx = _mm256_mul_ps(fscal,dx21);
1563 ty = _mm256_mul_ps(fscal,dy21);
1564 tz = _mm256_mul_ps(fscal,dz21);
1566 /* Update vectorial force */
1567 fix2 = _mm256_add_ps(fix2,tx);
1568 fiy2 = _mm256_add_ps(fiy2,ty);
1569 fiz2 = _mm256_add_ps(fiz2,tz);
1571 fjx1 = _mm256_add_ps(fjx1,tx);
1572 fjy1 = _mm256_add_ps(fjy1,ty);
1573 fjz1 = _mm256_add_ps(fjz1,tz);
1575 /**************************
1576 * CALCULATE INTERACTIONS *
1577 **************************/
1579 r22 = _mm256_mul_ps(rsq22,rinv22);
1581 /* EWALD ELECTROSTATICS */
1583 /* Analytical PME correction */
1584 zeta2 = _mm256_mul_ps(beta2,rsq22);
1585 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
1586 pmecorrF = avx256_pmecorrF_f(zeta2);
1587 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1588 felec = _mm256_mul_ps(qq22,felec);
1592 /* Calculate temporary vectorial force */
1593 tx = _mm256_mul_ps(fscal,dx22);
1594 ty = _mm256_mul_ps(fscal,dy22);
1595 tz = _mm256_mul_ps(fscal,dz22);
1597 /* Update vectorial force */
1598 fix2 = _mm256_add_ps(fix2,tx);
1599 fiy2 = _mm256_add_ps(fiy2,ty);
1600 fiz2 = _mm256_add_ps(fiz2,tz);
1602 fjx2 = _mm256_add_ps(fjx2,tx);
1603 fjy2 = _mm256_add_ps(fjy2,ty);
1604 fjz2 = _mm256_add_ps(fjz2,tz);
1606 /**************************
1607 * CALCULATE INTERACTIONS *
1608 **************************/
1610 r23 = _mm256_mul_ps(rsq23,rinv23);
1612 /* EWALD ELECTROSTATICS */
1614 /* Analytical PME correction */
1615 zeta2 = _mm256_mul_ps(beta2,rsq23);
1616 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
1617 pmecorrF = avx256_pmecorrF_f(zeta2);
1618 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1619 felec = _mm256_mul_ps(qq23,felec);
1623 /* Calculate temporary vectorial force */
1624 tx = _mm256_mul_ps(fscal,dx23);
1625 ty = _mm256_mul_ps(fscal,dy23);
1626 tz = _mm256_mul_ps(fscal,dz23);
1628 /* Update vectorial force */
1629 fix2 = _mm256_add_ps(fix2,tx);
1630 fiy2 = _mm256_add_ps(fiy2,ty);
1631 fiz2 = _mm256_add_ps(fiz2,tz);
1633 fjx3 = _mm256_add_ps(fjx3,tx);
1634 fjy3 = _mm256_add_ps(fjy3,ty);
1635 fjz3 = _mm256_add_ps(fjz3,tz);
1637 /**************************
1638 * CALCULATE INTERACTIONS *
1639 **************************/
1641 r31 = _mm256_mul_ps(rsq31,rinv31);
1643 /* EWALD ELECTROSTATICS */
1645 /* Analytical PME correction */
1646 zeta2 = _mm256_mul_ps(beta2,rsq31);
1647 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
1648 pmecorrF = avx256_pmecorrF_f(zeta2);
1649 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1650 felec = _mm256_mul_ps(qq31,felec);
1654 /* Calculate temporary vectorial force */
1655 tx = _mm256_mul_ps(fscal,dx31);
1656 ty = _mm256_mul_ps(fscal,dy31);
1657 tz = _mm256_mul_ps(fscal,dz31);
1659 /* Update vectorial force */
1660 fix3 = _mm256_add_ps(fix3,tx);
1661 fiy3 = _mm256_add_ps(fiy3,ty);
1662 fiz3 = _mm256_add_ps(fiz3,tz);
1664 fjx1 = _mm256_add_ps(fjx1,tx);
1665 fjy1 = _mm256_add_ps(fjy1,ty);
1666 fjz1 = _mm256_add_ps(fjz1,tz);
1668 /**************************
1669 * CALCULATE INTERACTIONS *
1670 **************************/
1672 r32 = _mm256_mul_ps(rsq32,rinv32);
1674 /* EWALD ELECTROSTATICS */
1676 /* Analytical PME correction */
1677 zeta2 = _mm256_mul_ps(beta2,rsq32);
1678 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
1679 pmecorrF = avx256_pmecorrF_f(zeta2);
1680 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1681 felec = _mm256_mul_ps(qq32,felec);
1685 /* Calculate temporary vectorial force */
1686 tx = _mm256_mul_ps(fscal,dx32);
1687 ty = _mm256_mul_ps(fscal,dy32);
1688 tz = _mm256_mul_ps(fscal,dz32);
1690 /* Update vectorial force */
1691 fix3 = _mm256_add_ps(fix3,tx);
1692 fiy3 = _mm256_add_ps(fiy3,ty);
1693 fiz3 = _mm256_add_ps(fiz3,tz);
1695 fjx2 = _mm256_add_ps(fjx2,tx);
1696 fjy2 = _mm256_add_ps(fjy2,ty);
1697 fjz2 = _mm256_add_ps(fjz2,tz);
1699 /**************************
1700 * CALCULATE INTERACTIONS *
1701 **************************/
1703 r33 = _mm256_mul_ps(rsq33,rinv33);
1705 /* EWALD ELECTROSTATICS */
1707 /* Analytical PME correction */
1708 zeta2 = _mm256_mul_ps(beta2,rsq33);
1709 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
1710 pmecorrF = avx256_pmecorrF_f(zeta2);
1711 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1712 felec = _mm256_mul_ps(qq33,felec);
1716 /* Calculate temporary vectorial force */
1717 tx = _mm256_mul_ps(fscal,dx33);
1718 ty = _mm256_mul_ps(fscal,dy33);
1719 tz = _mm256_mul_ps(fscal,dz33);
1721 /* Update vectorial force */
1722 fix3 = _mm256_add_ps(fix3,tx);
1723 fiy3 = _mm256_add_ps(fiy3,ty);
1724 fiz3 = _mm256_add_ps(fiz3,tz);
1726 fjx3 = _mm256_add_ps(fjx3,tx);
1727 fjy3 = _mm256_add_ps(fjy3,ty);
1728 fjz3 = _mm256_add_ps(fjz3,tz);
1730 fjptrA = f+j_coord_offsetA;
1731 fjptrB = f+j_coord_offsetB;
1732 fjptrC = f+j_coord_offsetC;
1733 fjptrD = f+j_coord_offsetD;
1734 fjptrE = f+j_coord_offsetE;
1735 fjptrF = f+j_coord_offsetF;
1736 fjptrG = f+j_coord_offsetG;
1737 fjptrH = f+j_coord_offsetH;
1739 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
1740 fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
1741 fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1743 /* Inner loop uses 504 flops */
1746 if(jidx<j_index_end)
1749 /* Get j neighbor index, and coordinate index */
1750 jnrlistA = jjnr[jidx];
1751 jnrlistB = jjnr[jidx+1];
1752 jnrlistC = jjnr[jidx+2];
1753 jnrlistD = jjnr[jidx+3];
1754 jnrlistE = jjnr[jidx+4];
1755 jnrlistF = jjnr[jidx+5];
1756 jnrlistG = jjnr[jidx+6];
1757 jnrlistH = jjnr[jidx+7];
1758 /* Sign of each element will be negative for non-real atoms.
1759 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1760 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
1762 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1763 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1765 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1766 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1767 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1768 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1769 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
1770 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
1771 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
1772 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
1773 j_coord_offsetA = DIM*jnrA;
1774 j_coord_offsetB = DIM*jnrB;
1775 j_coord_offsetC = DIM*jnrC;
1776 j_coord_offsetD = DIM*jnrD;
1777 j_coord_offsetE = DIM*jnrE;
1778 j_coord_offsetF = DIM*jnrF;
1779 j_coord_offsetG = DIM*jnrG;
1780 j_coord_offsetH = DIM*jnrH;
1782 /* load j atom coordinates */
1783 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
1784 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
1785 x+j_coord_offsetE+DIM,x+j_coord_offsetF+DIM,
1786 x+j_coord_offsetG+DIM,x+j_coord_offsetH+DIM,
1787 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
1789 /* Calculate displacement vector */
1790 dx11 = _mm256_sub_ps(ix1,jx1);
1791 dy11 = _mm256_sub_ps(iy1,jy1);
1792 dz11 = _mm256_sub_ps(iz1,jz1);
1793 dx12 = _mm256_sub_ps(ix1,jx2);
1794 dy12 = _mm256_sub_ps(iy1,jy2);
1795 dz12 = _mm256_sub_ps(iz1,jz2);
1796 dx13 = _mm256_sub_ps(ix1,jx3);
1797 dy13 = _mm256_sub_ps(iy1,jy3);
1798 dz13 = _mm256_sub_ps(iz1,jz3);
1799 dx21 = _mm256_sub_ps(ix2,jx1);
1800 dy21 = _mm256_sub_ps(iy2,jy1);
1801 dz21 = _mm256_sub_ps(iz2,jz1);
1802 dx22 = _mm256_sub_ps(ix2,jx2);
1803 dy22 = _mm256_sub_ps(iy2,jy2);
1804 dz22 = _mm256_sub_ps(iz2,jz2);
1805 dx23 = _mm256_sub_ps(ix2,jx3);
1806 dy23 = _mm256_sub_ps(iy2,jy3);
1807 dz23 = _mm256_sub_ps(iz2,jz3);
1808 dx31 = _mm256_sub_ps(ix3,jx1);
1809 dy31 = _mm256_sub_ps(iy3,jy1);
1810 dz31 = _mm256_sub_ps(iz3,jz1);
1811 dx32 = _mm256_sub_ps(ix3,jx2);
1812 dy32 = _mm256_sub_ps(iy3,jy2);
1813 dz32 = _mm256_sub_ps(iz3,jz2);
1814 dx33 = _mm256_sub_ps(ix3,jx3);
1815 dy33 = _mm256_sub_ps(iy3,jy3);
1816 dz33 = _mm256_sub_ps(iz3,jz3);
1818 /* Calculate squared distance and things based on it */
1819 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1820 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1821 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
1822 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1823 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1824 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
1825 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
1826 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
1827 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
1829 rinv11 = avx256_invsqrt_f(rsq11);
1830 rinv12 = avx256_invsqrt_f(rsq12);
1831 rinv13 = avx256_invsqrt_f(rsq13);
1832 rinv21 = avx256_invsqrt_f(rsq21);
1833 rinv22 = avx256_invsqrt_f(rsq22);
1834 rinv23 = avx256_invsqrt_f(rsq23);
1835 rinv31 = avx256_invsqrt_f(rsq31);
1836 rinv32 = avx256_invsqrt_f(rsq32);
1837 rinv33 = avx256_invsqrt_f(rsq33);
1839 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
1840 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
1841 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
1842 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
1843 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
1844 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
1845 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
1846 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
1847 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
1849 fjx1 = _mm256_setzero_ps();
1850 fjy1 = _mm256_setzero_ps();
1851 fjz1 = _mm256_setzero_ps();
1852 fjx2 = _mm256_setzero_ps();
1853 fjy2 = _mm256_setzero_ps();
1854 fjz2 = _mm256_setzero_ps();
1855 fjx3 = _mm256_setzero_ps();
1856 fjy3 = _mm256_setzero_ps();
1857 fjz3 = _mm256_setzero_ps();
1859 /**************************
1860 * CALCULATE INTERACTIONS *
1861 **************************/
1863 r11 = _mm256_mul_ps(rsq11,rinv11);
1864 r11 = _mm256_andnot_ps(dummy_mask,r11);
1866 /* EWALD ELECTROSTATICS */
1868 /* Analytical PME correction */
1869 zeta2 = _mm256_mul_ps(beta2,rsq11);
1870 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1871 pmecorrF = avx256_pmecorrF_f(zeta2);
1872 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1873 felec = _mm256_mul_ps(qq11,felec);
1877 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1879 /* Calculate temporary vectorial force */
1880 tx = _mm256_mul_ps(fscal,dx11);
1881 ty = _mm256_mul_ps(fscal,dy11);
1882 tz = _mm256_mul_ps(fscal,dz11);
1884 /* Update vectorial force */
1885 fix1 = _mm256_add_ps(fix1,tx);
1886 fiy1 = _mm256_add_ps(fiy1,ty);
1887 fiz1 = _mm256_add_ps(fiz1,tz);
1889 fjx1 = _mm256_add_ps(fjx1,tx);
1890 fjy1 = _mm256_add_ps(fjy1,ty);
1891 fjz1 = _mm256_add_ps(fjz1,tz);
1893 /**************************
1894 * CALCULATE INTERACTIONS *
1895 **************************/
1897 r12 = _mm256_mul_ps(rsq12,rinv12);
1898 r12 = _mm256_andnot_ps(dummy_mask,r12);
1900 /* EWALD ELECTROSTATICS */
1902 /* Analytical PME correction */
1903 zeta2 = _mm256_mul_ps(beta2,rsq12);
1904 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1905 pmecorrF = avx256_pmecorrF_f(zeta2);
1906 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1907 felec = _mm256_mul_ps(qq12,felec);
1911 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1913 /* Calculate temporary vectorial force */
1914 tx = _mm256_mul_ps(fscal,dx12);
1915 ty = _mm256_mul_ps(fscal,dy12);
1916 tz = _mm256_mul_ps(fscal,dz12);
1918 /* Update vectorial force */
1919 fix1 = _mm256_add_ps(fix1,tx);
1920 fiy1 = _mm256_add_ps(fiy1,ty);
1921 fiz1 = _mm256_add_ps(fiz1,tz);
1923 fjx2 = _mm256_add_ps(fjx2,tx);
1924 fjy2 = _mm256_add_ps(fjy2,ty);
1925 fjz2 = _mm256_add_ps(fjz2,tz);
1927 /**************************
1928 * CALCULATE INTERACTIONS *
1929 **************************/
1931 r13 = _mm256_mul_ps(rsq13,rinv13);
1932 r13 = _mm256_andnot_ps(dummy_mask,r13);
1934 /* EWALD ELECTROSTATICS */
1936 /* Analytical PME correction */
1937 zeta2 = _mm256_mul_ps(beta2,rsq13);
1938 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
1939 pmecorrF = avx256_pmecorrF_f(zeta2);
1940 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1941 felec = _mm256_mul_ps(qq13,felec);
1945 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1947 /* Calculate temporary vectorial force */
1948 tx = _mm256_mul_ps(fscal,dx13);
1949 ty = _mm256_mul_ps(fscal,dy13);
1950 tz = _mm256_mul_ps(fscal,dz13);
1952 /* Update vectorial force */
1953 fix1 = _mm256_add_ps(fix1,tx);
1954 fiy1 = _mm256_add_ps(fiy1,ty);
1955 fiz1 = _mm256_add_ps(fiz1,tz);
1957 fjx3 = _mm256_add_ps(fjx3,tx);
1958 fjy3 = _mm256_add_ps(fjy3,ty);
1959 fjz3 = _mm256_add_ps(fjz3,tz);
/* Six repetitions of the same fully-unrolled Ewald electrostatic pattern,
 * one per remaining water-water atom pair: (i2,j1), (i2,j2), (i2,j3),
 * (i3,j1), (i3,j2), (i3,j3).  Each section:
 *   1. forms r = rsq * rinv and masks dummy lanes;
 *   2. computes the analytical PME-corrected scalar force
 *      felec = qq * ( pmecorrF(beta^2 r^2) * beta3 + 1/r^3 );
 *   3. zeroes fscal in padded lanes via andnot(dummy_mask, ...);
 *   4. scales the pair displacement (dx,dy,dz) into (tx,ty,tz) and
 *      accumulates it into the matching fi*/fj* force registers.
 * NOTE(review): r21..r33 are computed but not consumed in the visible
 * analytical-PME path (they are needed only by table-lookup variants of the
 * generator) — harmless generator artifact; do not hand-edit generated code.
 * NOTE(review): the original numbering skips 3 lines after each
 * "felec = _mm256_mul_ps(qq..,felec);" — presumably "fscal = felec;" plus
 * energy accumulation in the full file; confirm against the unabridged kernel.
 */
1961 /**************************
1962 * CALCULATE INTERACTIONS *
1963 **************************/
1965 r21 = _mm256_mul_ps(rsq21,rinv21);
1966 r21 = _mm256_andnot_ps(dummy_mask,r21);
1968 /* EWALD ELECTROSTATICS */
1970 /* Analytical PME correction */
1971 zeta2 = _mm256_mul_ps(beta2,rsq21);
1972 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
1973 pmecorrF = avx256_pmecorrF_f(zeta2);
1974 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1975 felec = _mm256_mul_ps(qq21,felec);
1979 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1981 /* Calculate temporary vectorial force */
1982 tx = _mm256_mul_ps(fscal,dx21);
1983 ty = _mm256_mul_ps(fscal,dy21);
1984 tz = _mm256_mul_ps(fscal,dz21);
1986 /* Update vectorial force */
1987 fix2 = _mm256_add_ps(fix2,tx);
1988 fiy2 = _mm256_add_ps(fiy2,ty);
1989 fiz2 = _mm256_add_ps(fiz2,tz);
1991 fjx1 = _mm256_add_ps(fjx1,tx);
1992 fjy1 = _mm256_add_ps(fjy1,ty);
1993 fjz1 = _mm256_add_ps(fjz1,tz);
1995 /**************************
1996 * CALCULATE INTERACTIONS *
1997 **************************/
1999 r22 = _mm256_mul_ps(rsq22,rinv22);
2000 r22 = _mm256_andnot_ps(dummy_mask,r22);
2002 /* EWALD ELECTROSTATICS */
2004 /* Analytical PME correction */
2005 zeta2 = _mm256_mul_ps(beta2,rsq22);
2006 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
2007 pmecorrF = avx256_pmecorrF_f(zeta2);
2008 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2009 felec = _mm256_mul_ps(qq22,felec);
2013 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2015 /* Calculate temporary vectorial force */
2016 tx = _mm256_mul_ps(fscal,dx22);
2017 ty = _mm256_mul_ps(fscal,dy22);
2018 tz = _mm256_mul_ps(fscal,dz22);
2020 /* Update vectorial force */
2021 fix2 = _mm256_add_ps(fix2,tx);
2022 fiy2 = _mm256_add_ps(fiy2,ty);
2023 fiz2 = _mm256_add_ps(fiz2,tz);
2025 fjx2 = _mm256_add_ps(fjx2,tx);
2026 fjy2 = _mm256_add_ps(fjy2,ty);
2027 fjz2 = _mm256_add_ps(fjz2,tz);
2029 /**************************
2030 * CALCULATE INTERACTIONS *
2031 **************************/
2033 r23 = _mm256_mul_ps(rsq23,rinv23);
2034 r23 = _mm256_andnot_ps(dummy_mask,r23);
2036 /* EWALD ELECTROSTATICS */
2038 /* Analytical PME correction */
2039 zeta2 = _mm256_mul_ps(beta2,rsq23);
2040 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
2041 pmecorrF = avx256_pmecorrF_f(zeta2);
2042 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2043 felec = _mm256_mul_ps(qq23,felec);
2047 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2049 /* Calculate temporary vectorial force */
2050 tx = _mm256_mul_ps(fscal,dx23);
2051 ty = _mm256_mul_ps(fscal,dy23);
2052 tz = _mm256_mul_ps(fscal,dz23);
2054 /* Update vectorial force */
2055 fix2 = _mm256_add_ps(fix2,tx);
2056 fiy2 = _mm256_add_ps(fiy2,ty);
2057 fiz2 = _mm256_add_ps(fiz2,tz);
2059 fjx3 = _mm256_add_ps(fjx3,tx);
2060 fjy3 = _mm256_add_ps(fjy3,ty);
2061 fjz3 = _mm256_add_ps(fjz3,tz);
2063 /**************************
2064 * CALCULATE INTERACTIONS *
2065 **************************/
2067 r31 = _mm256_mul_ps(rsq31,rinv31);
2068 r31 = _mm256_andnot_ps(dummy_mask,r31);
2070 /* EWALD ELECTROSTATICS */
2072 /* Analytical PME correction */
2073 zeta2 = _mm256_mul_ps(beta2,rsq31);
2074 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
2075 pmecorrF = avx256_pmecorrF_f(zeta2);
2076 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2077 felec = _mm256_mul_ps(qq31,felec);
2081 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2083 /* Calculate temporary vectorial force */
2084 tx = _mm256_mul_ps(fscal,dx31);
2085 ty = _mm256_mul_ps(fscal,dy31);
2086 tz = _mm256_mul_ps(fscal,dz31);
2088 /* Update vectorial force */
2089 fix3 = _mm256_add_ps(fix3,tx);
2090 fiy3 = _mm256_add_ps(fiy3,ty);
2091 fiz3 = _mm256_add_ps(fiz3,tz);
2093 fjx1 = _mm256_add_ps(fjx1,tx);
2094 fjy1 = _mm256_add_ps(fjy1,ty);
2095 fjz1 = _mm256_add_ps(fjz1,tz);
2097 /**************************
2098 * CALCULATE INTERACTIONS *
2099 **************************/
2101 r32 = _mm256_mul_ps(rsq32,rinv32);
2102 r32 = _mm256_andnot_ps(dummy_mask,r32);
2104 /* EWALD ELECTROSTATICS */
2106 /* Analytical PME correction */
2107 zeta2 = _mm256_mul_ps(beta2,rsq32);
2108 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
2109 pmecorrF = avx256_pmecorrF_f(zeta2);
2110 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2111 felec = _mm256_mul_ps(qq32,felec);
2115 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2117 /* Calculate temporary vectorial force */
2118 tx = _mm256_mul_ps(fscal,dx32);
2119 ty = _mm256_mul_ps(fscal,dy32);
2120 tz = _mm256_mul_ps(fscal,dz32);
2122 /* Update vectorial force */
2123 fix3 = _mm256_add_ps(fix3,tx);
2124 fiy3 = _mm256_add_ps(fiy3,ty);
2125 fiz3 = _mm256_add_ps(fiz3,tz);
2127 fjx2 = _mm256_add_ps(fjx2,tx);
2128 fjy2 = _mm256_add_ps(fjy2,ty);
2129 fjz2 = _mm256_add_ps(fjz2,tz);
2131 /**************************
2132 * CALCULATE INTERACTIONS *
2133 **************************/
2135 r33 = _mm256_mul_ps(rsq33,rinv33);
2136 r33 = _mm256_andnot_ps(dummy_mask,r33);
2138 /* EWALD ELECTROSTATICS */
2140 /* Analytical PME correction */
2141 zeta2 = _mm256_mul_ps(beta2,rsq33);
2142 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
2143 pmecorrF = avx256_pmecorrF_f(zeta2);
2144 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2145 felec = _mm256_mul_ps(qq33,felec);
2149 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2151 /* Calculate temporary vectorial force */
2152 tx = _mm256_mul_ps(fscal,dx33);
2153 ty = _mm256_mul_ps(fscal,dy33);
2154 tz = _mm256_mul_ps(fscal,dz33);
2156 /* Update vectorial force */
2157 fix3 = _mm256_add_ps(fix3,tx);
2158 fiy3 = _mm256_add_ps(fiy3,ty);
2159 fiz3 = _mm256_add_ps(fiz3,tz);
2161 fjx3 = _mm256_add_ps(fjx3,tx);
2162 fjy3 = _mm256_add_ps(fjy3,ty);
2163 fjz3 = _mm256_add_ps(fjz3,tz);
/* Inner-loop epilogue: scatter the accumulated j-forces back to memory.
 * Lanes whose j index is invalid (jnrlist < 0, i.e. padding in the SIMD
 * cluster) are redirected to a scratch buffer so the write is harmless. */
2165 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2166 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2167 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2168 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2169 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2170 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2171 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2172 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
/* Subtract (decrement) the fj sums from j-atoms 1..3 of each of the 8
 * j-waters; the +DIM offset skips atom 0, which carries no interactions in
 * this ElecEw/VdwNone W4W4 kernel (only atoms 1-3 were accumulated above). */
2174 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
2175 fjptrE+DIM,fjptrF+DIM,fjptrG+DIM,fjptrH+DIM,
2176 fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2178 /* Inner loop uses 513 flops */
2181 /* End of innermost loop */
/* Reduce the i-atom force registers across SIMD lanes and add them to the
 * force array (again skipping atom 0 via +DIM) and to the shift-force
 * accumulator used for the virial. */
2183 gmx_mm256_update_iforce_3atom_swizzle_ps(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
2184 f+i_coord_offset+DIM,fshift+i_shift_offset);
2186 /* Increment number of inner iterations */
2187 inneriter += j_index_end - j_index_start;
2189 /* Outer loop uses 18 flops */
2192 /* Increment number of outer iterations */
/* Flop accounting for mdrun performance reports: the 18/513 per-iteration
 * counts must match the generator's comments above. */
2195 /* Update outer/inner flops */
2197 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*513);