2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
42 #include "../nb_kernel.h"
43 #include "types/simple.h"
44 #include "gromacs/math/vec.h"
47 #include "gromacs/simd/math_x86_avx_256_single.h"
48 #include "kernelutil_x86_avx_256_single.h"
51 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_single
52 * Electrostatics interaction: Ewald
53 * VdW interaction: LennardJones
54 * Geometry: Water3-Water3
55 * Calculate force/pot: PotentialAndForce
58 nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_single
59 (t_nblist * gmx_restrict nlist,
60 rvec * gmx_restrict xx,
61 rvec * gmx_restrict ff,
62 t_forcerec * gmx_restrict fr,
63 t_mdatoms * gmx_restrict mdatoms,
64 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65 t_nrnb * gmx_restrict nrnb)
67 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
68 * just 0 for non-waters.
69 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
70 * jnr indices corresponding to data put in the four positions in the SIMD register.
72 int i_shift_offset,i_coord_offset,outeriter,inneriter;
73 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74 int jnrA,jnrB,jnrC,jnrD;
75 int jnrE,jnrF,jnrG,jnrH;
76 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
77 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
78 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
79 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
80 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
82 real *shiftvec,*fshift,*x,*f;
83 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
85 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86 real * vdwioffsetptr0;
87 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88 real * vdwioffsetptr1;
89 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90 real * vdwioffsetptr2;
91 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
93 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
94 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
95 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
96 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
97 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
98 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
99 __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
100 __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
101 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
102 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
103 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
104 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
105 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
106 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
107 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
110 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
113 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
114 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
116 __m128i ewitab_lo,ewitab_hi;
117 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
118 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
120 __m256 dummy_mask,cutoff_mask;
121 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
122 __m256 one = _mm256_set1_ps(1.0);
123 __m256 two = _mm256_set1_ps(2.0);
129 jindex = nlist->jindex;
131 shiftidx = nlist->shift;
133 shiftvec = fr->shift_vec[0];
134 fshift = fr->fshift[0];
135 facel = _mm256_set1_ps(fr->epsfac);
136 charge = mdatoms->chargeA;
137 nvdwtype = fr->ntype;
139 vdwtype = mdatoms->typeA;
141 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
142 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
143 beta2 = _mm256_mul_ps(beta,beta);
144 beta3 = _mm256_mul_ps(beta,beta2);
146 ewtab = fr->ic->tabq_coul_FDV0;
147 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
148 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
150 /* Setup water-specific parameters */
151 inr = nlist->iinr[0];
152 iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
153 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
154 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
155 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
157 jq0 = _mm256_set1_ps(charge[inr+0]);
158 jq1 = _mm256_set1_ps(charge[inr+1]);
159 jq2 = _mm256_set1_ps(charge[inr+2]);
160 vdwjidx0A = 2*vdwtype[inr+0];
161 qq00 = _mm256_mul_ps(iq0,jq0);
162 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
163 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
164 qq01 = _mm256_mul_ps(iq0,jq1);
165 qq02 = _mm256_mul_ps(iq0,jq2);
166 qq10 = _mm256_mul_ps(iq1,jq0);
167 qq11 = _mm256_mul_ps(iq1,jq1);
168 qq12 = _mm256_mul_ps(iq1,jq2);
169 qq20 = _mm256_mul_ps(iq2,jq0);
170 qq21 = _mm256_mul_ps(iq2,jq1);
171 qq22 = _mm256_mul_ps(iq2,jq2);
173 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
174 rcutoff_scalar = fr->rcoulomb;
175 rcutoff = _mm256_set1_ps(rcutoff_scalar);
176 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
178 sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
179 rvdw = _mm256_set1_ps(fr->rvdw);
181 /* Avoid stupid compiler warnings */
182 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
195 for(iidx=0;iidx<4*DIM;iidx++)
200 /* Start outer loop over neighborlists */
201 for(iidx=0; iidx<nri; iidx++)
203 /* Load shift vector for this list */
204 i_shift_offset = DIM*shiftidx[iidx];
206 /* Load limits for loop over neighbors */
207 j_index_start = jindex[iidx];
208 j_index_end = jindex[iidx+1];
210 /* Get outer coordinate index */
212 i_coord_offset = DIM*inr;
214 /* Load i particle coords and add shift vector */
215 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
216 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
218 fix0 = _mm256_setzero_ps();
219 fiy0 = _mm256_setzero_ps();
220 fiz0 = _mm256_setzero_ps();
221 fix1 = _mm256_setzero_ps();
222 fiy1 = _mm256_setzero_ps();
223 fiz1 = _mm256_setzero_ps();
224 fix2 = _mm256_setzero_ps();
225 fiy2 = _mm256_setzero_ps();
226 fiz2 = _mm256_setzero_ps();
228 /* Reset potential sums */
229 velecsum = _mm256_setzero_ps();
230 vvdwsum = _mm256_setzero_ps();
232 /* Start inner kernel loop */
233 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
236 /* Get j neighbor index, and coordinate index */
245 j_coord_offsetA = DIM*jnrA;
246 j_coord_offsetB = DIM*jnrB;
247 j_coord_offsetC = DIM*jnrC;
248 j_coord_offsetD = DIM*jnrD;
249 j_coord_offsetE = DIM*jnrE;
250 j_coord_offsetF = DIM*jnrF;
251 j_coord_offsetG = DIM*jnrG;
252 j_coord_offsetH = DIM*jnrH;
254 /* load j atom coordinates */
255 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
256 x+j_coord_offsetC,x+j_coord_offsetD,
257 x+j_coord_offsetE,x+j_coord_offsetF,
258 x+j_coord_offsetG,x+j_coord_offsetH,
259 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
261 /* Calculate displacement vector */
262 dx00 = _mm256_sub_ps(ix0,jx0);
263 dy00 = _mm256_sub_ps(iy0,jy0);
264 dz00 = _mm256_sub_ps(iz0,jz0);
265 dx01 = _mm256_sub_ps(ix0,jx1);
266 dy01 = _mm256_sub_ps(iy0,jy1);
267 dz01 = _mm256_sub_ps(iz0,jz1);
268 dx02 = _mm256_sub_ps(ix0,jx2);
269 dy02 = _mm256_sub_ps(iy0,jy2);
270 dz02 = _mm256_sub_ps(iz0,jz2);
271 dx10 = _mm256_sub_ps(ix1,jx0);
272 dy10 = _mm256_sub_ps(iy1,jy0);
273 dz10 = _mm256_sub_ps(iz1,jz0);
274 dx11 = _mm256_sub_ps(ix1,jx1);
275 dy11 = _mm256_sub_ps(iy1,jy1);
276 dz11 = _mm256_sub_ps(iz1,jz1);
277 dx12 = _mm256_sub_ps(ix1,jx2);
278 dy12 = _mm256_sub_ps(iy1,jy2);
279 dz12 = _mm256_sub_ps(iz1,jz2);
280 dx20 = _mm256_sub_ps(ix2,jx0);
281 dy20 = _mm256_sub_ps(iy2,jy0);
282 dz20 = _mm256_sub_ps(iz2,jz0);
283 dx21 = _mm256_sub_ps(ix2,jx1);
284 dy21 = _mm256_sub_ps(iy2,jy1);
285 dz21 = _mm256_sub_ps(iz2,jz1);
286 dx22 = _mm256_sub_ps(ix2,jx2);
287 dy22 = _mm256_sub_ps(iy2,jy2);
288 dz22 = _mm256_sub_ps(iz2,jz2);
290 /* Calculate squared distance and things based on it */
291 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
292 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
293 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
294 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
295 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
296 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
297 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
298 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
299 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
301 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
302 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
303 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
304 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
305 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
306 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
307 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
308 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
309 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
311 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
312 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
313 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
314 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
315 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
316 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
317 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
318 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
319 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
321 fjx0 = _mm256_setzero_ps();
322 fjy0 = _mm256_setzero_ps();
323 fjz0 = _mm256_setzero_ps();
324 fjx1 = _mm256_setzero_ps();
325 fjy1 = _mm256_setzero_ps();
326 fjz1 = _mm256_setzero_ps();
327 fjx2 = _mm256_setzero_ps();
328 fjy2 = _mm256_setzero_ps();
329 fjz2 = _mm256_setzero_ps();
331 /**************************
332 * CALCULATE INTERACTIONS *
333 **************************/
335 if (gmx_mm256_any_lt(rsq00,rcutoff2))
338 r00 = _mm256_mul_ps(rsq00,rinv00);
340 /* EWALD ELECTROSTATICS */
342 /* Analytical PME correction */
343 zeta2 = _mm256_mul_ps(beta2,rsq00);
344 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
345 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
346 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
347 felec = _mm256_mul_ps(qq00,felec);
348 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
349 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
350 velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
351 velec = _mm256_mul_ps(qq00,velec);
353 /* LENNARD-JONES DISPERSION/REPULSION */
355 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
356 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
357 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
358 vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
359 _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
360 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
362 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
364 /* Update potential sum for this i atom from the interaction with this j atom. */
365 velec = _mm256_and_ps(velec,cutoff_mask);
366 velecsum = _mm256_add_ps(velecsum,velec);
367 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
368 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
370 fscal = _mm256_add_ps(felec,fvdw);
372 fscal = _mm256_and_ps(fscal,cutoff_mask);
374 /* Calculate temporary vectorial force */
375 tx = _mm256_mul_ps(fscal,dx00);
376 ty = _mm256_mul_ps(fscal,dy00);
377 tz = _mm256_mul_ps(fscal,dz00);
379 /* Update vectorial force */
380 fix0 = _mm256_add_ps(fix0,tx);
381 fiy0 = _mm256_add_ps(fiy0,ty);
382 fiz0 = _mm256_add_ps(fiz0,tz);
384 fjx0 = _mm256_add_ps(fjx0,tx);
385 fjy0 = _mm256_add_ps(fjy0,ty);
386 fjz0 = _mm256_add_ps(fjz0,tz);
390 /**************************
391 * CALCULATE INTERACTIONS *
392 **************************/
394 if (gmx_mm256_any_lt(rsq01,rcutoff2))
397 r01 = _mm256_mul_ps(rsq01,rinv01);
399 /* EWALD ELECTROSTATICS */
401 /* Analytical PME correction */
402 zeta2 = _mm256_mul_ps(beta2,rsq01);
403 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
404 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
405 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
406 felec = _mm256_mul_ps(qq01,felec);
407 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
408 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
409 velec = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
410 velec = _mm256_mul_ps(qq01,velec);
412 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
414 /* Update potential sum for this i atom from the interaction with this j atom. */
415 velec = _mm256_and_ps(velec,cutoff_mask);
416 velecsum = _mm256_add_ps(velecsum,velec);
420 fscal = _mm256_and_ps(fscal,cutoff_mask);
422 /* Calculate temporary vectorial force */
423 tx = _mm256_mul_ps(fscal,dx01);
424 ty = _mm256_mul_ps(fscal,dy01);
425 tz = _mm256_mul_ps(fscal,dz01);
427 /* Update vectorial force */
428 fix0 = _mm256_add_ps(fix0,tx);
429 fiy0 = _mm256_add_ps(fiy0,ty);
430 fiz0 = _mm256_add_ps(fiz0,tz);
432 fjx1 = _mm256_add_ps(fjx1,tx);
433 fjy1 = _mm256_add_ps(fjy1,ty);
434 fjz1 = _mm256_add_ps(fjz1,tz);
438 /**************************
439 * CALCULATE INTERACTIONS *
440 **************************/
442 if (gmx_mm256_any_lt(rsq02,rcutoff2))
445 r02 = _mm256_mul_ps(rsq02,rinv02);
447 /* EWALD ELECTROSTATICS */
449 /* Analytical PME correction */
450 zeta2 = _mm256_mul_ps(beta2,rsq02);
451 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
452 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
453 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
454 felec = _mm256_mul_ps(qq02,felec);
455 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
456 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
457 velec = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
458 velec = _mm256_mul_ps(qq02,velec);
460 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
462 /* Update potential sum for this i atom from the interaction with this j atom. */
463 velec = _mm256_and_ps(velec,cutoff_mask);
464 velecsum = _mm256_add_ps(velecsum,velec);
468 fscal = _mm256_and_ps(fscal,cutoff_mask);
470 /* Calculate temporary vectorial force */
471 tx = _mm256_mul_ps(fscal,dx02);
472 ty = _mm256_mul_ps(fscal,dy02);
473 tz = _mm256_mul_ps(fscal,dz02);
475 /* Update vectorial force */
476 fix0 = _mm256_add_ps(fix0,tx);
477 fiy0 = _mm256_add_ps(fiy0,ty);
478 fiz0 = _mm256_add_ps(fiz0,tz);
480 fjx2 = _mm256_add_ps(fjx2,tx);
481 fjy2 = _mm256_add_ps(fjy2,ty);
482 fjz2 = _mm256_add_ps(fjz2,tz);
486 /**************************
487 * CALCULATE INTERACTIONS *
488 **************************/
490 if (gmx_mm256_any_lt(rsq10,rcutoff2))
493 r10 = _mm256_mul_ps(rsq10,rinv10);
495 /* EWALD ELECTROSTATICS */
497 /* Analytical PME correction */
498 zeta2 = _mm256_mul_ps(beta2,rsq10);
499 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
500 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
501 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
502 felec = _mm256_mul_ps(qq10,felec);
503 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
504 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
505 velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
506 velec = _mm256_mul_ps(qq10,velec);
508 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
510 /* Update potential sum for this i atom from the interaction with this j atom. */
511 velec = _mm256_and_ps(velec,cutoff_mask);
512 velecsum = _mm256_add_ps(velecsum,velec);
516 fscal = _mm256_and_ps(fscal,cutoff_mask);
518 /* Calculate temporary vectorial force */
519 tx = _mm256_mul_ps(fscal,dx10);
520 ty = _mm256_mul_ps(fscal,dy10);
521 tz = _mm256_mul_ps(fscal,dz10);
523 /* Update vectorial force */
524 fix1 = _mm256_add_ps(fix1,tx);
525 fiy1 = _mm256_add_ps(fiy1,ty);
526 fiz1 = _mm256_add_ps(fiz1,tz);
528 fjx0 = _mm256_add_ps(fjx0,tx);
529 fjy0 = _mm256_add_ps(fjy0,ty);
530 fjz0 = _mm256_add_ps(fjz0,tz);
534 /**************************
535 * CALCULATE INTERACTIONS *
536 **************************/
538 if (gmx_mm256_any_lt(rsq11,rcutoff2))
541 r11 = _mm256_mul_ps(rsq11,rinv11);
543 /* EWALD ELECTROSTATICS */
545 /* Analytical PME correction */
546 zeta2 = _mm256_mul_ps(beta2,rsq11);
547 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
548 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
549 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
550 felec = _mm256_mul_ps(qq11,felec);
551 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
552 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
553 velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
554 velec = _mm256_mul_ps(qq11,velec);
556 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
558 /* Update potential sum for this i atom from the interaction with this j atom. */
559 velec = _mm256_and_ps(velec,cutoff_mask);
560 velecsum = _mm256_add_ps(velecsum,velec);
564 fscal = _mm256_and_ps(fscal,cutoff_mask);
566 /* Calculate temporary vectorial force */
567 tx = _mm256_mul_ps(fscal,dx11);
568 ty = _mm256_mul_ps(fscal,dy11);
569 tz = _mm256_mul_ps(fscal,dz11);
571 /* Update vectorial force */
572 fix1 = _mm256_add_ps(fix1,tx);
573 fiy1 = _mm256_add_ps(fiy1,ty);
574 fiz1 = _mm256_add_ps(fiz1,tz);
576 fjx1 = _mm256_add_ps(fjx1,tx);
577 fjy1 = _mm256_add_ps(fjy1,ty);
578 fjz1 = _mm256_add_ps(fjz1,tz);
582 /**************************
583 * CALCULATE INTERACTIONS *
584 **************************/
586 if (gmx_mm256_any_lt(rsq12,rcutoff2))
589 r12 = _mm256_mul_ps(rsq12,rinv12);
591 /* EWALD ELECTROSTATICS */
593 /* Analytical PME correction */
594 zeta2 = _mm256_mul_ps(beta2,rsq12);
595 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
596 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
597 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
598 felec = _mm256_mul_ps(qq12,felec);
599 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
600 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
601 velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
602 velec = _mm256_mul_ps(qq12,velec);
604 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
606 /* Update potential sum for this i atom from the interaction with this j atom. */
607 velec = _mm256_and_ps(velec,cutoff_mask);
608 velecsum = _mm256_add_ps(velecsum,velec);
612 fscal = _mm256_and_ps(fscal,cutoff_mask);
614 /* Calculate temporary vectorial force */
615 tx = _mm256_mul_ps(fscal,dx12);
616 ty = _mm256_mul_ps(fscal,dy12);
617 tz = _mm256_mul_ps(fscal,dz12);
619 /* Update vectorial force */
620 fix1 = _mm256_add_ps(fix1,tx);
621 fiy1 = _mm256_add_ps(fiy1,ty);
622 fiz1 = _mm256_add_ps(fiz1,tz);
624 fjx2 = _mm256_add_ps(fjx2,tx);
625 fjy2 = _mm256_add_ps(fjy2,ty);
626 fjz2 = _mm256_add_ps(fjz2,tz);
630 /**************************
631 * CALCULATE INTERACTIONS *
632 **************************/
634 if (gmx_mm256_any_lt(rsq20,rcutoff2))
637 r20 = _mm256_mul_ps(rsq20,rinv20);
639 /* EWALD ELECTROSTATICS */
641 /* Analytical PME correction */
642 zeta2 = _mm256_mul_ps(beta2,rsq20);
643 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
644 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
645 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
646 felec = _mm256_mul_ps(qq20,felec);
647 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
648 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
649 velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
650 velec = _mm256_mul_ps(qq20,velec);
652 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
654 /* Update potential sum for this i atom from the interaction with this j atom. */
655 velec = _mm256_and_ps(velec,cutoff_mask);
656 velecsum = _mm256_add_ps(velecsum,velec);
660 fscal = _mm256_and_ps(fscal,cutoff_mask);
662 /* Calculate temporary vectorial force */
663 tx = _mm256_mul_ps(fscal,dx20);
664 ty = _mm256_mul_ps(fscal,dy20);
665 tz = _mm256_mul_ps(fscal,dz20);
667 /* Update vectorial force */
668 fix2 = _mm256_add_ps(fix2,tx);
669 fiy2 = _mm256_add_ps(fiy2,ty);
670 fiz2 = _mm256_add_ps(fiz2,tz);
672 fjx0 = _mm256_add_ps(fjx0,tx);
673 fjy0 = _mm256_add_ps(fjy0,ty);
674 fjz0 = _mm256_add_ps(fjz0,tz);
678 /**************************
679 * CALCULATE INTERACTIONS *
680 **************************/
682 if (gmx_mm256_any_lt(rsq21,rcutoff2))
685 r21 = _mm256_mul_ps(rsq21,rinv21);
687 /* EWALD ELECTROSTATICS */
689 /* Analytical PME correction */
690 zeta2 = _mm256_mul_ps(beta2,rsq21);
691 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
692 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
693 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
694 felec = _mm256_mul_ps(qq21,felec);
695 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
696 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
697 velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
698 velec = _mm256_mul_ps(qq21,velec);
700 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
702 /* Update potential sum for this i atom from the interaction with this j atom. */
703 velec = _mm256_and_ps(velec,cutoff_mask);
704 velecsum = _mm256_add_ps(velecsum,velec);
708 fscal = _mm256_and_ps(fscal,cutoff_mask);
710 /* Calculate temporary vectorial force */
711 tx = _mm256_mul_ps(fscal,dx21);
712 ty = _mm256_mul_ps(fscal,dy21);
713 tz = _mm256_mul_ps(fscal,dz21);
715 /* Update vectorial force */
716 fix2 = _mm256_add_ps(fix2,tx);
717 fiy2 = _mm256_add_ps(fiy2,ty);
718 fiz2 = _mm256_add_ps(fiz2,tz);
720 fjx1 = _mm256_add_ps(fjx1,tx);
721 fjy1 = _mm256_add_ps(fjy1,ty);
722 fjz1 = _mm256_add_ps(fjz1,tz);
726 /**************************
727 * CALCULATE INTERACTIONS *
728 **************************/
730 if (gmx_mm256_any_lt(rsq22,rcutoff2))
733 r22 = _mm256_mul_ps(rsq22,rinv22);
735 /* EWALD ELECTROSTATICS */
737 /* Analytical PME correction */
738 zeta2 = _mm256_mul_ps(beta2,rsq22);
739 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
740 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
741 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
742 felec = _mm256_mul_ps(qq22,felec);
743 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
744 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
745 velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
746 velec = _mm256_mul_ps(qq22,velec);
748 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
750 /* Update potential sum for this i atom from the interaction with this j atom. */
751 velec = _mm256_and_ps(velec,cutoff_mask);
752 velecsum = _mm256_add_ps(velecsum,velec);
756 fscal = _mm256_and_ps(fscal,cutoff_mask);
758 /* Calculate temporary vectorial force */
759 tx = _mm256_mul_ps(fscal,dx22);
760 ty = _mm256_mul_ps(fscal,dy22);
761 tz = _mm256_mul_ps(fscal,dz22);
763 /* Update vectorial force */
764 fix2 = _mm256_add_ps(fix2,tx);
765 fiy2 = _mm256_add_ps(fiy2,ty);
766 fiz2 = _mm256_add_ps(fiz2,tz);
768 fjx2 = _mm256_add_ps(fjx2,tx);
769 fjy2 = _mm256_add_ps(fjy2,ty);
770 fjz2 = _mm256_add_ps(fjz2,tz);
774 fjptrA = f+j_coord_offsetA;
775 fjptrB = f+j_coord_offsetB;
776 fjptrC = f+j_coord_offsetC;
777 fjptrD = f+j_coord_offsetD;
778 fjptrE = f+j_coord_offsetE;
779 fjptrF = f+j_coord_offsetF;
780 fjptrG = f+j_coord_offsetG;
781 fjptrH = f+j_coord_offsetH;
783 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
784 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
786 /* Inner loop uses 999 flops */
792 /* Get j neighbor index, and coordinate index */
793 jnrlistA = jjnr[jidx];
794 jnrlistB = jjnr[jidx+1];
795 jnrlistC = jjnr[jidx+2];
796 jnrlistD = jjnr[jidx+3];
797 jnrlistE = jjnr[jidx+4];
798 jnrlistF = jjnr[jidx+5];
799 jnrlistG = jjnr[jidx+6];
800 jnrlistH = jjnr[jidx+7];
801 /* Sign of each element will be negative for non-real atoms.
802 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
803 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
805 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
806 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
808 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
809 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
810 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
811 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
812 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
813 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
814 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
815 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
816 j_coord_offsetA = DIM*jnrA;
817 j_coord_offsetB = DIM*jnrB;
818 j_coord_offsetC = DIM*jnrC;
819 j_coord_offsetD = DIM*jnrD;
820 j_coord_offsetE = DIM*jnrE;
821 j_coord_offsetF = DIM*jnrF;
822 j_coord_offsetG = DIM*jnrG;
823 j_coord_offsetH = DIM*jnrH;
825 /* load j atom coordinates */
826 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
827 x+j_coord_offsetC,x+j_coord_offsetD,
828 x+j_coord_offsetE,x+j_coord_offsetF,
829 x+j_coord_offsetG,x+j_coord_offsetH,
830 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
832 /* Calculate displacement vector */
833 dx00 = _mm256_sub_ps(ix0,jx0);
834 dy00 = _mm256_sub_ps(iy0,jy0);
835 dz00 = _mm256_sub_ps(iz0,jz0);
836 dx01 = _mm256_sub_ps(ix0,jx1);
837 dy01 = _mm256_sub_ps(iy0,jy1);
838 dz01 = _mm256_sub_ps(iz0,jz1);
839 dx02 = _mm256_sub_ps(ix0,jx2);
840 dy02 = _mm256_sub_ps(iy0,jy2);
841 dz02 = _mm256_sub_ps(iz0,jz2);
842 dx10 = _mm256_sub_ps(ix1,jx0);
843 dy10 = _mm256_sub_ps(iy1,jy0);
844 dz10 = _mm256_sub_ps(iz1,jz0);
845 dx11 = _mm256_sub_ps(ix1,jx1);
846 dy11 = _mm256_sub_ps(iy1,jy1);
847 dz11 = _mm256_sub_ps(iz1,jz1);
848 dx12 = _mm256_sub_ps(ix1,jx2);
849 dy12 = _mm256_sub_ps(iy1,jy2);
850 dz12 = _mm256_sub_ps(iz1,jz2);
851 dx20 = _mm256_sub_ps(ix2,jx0);
852 dy20 = _mm256_sub_ps(iy2,jy0);
853 dz20 = _mm256_sub_ps(iz2,jz0);
854 dx21 = _mm256_sub_ps(ix2,jx1);
855 dy21 = _mm256_sub_ps(iy2,jy1);
856 dz21 = _mm256_sub_ps(iz2,jz1);
857 dx22 = _mm256_sub_ps(ix2,jx2);
858 dy22 = _mm256_sub_ps(iy2,jy2);
859 dz22 = _mm256_sub_ps(iz2,jz2);
861 /* Calculate squared distance and things based on it */
862 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
863 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
864 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
865 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
866 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
867 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
868 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
869 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
870 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
872 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
873 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
874 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
875 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
876 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
877 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
878 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
879 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
880 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
882 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
883 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
884 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
885 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
886 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
887 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
888 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
889 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
890 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
892 fjx0 = _mm256_setzero_ps();
893 fjy0 = _mm256_setzero_ps();
894 fjz0 = _mm256_setzero_ps();
895 fjx1 = _mm256_setzero_ps();
896 fjy1 = _mm256_setzero_ps();
897 fjz1 = _mm256_setzero_ps();
898 fjx2 = _mm256_setzero_ps();
899 fjy2 = _mm256_setzero_ps();
900 fjz2 = _mm256_setzero_ps();
902 /**************************
903 * CALCULATE INTERACTIONS *
904 **************************/
906 if (gmx_mm256_any_lt(rsq00,rcutoff2))
909 r00 = _mm256_mul_ps(rsq00,rinv00);
910 r00 = _mm256_andnot_ps(dummy_mask,r00);
912 /* EWALD ELECTROSTATICS */
914 /* Analytical PME correction */
915 zeta2 = _mm256_mul_ps(beta2,rsq00);
916 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
917 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
918 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
919 felec = _mm256_mul_ps(qq00,felec);
920 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
921 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
922 velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
923 velec = _mm256_mul_ps(qq00,velec);
925 /* LENNARD-JONES DISPERSION/REPULSION */
927 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
928 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
929 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
930 vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
931 _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
932 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
934 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
936 /* Update potential sum for this i atom from the interaction with this j atom. */
937 velec = _mm256_and_ps(velec,cutoff_mask);
938 velec = _mm256_andnot_ps(dummy_mask,velec);
939 velecsum = _mm256_add_ps(velecsum,velec);
940 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
941 vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
942 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
944 fscal = _mm256_add_ps(felec,fvdw);
946 fscal = _mm256_and_ps(fscal,cutoff_mask);
948 fscal = _mm256_andnot_ps(dummy_mask,fscal);
950 /* Calculate temporary vectorial force */
951 tx = _mm256_mul_ps(fscal,dx00);
952 ty = _mm256_mul_ps(fscal,dy00);
953 tz = _mm256_mul_ps(fscal,dz00);
955 /* Update vectorial force */
956 fix0 = _mm256_add_ps(fix0,tx);
957 fiy0 = _mm256_add_ps(fiy0,ty);
958 fiz0 = _mm256_add_ps(fiz0,tz);
960 fjx0 = _mm256_add_ps(fjx0,tx);
961 fjy0 = _mm256_add_ps(fjy0,ty);
962 fjz0 = _mm256_add_ps(fjz0,tz);
966 /**************************
967 * CALCULATE INTERACTIONS *
968 **************************/
970 if (gmx_mm256_any_lt(rsq01,rcutoff2))
973 r01 = _mm256_mul_ps(rsq01,rinv01);
974 r01 = _mm256_andnot_ps(dummy_mask,r01);
976 /* EWALD ELECTROSTATICS */
978 /* Analytical PME correction */
979 zeta2 = _mm256_mul_ps(beta2,rsq01);
980 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
981 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
982 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
983 felec = _mm256_mul_ps(qq01,felec);
984 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
985 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
986 velec = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
987 velec = _mm256_mul_ps(qq01,velec);
989 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
991 /* Update potential sum for this i atom from the interaction with this j atom. */
992 velec = _mm256_and_ps(velec,cutoff_mask);
993 velec = _mm256_andnot_ps(dummy_mask,velec);
994 velecsum = _mm256_add_ps(velecsum,velec);
998 fscal = _mm256_and_ps(fscal,cutoff_mask);
1000 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1002 /* Calculate temporary vectorial force */
1003 tx = _mm256_mul_ps(fscal,dx01);
1004 ty = _mm256_mul_ps(fscal,dy01);
1005 tz = _mm256_mul_ps(fscal,dz01);
1007 /* Update vectorial force */
1008 fix0 = _mm256_add_ps(fix0,tx);
1009 fiy0 = _mm256_add_ps(fiy0,ty);
1010 fiz0 = _mm256_add_ps(fiz0,tz);
1012 fjx1 = _mm256_add_ps(fjx1,tx);
1013 fjy1 = _mm256_add_ps(fjy1,ty);
1014 fjz1 = _mm256_add_ps(fjz1,tz);
1018 /**************************
1019 * CALCULATE INTERACTIONS *
1020 **************************/
1022 if (gmx_mm256_any_lt(rsq02,rcutoff2))
1025 r02 = _mm256_mul_ps(rsq02,rinv02);
1026 r02 = _mm256_andnot_ps(dummy_mask,r02);
1028 /* EWALD ELECTROSTATICS */
1030 /* Analytical PME correction */
1031 zeta2 = _mm256_mul_ps(beta2,rsq02);
1032 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
1033 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1034 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1035 felec = _mm256_mul_ps(qq02,felec);
1036 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1037 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1038 velec = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
1039 velec = _mm256_mul_ps(qq02,velec);
1041 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
1043 /* Update potential sum for this i atom from the interaction with this j atom. */
1044 velec = _mm256_and_ps(velec,cutoff_mask);
1045 velec = _mm256_andnot_ps(dummy_mask,velec);
1046 velecsum = _mm256_add_ps(velecsum,velec);
1050 fscal = _mm256_and_ps(fscal,cutoff_mask);
1052 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1054 /* Calculate temporary vectorial force */
1055 tx = _mm256_mul_ps(fscal,dx02);
1056 ty = _mm256_mul_ps(fscal,dy02);
1057 tz = _mm256_mul_ps(fscal,dz02);
1059 /* Update vectorial force */
1060 fix0 = _mm256_add_ps(fix0,tx);
1061 fiy0 = _mm256_add_ps(fiy0,ty);
1062 fiz0 = _mm256_add_ps(fiz0,tz);
1064 fjx2 = _mm256_add_ps(fjx2,tx);
1065 fjy2 = _mm256_add_ps(fjy2,ty);
1066 fjz2 = _mm256_add_ps(fjz2,tz);
1070 /**************************
1071 * CALCULATE INTERACTIONS *
1072 **************************/
1074 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1077 r10 = _mm256_mul_ps(rsq10,rinv10);
1078 r10 = _mm256_andnot_ps(dummy_mask,r10);
1080 /* EWALD ELECTROSTATICS */
1082 /* Analytical PME correction */
1083 zeta2 = _mm256_mul_ps(beta2,rsq10);
1084 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1085 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1086 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1087 felec = _mm256_mul_ps(qq10,felec);
1088 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1089 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1090 velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
1091 velec = _mm256_mul_ps(qq10,velec);
1093 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1095 /* Update potential sum for this i atom from the interaction with this j atom. */
1096 velec = _mm256_and_ps(velec,cutoff_mask);
1097 velec = _mm256_andnot_ps(dummy_mask,velec);
1098 velecsum = _mm256_add_ps(velecsum,velec);
1102 fscal = _mm256_and_ps(fscal,cutoff_mask);
1104 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1106 /* Calculate temporary vectorial force */
1107 tx = _mm256_mul_ps(fscal,dx10);
1108 ty = _mm256_mul_ps(fscal,dy10);
1109 tz = _mm256_mul_ps(fscal,dz10);
1111 /* Update vectorial force */
1112 fix1 = _mm256_add_ps(fix1,tx);
1113 fiy1 = _mm256_add_ps(fiy1,ty);
1114 fiz1 = _mm256_add_ps(fiz1,tz);
1116 fjx0 = _mm256_add_ps(fjx0,tx);
1117 fjy0 = _mm256_add_ps(fjy0,ty);
1118 fjz0 = _mm256_add_ps(fjz0,tz);
1122 /**************************
1123 * CALCULATE INTERACTIONS *
1124 **************************/
1126 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1129 r11 = _mm256_mul_ps(rsq11,rinv11);
1130 r11 = _mm256_andnot_ps(dummy_mask,r11);
1132 /* EWALD ELECTROSTATICS */
1134 /* Analytical PME correction */
1135 zeta2 = _mm256_mul_ps(beta2,rsq11);
1136 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1137 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1138 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1139 felec = _mm256_mul_ps(qq11,felec);
1140 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1141 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1142 velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
1143 velec = _mm256_mul_ps(qq11,velec);
1145 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1147 /* Update potential sum for this i atom from the interaction with this j atom. */
1148 velec = _mm256_and_ps(velec,cutoff_mask);
1149 velec = _mm256_andnot_ps(dummy_mask,velec);
1150 velecsum = _mm256_add_ps(velecsum,velec);
1154 fscal = _mm256_and_ps(fscal,cutoff_mask);
1156 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1158 /* Calculate temporary vectorial force */
1159 tx = _mm256_mul_ps(fscal,dx11);
1160 ty = _mm256_mul_ps(fscal,dy11);
1161 tz = _mm256_mul_ps(fscal,dz11);
1163 /* Update vectorial force */
1164 fix1 = _mm256_add_ps(fix1,tx);
1165 fiy1 = _mm256_add_ps(fiy1,ty);
1166 fiz1 = _mm256_add_ps(fiz1,tz);
1168 fjx1 = _mm256_add_ps(fjx1,tx);
1169 fjy1 = _mm256_add_ps(fjy1,ty);
1170 fjz1 = _mm256_add_ps(fjz1,tz);
1174 /**************************
1175 * CALCULATE INTERACTIONS *
1176 **************************/
1178 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1181 r12 = _mm256_mul_ps(rsq12,rinv12);
1182 r12 = _mm256_andnot_ps(dummy_mask,r12);
1184 /* EWALD ELECTROSTATICS */
1186 /* Analytical PME correction */
1187 zeta2 = _mm256_mul_ps(beta2,rsq12);
1188 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1189 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1190 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1191 felec = _mm256_mul_ps(qq12,felec);
1192 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1193 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1194 velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
1195 velec = _mm256_mul_ps(qq12,velec);
1197 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1199 /* Update potential sum for this i atom from the interaction with this j atom. */
1200 velec = _mm256_and_ps(velec,cutoff_mask);
1201 velec = _mm256_andnot_ps(dummy_mask,velec);
1202 velecsum = _mm256_add_ps(velecsum,velec);
1206 fscal = _mm256_and_ps(fscal,cutoff_mask);
1208 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1210 /* Calculate temporary vectorial force */
1211 tx = _mm256_mul_ps(fscal,dx12);
1212 ty = _mm256_mul_ps(fscal,dy12);
1213 tz = _mm256_mul_ps(fscal,dz12);
1215 /* Update vectorial force */
1216 fix1 = _mm256_add_ps(fix1,tx);
1217 fiy1 = _mm256_add_ps(fiy1,ty);
1218 fiz1 = _mm256_add_ps(fiz1,tz);
1220 fjx2 = _mm256_add_ps(fjx2,tx);
1221 fjy2 = _mm256_add_ps(fjy2,ty);
1222 fjz2 = _mm256_add_ps(fjz2,tz);
1226 /**************************
1227 * CALCULATE INTERACTIONS *
1228 **************************/
1230 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1233 r20 = _mm256_mul_ps(rsq20,rinv20);
1234 r20 = _mm256_andnot_ps(dummy_mask,r20);
1236 /* EWALD ELECTROSTATICS */
1238 /* Analytical PME correction */
1239 zeta2 = _mm256_mul_ps(beta2,rsq20);
1240 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1241 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1242 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1243 felec = _mm256_mul_ps(qq20,felec);
1244 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1245 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1246 velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
1247 velec = _mm256_mul_ps(qq20,velec);
1249 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1251 /* Update potential sum for this i atom from the interaction with this j atom. */
1252 velec = _mm256_and_ps(velec,cutoff_mask);
1253 velec = _mm256_andnot_ps(dummy_mask,velec);
1254 velecsum = _mm256_add_ps(velecsum,velec);
1258 fscal = _mm256_and_ps(fscal,cutoff_mask);
1260 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1262 /* Calculate temporary vectorial force */
1263 tx = _mm256_mul_ps(fscal,dx20);
1264 ty = _mm256_mul_ps(fscal,dy20);
1265 tz = _mm256_mul_ps(fscal,dz20);
1267 /* Update vectorial force */
1268 fix2 = _mm256_add_ps(fix2,tx);
1269 fiy2 = _mm256_add_ps(fiy2,ty);
1270 fiz2 = _mm256_add_ps(fiz2,tz);
1272 fjx0 = _mm256_add_ps(fjx0,tx);
1273 fjy0 = _mm256_add_ps(fjy0,ty);
1274 fjz0 = _mm256_add_ps(fjz0,tz);
1278 /**************************
1279 * CALCULATE INTERACTIONS *
1280 **************************/
1282 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1285 r21 = _mm256_mul_ps(rsq21,rinv21);
1286 r21 = _mm256_andnot_ps(dummy_mask,r21);
1288 /* EWALD ELECTROSTATICS */
1290 /* Analytical PME correction */
1291 zeta2 = _mm256_mul_ps(beta2,rsq21);
1292 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
1293 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1294 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1295 felec = _mm256_mul_ps(qq21,felec);
1296 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1297 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1298 velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
1299 velec = _mm256_mul_ps(qq21,velec);
1301 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1303 /* Update potential sum for this i atom from the interaction with this j atom. */
1304 velec = _mm256_and_ps(velec,cutoff_mask);
1305 velec = _mm256_andnot_ps(dummy_mask,velec);
1306 velecsum = _mm256_add_ps(velecsum,velec);
1310 fscal = _mm256_and_ps(fscal,cutoff_mask);
1312 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1314 /* Calculate temporary vectorial force */
1315 tx = _mm256_mul_ps(fscal,dx21);
1316 ty = _mm256_mul_ps(fscal,dy21);
1317 tz = _mm256_mul_ps(fscal,dz21);
1319 /* Update vectorial force */
1320 fix2 = _mm256_add_ps(fix2,tx);
1321 fiy2 = _mm256_add_ps(fiy2,ty);
1322 fiz2 = _mm256_add_ps(fiz2,tz);
1324 fjx1 = _mm256_add_ps(fjx1,tx);
1325 fjy1 = _mm256_add_ps(fjy1,ty);
1326 fjz1 = _mm256_add_ps(fjz1,tz);
1330 /**************************
1331 * CALCULATE INTERACTIONS *
1332 **************************/
1334 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1337 r22 = _mm256_mul_ps(rsq22,rinv22);
1338 r22 = _mm256_andnot_ps(dummy_mask,r22);
1340 /* EWALD ELECTROSTATICS */
1342 /* Analytical PME correction */
1343 zeta2 = _mm256_mul_ps(beta2,rsq22);
1344 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
1345 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1346 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1347 felec = _mm256_mul_ps(qq22,felec);
1348 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1349 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1350 velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
1351 velec = _mm256_mul_ps(qq22,velec);
1353 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1355 /* Update potential sum for this i atom from the interaction with this j atom. */
1356 velec = _mm256_and_ps(velec,cutoff_mask);
1357 velec = _mm256_andnot_ps(dummy_mask,velec);
1358 velecsum = _mm256_add_ps(velecsum,velec);
1362 fscal = _mm256_and_ps(fscal,cutoff_mask);
1364 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1366 /* Calculate temporary vectorial force */
1367 tx = _mm256_mul_ps(fscal,dx22);
1368 ty = _mm256_mul_ps(fscal,dy22);
1369 tz = _mm256_mul_ps(fscal,dz22);
1371 /* Update vectorial force */
1372 fix2 = _mm256_add_ps(fix2,tx);
1373 fiy2 = _mm256_add_ps(fiy2,ty);
1374 fiz2 = _mm256_add_ps(fiz2,tz);
1376 fjx2 = _mm256_add_ps(fjx2,tx);
1377 fjy2 = _mm256_add_ps(fjy2,ty);
1378 fjz2 = _mm256_add_ps(fjz2,tz);
1382 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1383 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1384 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1385 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1386 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1387 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1388 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1389 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1391 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1392 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1394 /* Inner loop uses 1008 flops */
1397 /* End of innermost loop */
1399 gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1400 f+i_coord_offset,fshift+i_shift_offset);
1403 /* Update potential energies */
1404 gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1405 gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
1407 /* Increment number of inner iterations */
1408 inneriter += j_index_end - j_index_start;
1410 /* Outer loop uses 20 flops */
1413 /* Increment number of outer iterations */
1416 /* Update outer/inner flops */
1418 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*1008);
1421 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_single
1422 * Electrostatics interaction: Ewald
1423 * VdW interaction: LennardJones
1424 * Geometry: Water3-Water3
1425 * Calculate force/pot: Force
1428 nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_single
1429 (t_nblist * gmx_restrict nlist,
1430 rvec * gmx_restrict xx,
1431 rvec * gmx_restrict ff,
1432 t_forcerec * gmx_restrict fr,
1433 t_mdatoms * gmx_restrict mdatoms,
1434 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1435 t_nrnb * gmx_restrict nrnb)
1437 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1438 * just 0 for non-waters.
1439 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1440 * jnr indices corresponding to data put in the eight positions in the SIMD register.
1442 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1443 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1444 int jnrA,jnrB,jnrC,jnrD;
1445 int jnrE,jnrF,jnrG,jnrH;
1446 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1447 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1448 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1449 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1450 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1451 real rcutoff_scalar;
1452 real *shiftvec,*fshift,*x,*f;
1453 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1454 real scratch[4*DIM];
1455 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1456 real * vdwioffsetptr0;
1457 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1458 real * vdwioffsetptr1;
1459 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1460 real * vdwioffsetptr2;
1461 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1462 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1463 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1464 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1465 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1466 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1467 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1468 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1469 __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1470 __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1471 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1472 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1473 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1474 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1475 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1476 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1477 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
1480 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1483 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
1484 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
1486 __m128i ewitab_lo,ewitab_hi;
1487 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1488 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1490 __m256 dummy_mask,cutoff_mask;
1491 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1492 __m256 one = _mm256_set1_ps(1.0);
1493 __m256 two = _mm256_set1_ps(2.0);
1499 jindex = nlist->jindex;
1501 shiftidx = nlist->shift;
1503 shiftvec = fr->shift_vec[0];
1504 fshift = fr->fshift[0];
1505 facel = _mm256_set1_ps(fr->epsfac);
1506 charge = mdatoms->chargeA;
1507 nvdwtype = fr->ntype;
1508 vdwparam = fr->nbfp;
1509 vdwtype = mdatoms->typeA;
1511 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
1512 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1513 beta2 = _mm256_mul_ps(beta,beta);
1514 beta3 = _mm256_mul_ps(beta,beta2);
1516 ewtab = fr->ic->tabq_coul_F;
1517 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
1518 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1520 /* Setup water-specific parameters */
1521 inr = nlist->iinr[0];
1522 iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1523 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1524 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1525 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1527 jq0 = _mm256_set1_ps(charge[inr+0]);
1528 jq1 = _mm256_set1_ps(charge[inr+1]);
1529 jq2 = _mm256_set1_ps(charge[inr+2]);
1530 vdwjidx0A = 2*vdwtype[inr+0];
1531 qq00 = _mm256_mul_ps(iq0,jq0);
1532 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1533 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1534 qq01 = _mm256_mul_ps(iq0,jq1);
1535 qq02 = _mm256_mul_ps(iq0,jq2);
1536 qq10 = _mm256_mul_ps(iq1,jq0);
1537 qq11 = _mm256_mul_ps(iq1,jq1);
1538 qq12 = _mm256_mul_ps(iq1,jq2);
1539 qq20 = _mm256_mul_ps(iq2,jq0);
1540 qq21 = _mm256_mul_ps(iq2,jq1);
1541 qq22 = _mm256_mul_ps(iq2,jq2);
1543 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1544 rcutoff_scalar = fr->rcoulomb;
1545 rcutoff = _mm256_set1_ps(rcutoff_scalar);
1546 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
1548 sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
1549 rvdw = _mm256_set1_ps(fr->rvdw);
1551 /* Avoid stupid compiler warnings */
1552 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1553 j_coord_offsetA = 0;
1554 j_coord_offsetB = 0;
1555 j_coord_offsetC = 0;
1556 j_coord_offsetD = 0;
1557 j_coord_offsetE = 0;
1558 j_coord_offsetF = 0;
1559 j_coord_offsetG = 0;
1560 j_coord_offsetH = 0;
1565 for(iidx=0;iidx<4*DIM;iidx++)
1567 scratch[iidx] = 0.0;
1570 /* Start outer loop over neighborlists */
1571 for(iidx=0; iidx<nri; iidx++)
1573 /* Load shift vector for this list */
1574 i_shift_offset = DIM*shiftidx[iidx];
1576 /* Load limits for loop over neighbors */
1577 j_index_start = jindex[iidx];
1578 j_index_end = jindex[iidx+1];
1580 /* Get outer coordinate index */
1582 i_coord_offset = DIM*inr;
1584 /* Load i particle coords and add shift vector */
1585 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1586 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1588 fix0 = _mm256_setzero_ps();
1589 fiy0 = _mm256_setzero_ps();
1590 fiz0 = _mm256_setzero_ps();
1591 fix1 = _mm256_setzero_ps();
1592 fiy1 = _mm256_setzero_ps();
1593 fiz1 = _mm256_setzero_ps();
1594 fix2 = _mm256_setzero_ps();
1595 fiy2 = _mm256_setzero_ps();
1596 fiz2 = _mm256_setzero_ps();
1598 /* Start inner kernel loop */
1599 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1602 /* Get j neighbor index, and coordinate index */
1604 jnrB = jjnr[jidx+1];
1605 jnrC = jjnr[jidx+2];
1606 jnrD = jjnr[jidx+3];
1607 jnrE = jjnr[jidx+4];
1608 jnrF = jjnr[jidx+5];
1609 jnrG = jjnr[jidx+6];
1610 jnrH = jjnr[jidx+7];
1611 j_coord_offsetA = DIM*jnrA;
1612 j_coord_offsetB = DIM*jnrB;
1613 j_coord_offsetC = DIM*jnrC;
1614 j_coord_offsetD = DIM*jnrD;
1615 j_coord_offsetE = DIM*jnrE;
1616 j_coord_offsetF = DIM*jnrF;
1617 j_coord_offsetG = DIM*jnrG;
1618 j_coord_offsetH = DIM*jnrH;
1620 /* load j atom coordinates */
1621 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1622 x+j_coord_offsetC,x+j_coord_offsetD,
1623 x+j_coord_offsetE,x+j_coord_offsetF,
1624 x+j_coord_offsetG,x+j_coord_offsetH,
1625 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1627 /* Calculate displacement vector */
1628 dx00 = _mm256_sub_ps(ix0,jx0);
1629 dy00 = _mm256_sub_ps(iy0,jy0);
1630 dz00 = _mm256_sub_ps(iz0,jz0);
1631 dx01 = _mm256_sub_ps(ix0,jx1);
1632 dy01 = _mm256_sub_ps(iy0,jy1);
1633 dz01 = _mm256_sub_ps(iz0,jz1);
1634 dx02 = _mm256_sub_ps(ix0,jx2);
1635 dy02 = _mm256_sub_ps(iy0,jy2);
1636 dz02 = _mm256_sub_ps(iz0,jz2);
1637 dx10 = _mm256_sub_ps(ix1,jx0);
1638 dy10 = _mm256_sub_ps(iy1,jy0);
1639 dz10 = _mm256_sub_ps(iz1,jz0);
1640 dx11 = _mm256_sub_ps(ix1,jx1);
1641 dy11 = _mm256_sub_ps(iy1,jy1);
1642 dz11 = _mm256_sub_ps(iz1,jz1);
1643 dx12 = _mm256_sub_ps(ix1,jx2);
1644 dy12 = _mm256_sub_ps(iy1,jy2);
1645 dz12 = _mm256_sub_ps(iz1,jz2);
1646 dx20 = _mm256_sub_ps(ix2,jx0);
1647 dy20 = _mm256_sub_ps(iy2,jy0);
1648 dz20 = _mm256_sub_ps(iz2,jz0);
1649 dx21 = _mm256_sub_ps(ix2,jx1);
1650 dy21 = _mm256_sub_ps(iy2,jy1);
1651 dz21 = _mm256_sub_ps(iz2,jz1);
1652 dx22 = _mm256_sub_ps(ix2,jx2);
1653 dy22 = _mm256_sub_ps(iy2,jy2);
1654 dz22 = _mm256_sub_ps(iz2,jz2);
1656 /* Calculate squared distance and things based on it */
1657 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1658 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1659 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1660 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1661 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1662 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1663 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1664 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1665 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1667 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
1668 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
1669 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
1670 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
1671 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
1672 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
1673 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
1674 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
1675 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
1677 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1678 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
1679 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
1680 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
1681 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
1682 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
1683 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
1684 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
1685 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
1687 fjx0 = _mm256_setzero_ps();
1688 fjy0 = _mm256_setzero_ps();
1689 fjz0 = _mm256_setzero_ps();
1690 fjx1 = _mm256_setzero_ps();
1691 fjy1 = _mm256_setzero_ps();
1692 fjz1 = _mm256_setzero_ps();
1693 fjx2 = _mm256_setzero_ps();
1694 fjy2 = _mm256_setzero_ps();
1695 fjz2 = _mm256_setzero_ps();
1697 /**************************
1698 * CALCULATE INTERACTIONS *
1699 **************************/
1701 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1704 r00 = _mm256_mul_ps(rsq00,rinv00);
1706 /* EWALD ELECTROSTATICS */
1708 /* Analytical PME correction */
1709 zeta2 = _mm256_mul_ps(beta2,rsq00);
1710 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
1711 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1712 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1713 felec = _mm256_mul_ps(qq00,felec);
1715 /* LENNARD-JONES DISPERSION/REPULSION */
1717 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1718 fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
1720 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1722 fscal = _mm256_add_ps(felec,fvdw);
1724 fscal = _mm256_and_ps(fscal,cutoff_mask);
1726 /* Calculate temporary vectorial force */
1727 tx = _mm256_mul_ps(fscal,dx00);
1728 ty = _mm256_mul_ps(fscal,dy00);
1729 tz = _mm256_mul_ps(fscal,dz00);
1731 /* Update vectorial force */
1732 fix0 = _mm256_add_ps(fix0,tx);
1733 fiy0 = _mm256_add_ps(fiy0,ty);
1734 fiz0 = _mm256_add_ps(fiz0,tz);
1736 fjx0 = _mm256_add_ps(fjx0,tx);
1737 fjy0 = _mm256_add_ps(fjy0,ty);
1738 fjz0 = _mm256_add_ps(fjz0,tz);
1742 /**************************
1743 * CALCULATE INTERACTIONS *
1744 **************************/
1746 if (gmx_mm256_any_lt(rsq01,rcutoff2))
1749 r01 = _mm256_mul_ps(rsq01,rinv01);
1751 /* EWALD ELECTROSTATICS */
1753 /* Analytical PME correction */
1754 zeta2 = _mm256_mul_ps(beta2,rsq01);
1755 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
1756 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1757 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1758 felec = _mm256_mul_ps(qq01,felec);
1760 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
1764 fscal = _mm256_and_ps(fscal,cutoff_mask);
1766 /* Calculate temporary vectorial force */
1767 tx = _mm256_mul_ps(fscal,dx01);
1768 ty = _mm256_mul_ps(fscal,dy01);
1769 tz = _mm256_mul_ps(fscal,dz01);
1771 /* Update vectorial force */
1772 fix0 = _mm256_add_ps(fix0,tx);
1773 fiy0 = _mm256_add_ps(fiy0,ty);
1774 fiz0 = _mm256_add_ps(fiz0,tz);
1776 fjx1 = _mm256_add_ps(fjx1,tx);
1777 fjy1 = _mm256_add_ps(fjy1,ty);
1778 fjz1 = _mm256_add_ps(fjz1,tz);
1782 /**************************
1783 * CALCULATE INTERACTIONS *
1784 **************************/
1786 if (gmx_mm256_any_lt(rsq02,rcutoff2))
1789 r02 = _mm256_mul_ps(rsq02,rinv02);
1791 /* EWALD ELECTROSTATICS */
1793 /* Analytical PME correction */
1794 zeta2 = _mm256_mul_ps(beta2,rsq02);
1795 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
1796 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1797 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1798 felec = _mm256_mul_ps(qq02,felec);
1800 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
1804 fscal = _mm256_and_ps(fscal,cutoff_mask);
1806 /* Calculate temporary vectorial force */
1807 tx = _mm256_mul_ps(fscal,dx02);
1808 ty = _mm256_mul_ps(fscal,dy02);
1809 tz = _mm256_mul_ps(fscal,dz02);
1811 /* Update vectorial force */
1812 fix0 = _mm256_add_ps(fix0,tx);
1813 fiy0 = _mm256_add_ps(fiy0,ty);
1814 fiz0 = _mm256_add_ps(fiz0,tz);
1816 fjx2 = _mm256_add_ps(fjx2,tx);
1817 fjy2 = _mm256_add_ps(fjy2,ty);
1818 fjz2 = _mm256_add_ps(fjz2,tz);
1822 /**************************
1823 * CALCULATE INTERACTIONS *
1824 **************************/
1826 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1829 r10 = _mm256_mul_ps(rsq10,rinv10);
1831 /* EWALD ELECTROSTATICS */
1833 /* Analytical PME correction */
1834 zeta2 = _mm256_mul_ps(beta2,rsq10);
1835 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1836 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1837 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1838 felec = _mm256_mul_ps(qq10,felec);
1840 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1844 fscal = _mm256_and_ps(fscal,cutoff_mask);
1846 /* Calculate temporary vectorial force */
1847 tx = _mm256_mul_ps(fscal,dx10);
1848 ty = _mm256_mul_ps(fscal,dy10);
1849 tz = _mm256_mul_ps(fscal,dz10);
1851 /* Update vectorial force */
1852 fix1 = _mm256_add_ps(fix1,tx);
1853 fiy1 = _mm256_add_ps(fiy1,ty);
1854 fiz1 = _mm256_add_ps(fiz1,tz);
1856 fjx0 = _mm256_add_ps(fjx0,tx);
1857 fjy0 = _mm256_add_ps(fjy0,ty);
1858 fjz0 = _mm256_add_ps(fjz0,tz);
1862 /**************************
1863 * CALCULATE INTERACTIONS *
1864 **************************/
1866 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1869 r11 = _mm256_mul_ps(rsq11,rinv11);
1871 /* EWALD ELECTROSTATICS */
1873 /* Analytical PME correction */
1874 zeta2 = _mm256_mul_ps(beta2,rsq11);
1875 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1876 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1877 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1878 felec = _mm256_mul_ps(qq11,felec);
1880 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1884 fscal = _mm256_and_ps(fscal,cutoff_mask);
1886 /* Calculate temporary vectorial force */
1887 tx = _mm256_mul_ps(fscal,dx11);
1888 ty = _mm256_mul_ps(fscal,dy11);
1889 tz = _mm256_mul_ps(fscal,dz11);
1891 /* Update vectorial force */
1892 fix1 = _mm256_add_ps(fix1,tx);
1893 fiy1 = _mm256_add_ps(fiy1,ty);
1894 fiz1 = _mm256_add_ps(fiz1,tz);
1896 fjx1 = _mm256_add_ps(fjx1,tx);
1897 fjy1 = _mm256_add_ps(fjy1,ty);
1898 fjz1 = _mm256_add_ps(fjz1,tz);
1902 /**************************
1903 * CALCULATE INTERACTIONS *
1904 **************************/
1906 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1909 r12 = _mm256_mul_ps(rsq12,rinv12);
1911 /* EWALD ELECTROSTATICS */
1913 /* Analytical PME correction */
1914 zeta2 = _mm256_mul_ps(beta2,rsq12);
1915 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1916 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1917 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1918 felec = _mm256_mul_ps(qq12,felec);
1920 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1924 fscal = _mm256_and_ps(fscal,cutoff_mask);
1926 /* Calculate temporary vectorial force */
1927 tx = _mm256_mul_ps(fscal,dx12);
1928 ty = _mm256_mul_ps(fscal,dy12);
1929 tz = _mm256_mul_ps(fscal,dz12);
1931 /* Update vectorial force */
1932 fix1 = _mm256_add_ps(fix1,tx);
1933 fiy1 = _mm256_add_ps(fiy1,ty);
1934 fiz1 = _mm256_add_ps(fiz1,tz);
1936 fjx2 = _mm256_add_ps(fjx2,tx);
1937 fjy2 = _mm256_add_ps(fjy2,ty);
1938 fjz2 = _mm256_add_ps(fjz2,tz);
1942 /**************************
1943 * CALCULATE INTERACTIONS *
1944 **************************/
1946 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1949 r20 = _mm256_mul_ps(rsq20,rinv20);
1951 /* EWALD ELECTROSTATICS */
1953 /* Analytical PME correction */
1954 zeta2 = _mm256_mul_ps(beta2,rsq20);
1955 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1956 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1957 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1958 felec = _mm256_mul_ps(qq20,felec);
1960 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1964 fscal = _mm256_and_ps(fscal,cutoff_mask);
1966 /* Calculate temporary vectorial force */
1967 tx = _mm256_mul_ps(fscal,dx20);
1968 ty = _mm256_mul_ps(fscal,dy20);
1969 tz = _mm256_mul_ps(fscal,dz20);
1971 /* Update vectorial force */
1972 fix2 = _mm256_add_ps(fix2,tx);
1973 fiy2 = _mm256_add_ps(fiy2,ty);
1974 fiz2 = _mm256_add_ps(fiz2,tz);
1976 fjx0 = _mm256_add_ps(fjx0,tx);
1977 fjy0 = _mm256_add_ps(fjy0,ty);
1978 fjz0 = _mm256_add_ps(fjz0,tz);
1982 /**************************
1983 * CALCULATE INTERACTIONS *
1984 **************************/
1986 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1989 r21 = _mm256_mul_ps(rsq21,rinv21);
1991 /* EWALD ELECTROSTATICS */
1993 /* Analytical PME correction */
1994 zeta2 = _mm256_mul_ps(beta2,rsq21);
1995 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
1996 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1997 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1998 felec = _mm256_mul_ps(qq21,felec);
2000 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2004 fscal = _mm256_and_ps(fscal,cutoff_mask);
2006 /* Calculate temporary vectorial force */
2007 tx = _mm256_mul_ps(fscal,dx21);
2008 ty = _mm256_mul_ps(fscal,dy21);
2009 tz = _mm256_mul_ps(fscal,dz21);
2011 /* Update vectorial force */
2012 fix2 = _mm256_add_ps(fix2,tx);
2013 fiy2 = _mm256_add_ps(fiy2,ty);
2014 fiz2 = _mm256_add_ps(fiz2,tz);
2016 fjx1 = _mm256_add_ps(fjx1,tx);
2017 fjy1 = _mm256_add_ps(fjy1,ty);
2018 fjz1 = _mm256_add_ps(fjz1,tz);
2022 /**************************
2023 * CALCULATE INTERACTIONS *
2024 **************************/
2026 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2029 r22 = _mm256_mul_ps(rsq22,rinv22);
2031 /* EWALD ELECTROSTATICS */
2033 /* Analytical PME correction */
2034 zeta2 = _mm256_mul_ps(beta2,rsq22);
2035 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
2036 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2037 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2038 felec = _mm256_mul_ps(qq22,felec);
2040 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2044 fscal = _mm256_and_ps(fscal,cutoff_mask);
2046 /* Calculate temporary vectorial force */
2047 tx = _mm256_mul_ps(fscal,dx22);
2048 ty = _mm256_mul_ps(fscal,dy22);
2049 tz = _mm256_mul_ps(fscal,dz22);
2051 /* Update vectorial force */
2052 fix2 = _mm256_add_ps(fix2,tx);
2053 fiy2 = _mm256_add_ps(fiy2,ty);
2054 fiz2 = _mm256_add_ps(fiz2,tz);
2056 fjx2 = _mm256_add_ps(fjx2,tx);
2057 fjy2 = _mm256_add_ps(fjy2,ty);
2058 fjz2 = _mm256_add_ps(fjz2,tz);
2062 fjptrA = f+j_coord_offsetA;
2063 fjptrB = f+j_coord_offsetB;
2064 fjptrC = f+j_coord_offsetC;
2065 fjptrD = f+j_coord_offsetD;
2066 fjptrE = f+j_coord_offsetE;
2067 fjptrF = f+j_coord_offsetF;
2068 fjptrG = f+j_coord_offsetG;
2069 fjptrH = f+j_coord_offsetH;
2071 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2072 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2074 /* Inner loop uses 538 flops */
2077 if(jidx<j_index_end)
2080 /* Get j neighbor index, and coordinate index */
2081 jnrlistA = jjnr[jidx];
2082 jnrlistB = jjnr[jidx+1];
2083 jnrlistC = jjnr[jidx+2];
2084 jnrlistD = jjnr[jidx+3];
2085 jnrlistE = jjnr[jidx+4];
2086 jnrlistF = jjnr[jidx+5];
2087 jnrlistG = jjnr[jidx+6];
2088 jnrlistH = jjnr[jidx+7];
2089 /* Sign of each element will be negative for non-real atoms.
2090 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2091 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
2093 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
2094 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
2096 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
2097 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
2098 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
2099 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
2100 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
2101 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
2102 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
2103 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
2104 j_coord_offsetA = DIM*jnrA;
2105 j_coord_offsetB = DIM*jnrB;
2106 j_coord_offsetC = DIM*jnrC;
2107 j_coord_offsetD = DIM*jnrD;
2108 j_coord_offsetE = DIM*jnrE;
2109 j_coord_offsetF = DIM*jnrF;
2110 j_coord_offsetG = DIM*jnrG;
2111 j_coord_offsetH = DIM*jnrH;
2113 /* load j atom coordinates */
2114 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2115 x+j_coord_offsetC,x+j_coord_offsetD,
2116 x+j_coord_offsetE,x+j_coord_offsetF,
2117 x+j_coord_offsetG,x+j_coord_offsetH,
2118 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
2120 /* Calculate displacement vector */
2121 dx00 = _mm256_sub_ps(ix0,jx0);
2122 dy00 = _mm256_sub_ps(iy0,jy0);
2123 dz00 = _mm256_sub_ps(iz0,jz0);
2124 dx01 = _mm256_sub_ps(ix0,jx1);
2125 dy01 = _mm256_sub_ps(iy0,jy1);
2126 dz01 = _mm256_sub_ps(iz0,jz1);
2127 dx02 = _mm256_sub_ps(ix0,jx2);
2128 dy02 = _mm256_sub_ps(iy0,jy2);
2129 dz02 = _mm256_sub_ps(iz0,jz2);
2130 dx10 = _mm256_sub_ps(ix1,jx0);
2131 dy10 = _mm256_sub_ps(iy1,jy0);
2132 dz10 = _mm256_sub_ps(iz1,jz0);
2133 dx11 = _mm256_sub_ps(ix1,jx1);
2134 dy11 = _mm256_sub_ps(iy1,jy1);
2135 dz11 = _mm256_sub_ps(iz1,jz1);
2136 dx12 = _mm256_sub_ps(ix1,jx2);
2137 dy12 = _mm256_sub_ps(iy1,jy2);
2138 dz12 = _mm256_sub_ps(iz1,jz2);
2139 dx20 = _mm256_sub_ps(ix2,jx0);
2140 dy20 = _mm256_sub_ps(iy2,jy0);
2141 dz20 = _mm256_sub_ps(iz2,jz0);
2142 dx21 = _mm256_sub_ps(ix2,jx1);
2143 dy21 = _mm256_sub_ps(iy2,jy1);
2144 dz21 = _mm256_sub_ps(iz2,jz1);
2145 dx22 = _mm256_sub_ps(ix2,jx2);
2146 dy22 = _mm256_sub_ps(iy2,jy2);
2147 dz22 = _mm256_sub_ps(iz2,jz2);
2149 /* Calculate squared distance and things based on it */
2150 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2151 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
2152 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
2153 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
2154 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2155 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2156 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
2157 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2158 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2160 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
2161 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
2162 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
2163 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
2164 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
2165 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
2166 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
2167 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
2168 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
2170 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
2171 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
2172 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
2173 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
2174 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
2175 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
2176 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
2177 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
2178 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
2180 fjx0 = _mm256_setzero_ps();
2181 fjy0 = _mm256_setzero_ps();
2182 fjz0 = _mm256_setzero_ps();
2183 fjx1 = _mm256_setzero_ps();
2184 fjy1 = _mm256_setzero_ps();
2185 fjz1 = _mm256_setzero_ps();
2186 fjx2 = _mm256_setzero_ps();
2187 fjy2 = _mm256_setzero_ps();
2188 fjz2 = _mm256_setzero_ps();
2190 /**************************
2191 * CALCULATE INTERACTIONS *
2192 **************************/
2194 if (gmx_mm256_any_lt(rsq00,rcutoff2))
2197 r00 = _mm256_mul_ps(rsq00,rinv00);
2198 r00 = _mm256_andnot_ps(dummy_mask,r00);
2200 /* EWALD ELECTROSTATICS */
2202 /* Analytical PME correction */
2203 zeta2 = _mm256_mul_ps(beta2,rsq00);
2204 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
2205 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2206 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2207 felec = _mm256_mul_ps(qq00,felec);
2209 /* LENNARD-JONES DISPERSION/REPULSION */
2211 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2212 fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
2214 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2216 fscal = _mm256_add_ps(felec,fvdw);
2218 fscal = _mm256_and_ps(fscal,cutoff_mask);
2220 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2222 /* Calculate temporary vectorial force */
2223 tx = _mm256_mul_ps(fscal,dx00);
2224 ty = _mm256_mul_ps(fscal,dy00);
2225 tz = _mm256_mul_ps(fscal,dz00);
2227 /* Update vectorial force */
2228 fix0 = _mm256_add_ps(fix0,tx);
2229 fiy0 = _mm256_add_ps(fiy0,ty);
2230 fiz0 = _mm256_add_ps(fiz0,tz);
2232 fjx0 = _mm256_add_ps(fjx0,tx);
2233 fjy0 = _mm256_add_ps(fjy0,ty);
2234 fjz0 = _mm256_add_ps(fjz0,tz);
2238 /**************************
2239 * CALCULATE INTERACTIONS *
2240 **************************/
2242 if (gmx_mm256_any_lt(rsq01,rcutoff2))
2245 r01 = _mm256_mul_ps(rsq01,rinv01);
2246 r01 = _mm256_andnot_ps(dummy_mask,r01);
2248 /* EWALD ELECTROSTATICS */
2250 /* Analytical PME correction */
2251 zeta2 = _mm256_mul_ps(beta2,rsq01);
2252 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
2253 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2254 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2255 felec = _mm256_mul_ps(qq01,felec);
2257 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
2261 fscal = _mm256_and_ps(fscal,cutoff_mask);
2263 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2265 /* Calculate temporary vectorial force */
2266 tx = _mm256_mul_ps(fscal,dx01);
2267 ty = _mm256_mul_ps(fscal,dy01);
2268 tz = _mm256_mul_ps(fscal,dz01);
2270 /* Update vectorial force */
2271 fix0 = _mm256_add_ps(fix0,tx);
2272 fiy0 = _mm256_add_ps(fiy0,ty);
2273 fiz0 = _mm256_add_ps(fiz0,tz);
2275 fjx1 = _mm256_add_ps(fjx1,tx);
2276 fjy1 = _mm256_add_ps(fjy1,ty);
2277 fjz1 = _mm256_add_ps(fjz1,tz);
2281 /**************************
2282 * CALCULATE INTERACTIONS *
2283 **************************/
2285 if (gmx_mm256_any_lt(rsq02,rcutoff2))
2288 r02 = _mm256_mul_ps(rsq02,rinv02);
2289 r02 = _mm256_andnot_ps(dummy_mask,r02);
2291 /* EWALD ELECTROSTATICS */
2293 /* Analytical PME correction */
2294 zeta2 = _mm256_mul_ps(beta2,rsq02);
2295 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
2296 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2297 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2298 felec = _mm256_mul_ps(qq02,felec);
2300 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
2304 fscal = _mm256_and_ps(fscal,cutoff_mask);
2306 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2308 /* Calculate temporary vectorial force */
2309 tx = _mm256_mul_ps(fscal,dx02);
2310 ty = _mm256_mul_ps(fscal,dy02);
2311 tz = _mm256_mul_ps(fscal,dz02);
2313 /* Update vectorial force */
2314 fix0 = _mm256_add_ps(fix0,tx);
2315 fiy0 = _mm256_add_ps(fiy0,ty);
2316 fiz0 = _mm256_add_ps(fiz0,tz);
2318 fjx2 = _mm256_add_ps(fjx2,tx);
2319 fjy2 = _mm256_add_ps(fjy2,ty);
2320 fjz2 = _mm256_add_ps(fjz2,tz);
2324 /**************************
2325 * CALCULATE INTERACTIONS *
2326 **************************/
2328 if (gmx_mm256_any_lt(rsq10,rcutoff2))
2331 r10 = _mm256_mul_ps(rsq10,rinv10);
2332 r10 = _mm256_andnot_ps(dummy_mask,r10);
2334 /* EWALD ELECTROSTATICS */
2336 /* Analytical PME correction */
2337 zeta2 = _mm256_mul_ps(beta2,rsq10);
2338 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
2339 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2340 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2341 felec = _mm256_mul_ps(qq10,felec);
2343 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
2347 fscal = _mm256_and_ps(fscal,cutoff_mask);
2349 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2351 /* Calculate temporary vectorial force */
2352 tx = _mm256_mul_ps(fscal,dx10);
2353 ty = _mm256_mul_ps(fscal,dy10);
2354 tz = _mm256_mul_ps(fscal,dz10);
2356 /* Update vectorial force */
2357 fix1 = _mm256_add_ps(fix1,tx);
2358 fiy1 = _mm256_add_ps(fiy1,ty);
2359 fiz1 = _mm256_add_ps(fiz1,tz);
2361 fjx0 = _mm256_add_ps(fjx0,tx);
2362 fjy0 = _mm256_add_ps(fjy0,ty);
2363 fjz0 = _mm256_add_ps(fjz0,tz);
2367 /**************************
2368 * CALCULATE INTERACTIONS *
2369 **************************/
2371 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2374 r11 = _mm256_mul_ps(rsq11,rinv11);
2375 r11 = _mm256_andnot_ps(dummy_mask,r11);
2377 /* EWALD ELECTROSTATICS */
2379 /* Analytical PME correction */
2380 zeta2 = _mm256_mul_ps(beta2,rsq11);
2381 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
2382 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2383 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2384 felec = _mm256_mul_ps(qq11,felec);
2386 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2390 fscal = _mm256_and_ps(fscal,cutoff_mask);
2392 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2394 /* Calculate temporary vectorial force */
2395 tx = _mm256_mul_ps(fscal,dx11);
2396 ty = _mm256_mul_ps(fscal,dy11);
2397 tz = _mm256_mul_ps(fscal,dz11);
2399 /* Update vectorial force */
2400 fix1 = _mm256_add_ps(fix1,tx);
2401 fiy1 = _mm256_add_ps(fiy1,ty);
2402 fiz1 = _mm256_add_ps(fiz1,tz);
2404 fjx1 = _mm256_add_ps(fjx1,tx);
2405 fjy1 = _mm256_add_ps(fjy1,ty);
2406 fjz1 = _mm256_add_ps(fjz1,tz);
2410 /**************************
2411 * CALCULATE INTERACTIONS *
2412 **************************/
2414 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2417 r12 = _mm256_mul_ps(rsq12,rinv12);
2418 r12 = _mm256_andnot_ps(dummy_mask,r12);
2420 /* EWALD ELECTROSTATICS */
2422 /* Analytical PME correction */
2423 zeta2 = _mm256_mul_ps(beta2,rsq12);
2424 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
2425 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2426 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2427 felec = _mm256_mul_ps(qq12,felec);
2429 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2433 fscal = _mm256_and_ps(fscal,cutoff_mask);
2435 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2437 /* Calculate temporary vectorial force */
2438 tx = _mm256_mul_ps(fscal,dx12);
2439 ty = _mm256_mul_ps(fscal,dy12);
2440 tz = _mm256_mul_ps(fscal,dz12);
2442 /* Update vectorial force */
2443 fix1 = _mm256_add_ps(fix1,tx);
2444 fiy1 = _mm256_add_ps(fiy1,ty);
2445 fiz1 = _mm256_add_ps(fiz1,tz);
2447 fjx2 = _mm256_add_ps(fjx2,tx);
2448 fjy2 = _mm256_add_ps(fjy2,ty);
2449 fjz2 = _mm256_add_ps(fjz2,tz);
2453 /**************************
2454 * CALCULATE INTERACTIONS *
2455 **************************/
2457 if (gmx_mm256_any_lt(rsq20,rcutoff2))
2460 r20 = _mm256_mul_ps(rsq20,rinv20);
2461 r20 = _mm256_andnot_ps(dummy_mask,r20);
2463 /* EWALD ELECTROSTATICS */
2465 /* Analytical PME correction */
2466 zeta2 = _mm256_mul_ps(beta2,rsq20);
2467 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
2468 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2469 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2470 felec = _mm256_mul_ps(qq20,felec);
2472 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
2476 fscal = _mm256_and_ps(fscal,cutoff_mask);
2478 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2480 /* Calculate temporary vectorial force */
2481 tx = _mm256_mul_ps(fscal,dx20);
2482 ty = _mm256_mul_ps(fscal,dy20);
2483 tz = _mm256_mul_ps(fscal,dz20);
2485 /* Update vectorial force */
2486 fix2 = _mm256_add_ps(fix2,tx);
2487 fiy2 = _mm256_add_ps(fiy2,ty);
2488 fiz2 = _mm256_add_ps(fiz2,tz);
2490 fjx0 = _mm256_add_ps(fjx0,tx);
2491 fjy0 = _mm256_add_ps(fjy0,ty);
2492 fjz0 = _mm256_add_ps(fjz0,tz);
2496 /**************************
2497 * CALCULATE INTERACTIONS *
2498 **************************/
2500 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2503 r21 = _mm256_mul_ps(rsq21,rinv21);
2504 r21 = _mm256_andnot_ps(dummy_mask,r21);
2506 /* EWALD ELECTROSTATICS */
2508 /* Analytical PME correction */
2509 zeta2 = _mm256_mul_ps(beta2,rsq21);
2510 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
2511 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2512 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2513 felec = _mm256_mul_ps(qq21,felec);
2515 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2519 fscal = _mm256_and_ps(fscal,cutoff_mask);
2521 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2523 /* Calculate temporary vectorial force */
2524 tx = _mm256_mul_ps(fscal,dx21);
2525 ty = _mm256_mul_ps(fscal,dy21);
2526 tz = _mm256_mul_ps(fscal,dz21);
2528 /* Update vectorial force */
2529 fix2 = _mm256_add_ps(fix2,tx);
2530 fiy2 = _mm256_add_ps(fiy2,ty);
2531 fiz2 = _mm256_add_ps(fiz2,tz);
2533 fjx1 = _mm256_add_ps(fjx1,tx);
2534 fjy1 = _mm256_add_ps(fjy1,ty);
2535 fjz1 = _mm256_add_ps(fjz1,tz);
2539 /**************************
2540 * CALCULATE INTERACTIONS *
2541 **************************/
2543 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2546 r22 = _mm256_mul_ps(rsq22,rinv22);
2547 r22 = _mm256_andnot_ps(dummy_mask,r22);
2549 /* EWALD ELECTROSTATICS */
2551 /* Analytical PME correction */
2552 zeta2 = _mm256_mul_ps(beta2,rsq22);
2553 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
2554 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2555 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2556 felec = _mm256_mul_ps(qq22,felec);
2558 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2562 fscal = _mm256_and_ps(fscal,cutoff_mask);
2564 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2566 /* Calculate temporary vectorial force */
2567 tx = _mm256_mul_ps(fscal,dx22);
2568 ty = _mm256_mul_ps(fscal,dy22);
2569 tz = _mm256_mul_ps(fscal,dz22);
2571 /* Update vectorial force */
2572 fix2 = _mm256_add_ps(fix2,tx);
2573 fiy2 = _mm256_add_ps(fiy2,ty);
2574 fiz2 = _mm256_add_ps(fiz2,tz);
2576 fjx2 = _mm256_add_ps(fjx2,tx);
2577 fjy2 = _mm256_add_ps(fjy2,ty);
2578 fjz2 = _mm256_add_ps(fjz2,tz);
2582 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2583 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2584 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2585 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2586 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2587 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2588 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2589 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
2591 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2592 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2594 /* Inner loop uses 547 flops */
2597 /* End of innermost loop */
2599 gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2600 f+i_coord_offset,fshift+i_shift_offset);
2602 /* Increment number of inner iterations */
2603 inneriter += j_index_end - j_index_start;
2605 /* Outer loop uses 18 flops */
2608 /* Increment number of outer iterations */
2611 /* Update outer/inner flops */
2613 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*547);