2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
47 #include "kernelutil_x86_avx_256_single.h"
50 * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_single
51 * Electrostatics interaction: Ewald
52 * VdW interaction: LennardJones
53 * Geometry: Water4-Water4
54 * Calculate force/pot: PotentialAndForce
57 nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_single
58 (t_nblist * gmx_restrict nlist,
59 rvec * gmx_restrict xx,
60 rvec * gmx_restrict ff,
61 struct t_forcerec * gmx_restrict fr,
62 t_mdatoms * gmx_restrict mdatoms,
63 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64 t_nrnb * gmx_restrict nrnb)
66 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
67 * just 0 for non-waters.
68 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
69  * jnr indices corresponding to data put in the eight positions in the SIMD register.
71 int i_shift_offset,i_coord_offset,outeriter,inneriter;
72 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73 int jnrA,jnrB,jnrC,jnrD;
74 int jnrE,jnrF,jnrG,jnrH;
75 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
79 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
81 real *shiftvec,*fshift,*x,*f;
82 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
84 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
85 real * vdwioffsetptr0;
86 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
87 real * vdwioffsetptr1;
88 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
89 real * vdwioffsetptr2;
90 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
91 real * vdwioffsetptr3;
92 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
93 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
94 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
95 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
96 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
97 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
98 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
99 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
100 __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
101 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
102 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
103 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
104 __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
105 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
106 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
107 __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
108 __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
109 __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
110 __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
111 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
114 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
117 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
118 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
120 __m128i ewitab_lo,ewitab_hi;
121 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
122 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
124 __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
125 real rswitch_scalar,d_scalar;
126 __m256 dummy_mask,cutoff_mask;
127 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
128 __m256 one = _mm256_set1_ps(1.0);
129 __m256 two = _mm256_set1_ps(2.0);
135 jindex = nlist->jindex;
137 shiftidx = nlist->shift;
139 shiftvec = fr->shift_vec[0];
140 fshift = fr->fshift[0];
141 facel = _mm256_set1_ps(fr->ic->epsfac);
142 charge = mdatoms->chargeA;
143 nvdwtype = fr->ntype;
145 vdwtype = mdatoms->typeA;
147 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
148 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
149 beta2 = _mm256_mul_ps(beta,beta);
150 beta3 = _mm256_mul_ps(beta,beta2);
152 ewtab = fr->ic->tabq_coul_FDV0;
153 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
154 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
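    /* The Ewald table pointer and spacing are loaded above for completeness, but this kernel
     * variant evaluates the real-space Ewald term with the analytical PME correction
     * (avx256_pmecorrF_f / avx256_pmecorrV_f) in the inner loops rather than by table lookup.
     */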
156 /* Setup water-specific parameters */
157 inr = nlist->iinr[0];
158 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
159 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
160 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
161 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
163 jq1 = _mm256_set1_ps(charge[inr+1]);
164 jq2 = _mm256_set1_ps(charge[inr+2]);
165 jq3 = _mm256_set1_ps(charge[inr+3]);
166 vdwjidx0A = 2*vdwtype[inr+0];
167 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
168 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
169 qq11 = _mm256_mul_ps(iq1,jq1);
170 qq12 = _mm256_mul_ps(iq1,jq2);
171 qq13 = _mm256_mul_ps(iq1,jq3);
172 qq21 = _mm256_mul_ps(iq2,jq1);
173 qq22 = _mm256_mul_ps(iq2,jq2);
174 qq23 = _mm256_mul_ps(iq2,jq3);
175 qq31 = _mm256_mul_ps(iq3,jq1);
176 qq32 = _mm256_mul_ps(iq3,jq2);
177 qq33 = _mm256_mul_ps(iq3,jq3);
179     /* With explicit cutoffs the value must be identical for electrostatics and VdW, so we arbitrarily use the electrostatics cutoff */
180 rcutoff_scalar = fr->ic->rcoulomb;
181 rcutoff = _mm256_set1_ps(rcutoff_scalar);
182 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
184 rswitch_scalar = fr->ic->rcoulomb_switch;
185 rswitch = _mm256_set1_ps(rswitch_scalar);
186 /* Setup switch parameters */
187 d_scalar = rcutoff_scalar-rswitch_scalar;
188 d = _mm256_set1_ps(d_scalar);
189 swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
190 swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
191 swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
192 swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
193 swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
194 swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
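    /* The coefficients above define the fifth-order switching polynomial used in the inner loops.
     * With d = r - rswitch and D = rcutoff - rswitch (d_scalar):
     *    sw(d)  = 1 + swV3*d^3 + swV4*d^4 + swV5*d^5 = 1 - 10*(d/D)^3 + 15*(d/D)^4 - 6*(d/D)^5
     *    dsw(d) = swF2*d^2 + swF3*d^3 + swF4*d^4     (the derivative of sw with respect to d)
     * These choices make sw equal to 1 at r = rswitch and 0 at r = rcutoff, with the first and
     * second derivatives vanishing at both ends.
     */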
196 /* Avoid stupid compiler warnings */
197 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
210 for(iidx=0;iidx<4*DIM;iidx++)
215 /* Start outer loop over neighborlists */
216 for(iidx=0; iidx<nri; iidx++)
218 /* Load shift vector for this list */
219 i_shift_offset = DIM*shiftidx[iidx];
221 /* Load limits for loop over neighbors */
222 j_index_start = jindex[iidx];
223 j_index_end = jindex[iidx+1];
225 /* Get outer coordinate index */
227 i_coord_offset = DIM*inr;
229 /* Load i particle coords and add shift vector */
230 gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
231 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
233 fix0 = _mm256_setzero_ps();
234 fiy0 = _mm256_setzero_ps();
235 fiz0 = _mm256_setzero_ps();
236 fix1 = _mm256_setzero_ps();
237 fiy1 = _mm256_setzero_ps();
238 fiz1 = _mm256_setzero_ps();
239 fix2 = _mm256_setzero_ps();
240 fiy2 = _mm256_setzero_ps();
241 fiz2 = _mm256_setzero_ps();
242 fix3 = _mm256_setzero_ps();
243 fiy3 = _mm256_setzero_ps();
244 fiz3 = _mm256_setzero_ps();
246 /* Reset potential sums */
247 velecsum = _mm256_setzero_ps();
248 vvdwsum = _mm256_setzero_ps();
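        /* Each __m256 accumulator above keeps per-lane partial sums: the i-atom force components
         * fix0..fiz3 and the potential sums velecsum/vvdwsum collect contributions from the eight
         * j waters processed per inner-loop iteration, and are only reduced across lanes after the
         * inner loop (gmx_mm256_update_iforce_4atom_swizzle_ps / gmx_mm256_update_1pot_ps).
         */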
250 /* Start inner kernel loop */
251 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
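            /* This first inner loop only handles full 8-wide iterations, i.e. it stops as soon as
             * jjnr[jidx+7] would be a negative padding index; the trailing, partially filled
             * iteration is handled by the second inner loop further down, where dummy_mask clears
             * the contributions from padding lanes.
             */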
254 /* Get j neighbor index, and coordinate index */
263 j_coord_offsetA = DIM*jnrA;
264 j_coord_offsetB = DIM*jnrB;
265 j_coord_offsetC = DIM*jnrC;
266 j_coord_offsetD = DIM*jnrD;
267 j_coord_offsetE = DIM*jnrE;
268 j_coord_offsetF = DIM*jnrF;
269 j_coord_offsetG = DIM*jnrG;
270 j_coord_offsetH = DIM*jnrH;
272 /* load j atom coordinates */
273 gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
274 x+j_coord_offsetC,x+j_coord_offsetD,
275 x+j_coord_offsetE,x+j_coord_offsetF,
276 x+j_coord_offsetG,x+j_coord_offsetH,
277 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
278 &jy2,&jz2,&jx3,&jy3,&jz3);
280 /* Calculate displacement vector */
281 dx00 = _mm256_sub_ps(ix0,jx0);
282 dy00 = _mm256_sub_ps(iy0,jy0);
283 dz00 = _mm256_sub_ps(iz0,jz0);
284 dx11 = _mm256_sub_ps(ix1,jx1);
285 dy11 = _mm256_sub_ps(iy1,jy1);
286 dz11 = _mm256_sub_ps(iz1,jz1);
287 dx12 = _mm256_sub_ps(ix1,jx2);
288 dy12 = _mm256_sub_ps(iy1,jy2);
289 dz12 = _mm256_sub_ps(iz1,jz2);
290 dx13 = _mm256_sub_ps(ix1,jx3);
291 dy13 = _mm256_sub_ps(iy1,jy3);
292 dz13 = _mm256_sub_ps(iz1,jz3);
293 dx21 = _mm256_sub_ps(ix2,jx1);
294 dy21 = _mm256_sub_ps(iy2,jy1);
295 dz21 = _mm256_sub_ps(iz2,jz1);
296 dx22 = _mm256_sub_ps(ix2,jx2);
297 dy22 = _mm256_sub_ps(iy2,jy2);
298 dz22 = _mm256_sub_ps(iz2,jz2);
299 dx23 = _mm256_sub_ps(ix2,jx3);
300 dy23 = _mm256_sub_ps(iy2,jy3);
301 dz23 = _mm256_sub_ps(iz2,jz3);
302 dx31 = _mm256_sub_ps(ix3,jx1);
303 dy31 = _mm256_sub_ps(iy3,jy1);
304 dz31 = _mm256_sub_ps(iz3,jz1);
305 dx32 = _mm256_sub_ps(ix3,jx2);
306 dy32 = _mm256_sub_ps(iy3,jy2);
307 dz32 = _mm256_sub_ps(iz3,jz2);
308 dx33 = _mm256_sub_ps(ix3,jx3);
309 dy33 = _mm256_sub_ps(iy3,jy3);
310 dz33 = _mm256_sub_ps(iz3,jz3);
312 /* Calculate squared distance and things based on it */
313 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
314 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
315 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
316 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
317 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
318 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
319 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
320 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
321 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
322 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
324 rinv00 = avx256_invsqrt_f(rsq00);
325 rinv11 = avx256_invsqrt_f(rsq11);
326 rinv12 = avx256_invsqrt_f(rsq12);
327 rinv13 = avx256_invsqrt_f(rsq13);
328 rinv21 = avx256_invsqrt_f(rsq21);
329 rinv22 = avx256_invsqrt_f(rsq22);
330 rinv23 = avx256_invsqrt_f(rsq23);
331 rinv31 = avx256_invsqrt_f(rsq31);
332 rinv32 = avx256_invsqrt_f(rsq32);
333 rinv33 = avx256_invsqrt_f(rsq33);
335 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
336 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
337 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
338 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
339 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
340 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
341 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
342 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
343 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
344 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
346 fjx0 = _mm256_setzero_ps();
347 fjy0 = _mm256_setzero_ps();
348 fjz0 = _mm256_setzero_ps();
349 fjx1 = _mm256_setzero_ps();
350 fjy1 = _mm256_setzero_ps();
351 fjz1 = _mm256_setzero_ps();
352 fjx2 = _mm256_setzero_ps();
353 fjy2 = _mm256_setzero_ps();
354 fjz2 = _mm256_setzero_ps();
355 fjx3 = _mm256_setzero_ps();
356 fjy3 = _mm256_setzero_ps();
357 fjz3 = _mm256_setzero_ps();
359 /**************************
360 * CALCULATE INTERACTIONS *
361 **************************/
363 if (gmx_mm256_any_lt(rsq00,rcutoff2))
366 r00 = _mm256_mul_ps(rsq00,rinv00);
368 /* LENNARD-JONES DISPERSION/REPULSION */
370 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
371 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
372 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
373 vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
374 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
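            /* Note: this assumes the usual GROMACS group-kernel convention that vdwparam stores
             * the Lennard-Jones parameters premultiplied, i.e. c6_00 = 6*C6 and c12_00 = 12*C12.
             * Then fvdw = (12*C12/r^12 - 6*C6/r^6)/r^2 comes out directly, and the 1/12 and 1/6
             * factors above recover the plain potential vvdw = C12/r^12 - C6/r^6.
             */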
376 d = _mm256_sub_ps(r00,rswitch);
377 d = _mm256_max_ps(d,_mm256_setzero_ps());
378 d2 = _mm256_mul_ps(d,d);
379 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
381 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
383 /* Evaluate switch function */
384 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
385 fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
386 vvdw = _mm256_mul_ps(vvdw,sw);
387 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
389 /* Update potential sum for this i atom from the interaction with this j atom. */
390 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
391 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
395 fscal = _mm256_and_ps(fscal,cutoff_mask);
397 /* Calculate temporary vectorial force */
398 tx = _mm256_mul_ps(fscal,dx00);
399 ty = _mm256_mul_ps(fscal,dy00);
400 tz = _mm256_mul_ps(fscal,dz00);
402 /* Update vectorial force */
403 fix0 = _mm256_add_ps(fix0,tx);
404 fiy0 = _mm256_add_ps(fiy0,ty);
405 fiz0 = _mm256_add_ps(fiz0,tz);
407 fjx0 = _mm256_add_ps(fjx0,tx);
408 fjy0 = _mm256_add_ps(fjy0,ty);
409 fjz0 = _mm256_add_ps(fjz0,tz);
413 /**************************
414 * CALCULATE INTERACTIONS *
415 **************************/
417 if (gmx_mm256_any_lt(rsq11,rcutoff2))
420 r11 = _mm256_mul_ps(rsq11,rinv11);
422 /* EWALD ELECTROSTATICS */
424 /* Analytical PME correction */
425 zeta2 = _mm256_mul_ps(beta2,rsq11);
426 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
427 pmecorrF = avx256_pmecorrF_f(zeta2);
428 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
429 felec = _mm256_mul_ps(qq11,felec);
430 pmecorrV = avx256_pmecorrV_f(zeta2);
431 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
432 velec = _mm256_sub_ps(rinv11,pmecorrV);
433 velec = _mm256_mul_ps(qq11,velec);
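            /* Sketch of the intended math for the analytical PME correction (with z^2 = beta^2*r^2;
             * the polynomial approximations themselves live in the SIMD math headers):
             *    avx256_pmecorrV_f(z^2) ~ erf(z)/z, so velec = qq*(1/r - erf(beta*r)/r) = qq*erfc(beta*r)/r
             *    avx256_pmecorrF_f(z^2) ~ 2*exp(-z^2)/(sqrt(pi)*z^2) - erf(z)/z^3, so
             *    felec = qq*(erfc(beta*r)/r^3 + 2*beta*exp(-beta^2*r^2)/(sqrt(pi)*r^2)),
             * i.e. the real-space Ewald potential and force/r without any table lookups.
             */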
435 d = _mm256_sub_ps(r11,rswitch);
436 d = _mm256_max_ps(d,_mm256_setzero_ps());
437 d2 = _mm256_mul_ps(d,d);
438 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
440 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
442 /* Evaluate switch function */
443 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
444 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
445 velec = _mm256_mul_ps(velec,sw);
446 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
448 /* Update potential sum for this i atom from the interaction with this j atom. */
449 velec = _mm256_and_ps(velec,cutoff_mask);
450 velecsum = _mm256_add_ps(velecsum,velec);
454 fscal = _mm256_and_ps(fscal,cutoff_mask);
456 /* Calculate temporary vectorial force */
457 tx = _mm256_mul_ps(fscal,dx11);
458 ty = _mm256_mul_ps(fscal,dy11);
459 tz = _mm256_mul_ps(fscal,dz11);
461 /* Update vectorial force */
462 fix1 = _mm256_add_ps(fix1,tx);
463 fiy1 = _mm256_add_ps(fiy1,ty);
464 fiz1 = _mm256_add_ps(fiz1,tz);
466 fjx1 = _mm256_add_ps(fjx1,tx);
467 fjy1 = _mm256_add_ps(fjy1,ty);
468 fjz1 = _mm256_add_ps(fjz1,tz);
472 /**************************
473 * CALCULATE INTERACTIONS *
474 **************************/
476 if (gmx_mm256_any_lt(rsq12,rcutoff2))
479 r12 = _mm256_mul_ps(rsq12,rinv12);
481 /* EWALD ELECTROSTATICS */
483 /* Analytical PME correction */
484 zeta2 = _mm256_mul_ps(beta2,rsq12);
485 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
486 pmecorrF = avx256_pmecorrF_f(zeta2);
487 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
488 felec = _mm256_mul_ps(qq12,felec);
489 pmecorrV = avx256_pmecorrV_f(zeta2);
490 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
491 velec = _mm256_sub_ps(rinv12,pmecorrV);
492 velec = _mm256_mul_ps(qq12,velec);
494 d = _mm256_sub_ps(r12,rswitch);
495 d = _mm256_max_ps(d,_mm256_setzero_ps());
496 d2 = _mm256_mul_ps(d,d);
497 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
499 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
501 /* Evaluate switch function */
502 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
503 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
504 velec = _mm256_mul_ps(velec,sw);
505 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
507 /* Update potential sum for this i atom from the interaction with this j atom. */
508 velec = _mm256_and_ps(velec,cutoff_mask);
509 velecsum = _mm256_add_ps(velecsum,velec);
513 fscal = _mm256_and_ps(fscal,cutoff_mask);
515 /* Calculate temporary vectorial force */
516 tx = _mm256_mul_ps(fscal,dx12);
517 ty = _mm256_mul_ps(fscal,dy12);
518 tz = _mm256_mul_ps(fscal,dz12);
520 /* Update vectorial force */
521 fix1 = _mm256_add_ps(fix1,tx);
522 fiy1 = _mm256_add_ps(fiy1,ty);
523 fiz1 = _mm256_add_ps(fiz1,tz);
525 fjx2 = _mm256_add_ps(fjx2,tx);
526 fjy2 = _mm256_add_ps(fjy2,ty);
527 fjz2 = _mm256_add_ps(fjz2,tz);
531 /**************************
532 * CALCULATE INTERACTIONS *
533 **************************/
535 if (gmx_mm256_any_lt(rsq13,rcutoff2))
538 r13 = _mm256_mul_ps(rsq13,rinv13);
540 /* EWALD ELECTROSTATICS */
542 /* Analytical PME correction */
543 zeta2 = _mm256_mul_ps(beta2,rsq13);
544 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
545 pmecorrF = avx256_pmecorrF_f(zeta2);
546 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
547 felec = _mm256_mul_ps(qq13,felec);
548 pmecorrV = avx256_pmecorrV_f(zeta2);
549 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
550 velec = _mm256_sub_ps(rinv13,pmecorrV);
551 velec = _mm256_mul_ps(qq13,velec);
553 d = _mm256_sub_ps(r13,rswitch);
554 d = _mm256_max_ps(d,_mm256_setzero_ps());
555 d2 = _mm256_mul_ps(d,d);
556 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
558 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
560 /* Evaluate switch function */
561 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
562 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
563 velec = _mm256_mul_ps(velec,sw);
564 cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
566 /* Update potential sum for this i atom from the interaction with this j atom. */
567 velec = _mm256_and_ps(velec,cutoff_mask);
568 velecsum = _mm256_add_ps(velecsum,velec);
572 fscal = _mm256_and_ps(fscal,cutoff_mask);
574 /* Calculate temporary vectorial force */
575 tx = _mm256_mul_ps(fscal,dx13);
576 ty = _mm256_mul_ps(fscal,dy13);
577 tz = _mm256_mul_ps(fscal,dz13);
579 /* Update vectorial force */
580 fix1 = _mm256_add_ps(fix1,tx);
581 fiy1 = _mm256_add_ps(fiy1,ty);
582 fiz1 = _mm256_add_ps(fiz1,tz);
584 fjx3 = _mm256_add_ps(fjx3,tx);
585 fjy3 = _mm256_add_ps(fjy3,ty);
586 fjz3 = _mm256_add_ps(fjz3,tz);
590 /**************************
591 * CALCULATE INTERACTIONS *
592 **************************/
594 if (gmx_mm256_any_lt(rsq21,rcutoff2))
597 r21 = _mm256_mul_ps(rsq21,rinv21);
599 /* EWALD ELECTROSTATICS */
601 /* Analytical PME correction */
602 zeta2 = _mm256_mul_ps(beta2,rsq21);
603 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
604 pmecorrF = avx256_pmecorrF_f(zeta2);
605 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
606 felec = _mm256_mul_ps(qq21,felec);
607 pmecorrV = avx256_pmecorrV_f(zeta2);
608 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
609 velec = _mm256_sub_ps(rinv21,pmecorrV);
610 velec = _mm256_mul_ps(qq21,velec);
612 d = _mm256_sub_ps(r21,rswitch);
613 d = _mm256_max_ps(d,_mm256_setzero_ps());
614 d2 = _mm256_mul_ps(d,d);
615 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
617 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
619 /* Evaluate switch function */
620 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
621 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
622 velec = _mm256_mul_ps(velec,sw);
623 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
625 /* Update potential sum for this i atom from the interaction with this j atom. */
626 velec = _mm256_and_ps(velec,cutoff_mask);
627 velecsum = _mm256_add_ps(velecsum,velec);
631 fscal = _mm256_and_ps(fscal,cutoff_mask);
633 /* Calculate temporary vectorial force */
634 tx = _mm256_mul_ps(fscal,dx21);
635 ty = _mm256_mul_ps(fscal,dy21);
636 tz = _mm256_mul_ps(fscal,dz21);
638 /* Update vectorial force */
639 fix2 = _mm256_add_ps(fix2,tx);
640 fiy2 = _mm256_add_ps(fiy2,ty);
641 fiz2 = _mm256_add_ps(fiz2,tz);
643 fjx1 = _mm256_add_ps(fjx1,tx);
644 fjy1 = _mm256_add_ps(fjy1,ty);
645 fjz1 = _mm256_add_ps(fjz1,tz);
649 /**************************
650 * CALCULATE INTERACTIONS *
651 **************************/
653 if (gmx_mm256_any_lt(rsq22,rcutoff2))
656 r22 = _mm256_mul_ps(rsq22,rinv22);
658 /* EWALD ELECTROSTATICS */
660 /* Analytical PME correction */
661 zeta2 = _mm256_mul_ps(beta2,rsq22);
662 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
663 pmecorrF = avx256_pmecorrF_f(zeta2);
664 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
665 felec = _mm256_mul_ps(qq22,felec);
666 pmecorrV = avx256_pmecorrV_f(zeta2);
667 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
668 velec = _mm256_sub_ps(rinv22,pmecorrV);
669 velec = _mm256_mul_ps(qq22,velec);
671 d = _mm256_sub_ps(r22,rswitch);
672 d = _mm256_max_ps(d,_mm256_setzero_ps());
673 d2 = _mm256_mul_ps(d,d);
674 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
676 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
678 /* Evaluate switch function */
679 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
680 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
681 velec = _mm256_mul_ps(velec,sw);
682 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
684 /* Update potential sum for this i atom from the interaction with this j atom. */
685 velec = _mm256_and_ps(velec,cutoff_mask);
686 velecsum = _mm256_add_ps(velecsum,velec);
690 fscal = _mm256_and_ps(fscal,cutoff_mask);
692 /* Calculate temporary vectorial force */
693 tx = _mm256_mul_ps(fscal,dx22);
694 ty = _mm256_mul_ps(fscal,dy22);
695 tz = _mm256_mul_ps(fscal,dz22);
697 /* Update vectorial force */
698 fix2 = _mm256_add_ps(fix2,tx);
699 fiy2 = _mm256_add_ps(fiy2,ty);
700 fiz2 = _mm256_add_ps(fiz2,tz);
702 fjx2 = _mm256_add_ps(fjx2,tx);
703 fjy2 = _mm256_add_ps(fjy2,ty);
704 fjz2 = _mm256_add_ps(fjz2,tz);
708 /**************************
709 * CALCULATE INTERACTIONS *
710 **************************/
712 if (gmx_mm256_any_lt(rsq23,rcutoff2))
715 r23 = _mm256_mul_ps(rsq23,rinv23);
717 /* EWALD ELECTROSTATICS */
719 /* Analytical PME correction */
720 zeta2 = _mm256_mul_ps(beta2,rsq23);
721 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
722 pmecorrF = avx256_pmecorrF_f(zeta2);
723 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
724 felec = _mm256_mul_ps(qq23,felec);
725 pmecorrV = avx256_pmecorrV_f(zeta2);
726 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
727 velec = _mm256_sub_ps(rinv23,pmecorrV);
728 velec = _mm256_mul_ps(qq23,velec);
730 d = _mm256_sub_ps(r23,rswitch);
731 d = _mm256_max_ps(d,_mm256_setzero_ps());
732 d2 = _mm256_mul_ps(d,d);
733 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
735 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
737 /* Evaluate switch function */
738 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
739 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
740 velec = _mm256_mul_ps(velec,sw);
741 cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
743 /* Update potential sum for this i atom from the interaction with this j atom. */
744 velec = _mm256_and_ps(velec,cutoff_mask);
745 velecsum = _mm256_add_ps(velecsum,velec);
749 fscal = _mm256_and_ps(fscal,cutoff_mask);
751 /* Calculate temporary vectorial force */
752 tx = _mm256_mul_ps(fscal,dx23);
753 ty = _mm256_mul_ps(fscal,dy23);
754 tz = _mm256_mul_ps(fscal,dz23);
756 /* Update vectorial force */
757 fix2 = _mm256_add_ps(fix2,tx);
758 fiy2 = _mm256_add_ps(fiy2,ty);
759 fiz2 = _mm256_add_ps(fiz2,tz);
761 fjx3 = _mm256_add_ps(fjx3,tx);
762 fjy3 = _mm256_add_ps(fjy3,ty);
763 fjz3 = _mm256_add_ps(fjz3,tz);
767 /**************************
768 * CALCULATE INTERACTIONS *
769 **************************/
771 if (gmx_mm256_any_lt(rsq31,rcutoff2))
774 r31 = _mm256_mul_ps(rsq31,rinv31);
776 /* EWALD ELECTROSTATICS */
778 /* Analytical PME correction */
779 zeta2 = _mm256_mul_ps(beta2,rsq31);
780 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
781 pmecorrF = avx256_pmecorrF_f(zeta2);
782 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
783 felec = _mm256_mul_ps(qq31,felec);
784 pmecorrV = avx256_pmecorrV_f(zeta2);
785 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
786 velec = _mm256_sub_ps(rinv31,pmecorrV);
787 velec = _mm256_mul_ps(qq31,velec);
789 d = _mm256_sub_ps(r31,rswitch);
790 d = _mm256_max_ps(d,_mm256_setzero_ps());
791 d2 = _mm256_mul_ps(d,d);
792 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
794 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
796 /* Evaluate switch function */
797 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
798 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
799 velec = _mm256_mul_ps(velec,sw);
800 cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
802 /* Update potential sum for this i atom from the interaction with this j atom. */
803 velec = _mm256_and_ps(velec,cutoff_mask);
804 velecsum = _mm256_add_ps(velecsum,velec);
808 fscal = _mm256_and_ps(fscal,cutoff_mask);
810 /* Calculate temporary vectorial force */
811 tx = _mm256_mul_ps(fscal,dx31);
812 ty = _mm256_mul_ps(fscal,dy31);
813 tz = _mm256_mul_ps(fscal,dz31);
815 /* Update vectorial force */
816 fix3 = _mm256_add_ps(fix3,tx);
817 fiy3 = _mm256_add_ps(fiy3,ty);
818 fiz3 = _mm256_add_ps(fiz3,tz);
820 fjx1 = _mm256_add_ps(fjx1,tx);
821 fjy1 = _mm256_add_ps(fjy1,ty);
822 fjz1 = _mm256_add_ps(fjz1,tz);
826 /**************************
827 * CALCULATE INTERACTIONS *
828 **************************/
830 if (gmx_mm256_any_lt(rsq32,rcutoff2))
833 r32 = _mm256_mul_ps(rsq32,rinv32);
835 /* EWALD ELECTROSTATICS */
837 /* Analytical PME correction */
838 zeta2 = _mm256_mul_ps(beta2,rsq32);
839 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
840 pmecorrF = avx256_pmecorrF_f(zeta2);
841 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
842 felec = _mm256_mul_ps(qq32,felec);
843 pmecorrV = avx256_pmecorrV_f(zeta2);
844 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
845 velec = _mm256_sub_ps(rinv32,pmecorrV);
846 velec = _mm256_mul_ps(qq32,velec);
848 d = _mm256_sub_ps(r32,rswitch);
849 d = _mm256_max_ps(d,_mm256_setzero_ps());
850 d2 = _mm256_mul_ps(d,d);
851 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
853 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
855 /* Evaluate switch function */
856 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
857 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
858 velec = _mm256_mul_ps(velec,sw);
859 cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
861 /* Update potential sum for this i atom from the interaction with this j atom. */
862 velec = _mm256_and_ps(velec,cutoff_mask);
863 velecsum = _mm256_add_ps(velecsum,velec);
867 fscal = _mm256_and_ps(fscal,cutoff_mask);
869 /* Calculate temporary vectorial force */
870 tx = _mm256_mul_ps(fscal,dx32);
871 ty = _mm256_mul_ps(fscal,dy32);
872 tz = _mm256_mul_ps(fscal,dz32);
874 /* Update vectorial force */
875 fix3 = _mm256_add_ps(fix3,tx);
876 fiy3 = _mm256_add_ps(fiy3,ty);
877 fiz3 = _mm256_add_ps(fiz3,tz);
879 fjx2 = _mm256_add_ps(fjx2,tx);
880 fjy2 = _mm256_add_ps(fjy2,ty);
881 fjz2 = _mm256_add_ps(fjz2,tz);
885 /**************************
886 * CALCULATE INTERACTIONS *
887 **************************/
889 if (gmx_mm256_any_lt(rsq33,rcutoff2))
892 r33 = _mm256_mul_ps(rsq33,rinv33);
894 /* EWALD ELECTROSTATICS */
896 /* Analytical PME correction */
897 zeta2 = _mm256_mul_ps(beta2,rsq33);
898 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
899 pmecorrF = avx256_pmecorrF_f(zeta2);
900 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
901 felec = _mm256_mul_ps(qq33,felec);
902 pmecorrV = avx256_pmecorrV_f(zeta2);
903 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
904 velec = _mm256_sub_ps(rinv33,pmecorrV);
905 velec = _mm256_mul_ps(qq33,velec);
907 d = _mm256_sub_ps(r33,rswitch);
908 d = _mm256_max_ps(d,_mm256_setzero_ps());
909 d2 = _mm256_mul_ps(d,d);
910 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
912 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
914 /* Evaluate switch function */
915 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
916 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
917 velec = _mm256_mul_ps(velec,sw);
918 cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
920 /* Update potential sum for this i atom from the interaction with this j atom. */
921 velec = _mm256_and_ps(velec,cutoff_mask);
922 velecsum = _mm256_add_ps(velecsum,velec);
926 fscal = _mm256_and_ps(fscal,cutoff_mask);
928 /* Calculate temporary vectorial force */
929 tx = _mm256_mul_ps(fscal,dx33);
930 ty = _mm256_mul_ps(fscal,dy33);
931 tz = _mm256_mul_ps(fscal,dz33);
933 /* Update vectorial force */
934 fix3 = _mm256_add_ps(fix3,tx);
935 fiy3 = _mm256_add_ps(fiy3,ty);
936 fiz3 = _mm256_add_ps(fiz3,tz);
938 fjx3 = _mm256_add_ps(fjx3,tx);
939 fjy3 = _mm256_add_ps(fjy3,ty);
940 fjz3 = _mm256_add_ps(fjz3,tz);
944 fjptrA = f+j_coord_offsetA;
945 fjptrB = f+j_coord_offsetB;
946 fjptrC = f+j_coord_offsetC;
947 fjptrD = f+j_coord_offsetD;
948 fjptrE = f+j_coord_offsetE;
949 fjptrF = f+j_coord_offsetF;
950 fjptrG = f+j_coord_offsetG;
951 fjptrH = f+j_coord_offsetH;
953 gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
954 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
955 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
957 /* Inner loop uses 1034 flops */
963 /* Get j neighbor index, and coordinate index */
964 jnrlistA = jjnr[jidx];
965 jnrlistB = jjnr[jidx+1];
966 jnrlistC = jjnr[jidx+2];
967 jnrlistD = jjnr[jidx+3];
968 jnrlistE = jjnr[jidx+4];
969 jnrlistF = jjnr[jidx+5];
970 jnrlistG = jjnr[jidx+6];
971 jnrlistH = jjnr[jidx+7];
972 /* Sign of each element will be negative for non-real atoms.
973 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
974          * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
976 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
977 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
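            /* The j list is padded to a multiple of 8 with negative indices; the two 128-bit signed
             * compares against zero flag those lanes, and gmx_mm256_set_m128 merges them into one
             * 256-bit mask. Padding lanes are then redirected to atom 0 below so the coordinate
             * loads stay valid, and their contributions are cleared with _mm256_andnot_ps(dummy_mask,...).
             */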
979 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
980 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
981 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
982 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
983 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
984 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
985 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
986 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
987 j_coord_offsetA = DIM*jnrA;
988 j_coord_offsetB = DIM*jnrB;
989 j_coord_offsetC = DIM*jnrC;
990 j_coord_offsetD = DIM*jnrD;
991 j_coord_offsetE = DIM*jnrE;
992 j_coord_offsetF = DIM*jnrF;
993 j_coord_offsetG = DIM*jnrG;
994 j_coord_offsetH = DIM*jnrH;
996 /* load j atom coordinates */
997 gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
998 x+j_coord_offsetC,x+j_coord_offsetD,
999 x+j_coord_offsetE,x+j_coord_offsetF,
1000 x+j_coord_offsetG,x+j_coord_offsetH,
1001 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1002 &jy2,&jz2,&jx3,&jy3,&jz3);
1004 /* Calculate displacement vector */
1005 dx00 = _mm256_sub_ps(ix0,jx0);
1006 dy00 = _mm256_sub_ps(iy0,jy0);
1007 dz00 = _mm256_sub_ps(iz0,jz0);
1008 dx11 = _mm256_sub_ps(ix1,jx1);
1009 dy11 = _mm256_sub_ps(iy1,jy1);
1010 dz11 = _mm256_sub_ps(iz1,jz1);
1011 dx12 = _mm256_sub_ps(ix1,jx2);
1012 dy12 = _mm256_sub_ps(iy1,jy2);
1013 dz12 = _mm256_sub_ps(iz1,jz2);
1014 dx13 = _mm256_sub_ps(ix1,jx3);
1015 dy13 = _mm256_sub_ps(iy1,jy3);
1016 dz13 = _mm256_sub_ps(iz1,jz3);
1017 dx21 = _mm256_sub_ps(ix2,jx1);
1018 dy21 = _mm256_sub_ps(iy2,jy1);
1019 dz21 = _mm256_sub_ps(iz2,jz1);
1020 dx22 = _mm256_sub_ps(ix2,jx2);
1021 dy22 = _mm256_sub_ps(iy2,jy2);
1022 dz22 = _mm256_sub_ps(iz2,jz2);
1023 dx23 = _mm256_sub_ps(ix2,jx3);
1024 dy23 = _mm256_sub_ps(iy2,jy3);
1025 dz23 = _mm256_sub_ps(iz2,jz3);
1026 dx31 = _mm256_sub_ps(ix3,jx1);
1027 dy31 = _mm256_sub_ps(iy3,jy1);
1028 dz31 = _mm256_sub_ps(iz3,jz1);
1029 dx32 = _mm256_sub_ps(ix3,jx2);
1030 dy32 = _mm256_sub_ps(iy3,jy2);
1031 dz32 = _mm256_sub_ps(iz3,jz2);
1032 dx33 = _mm256_sub_ps(ix3,jx3);
1033 dy33 = _mm256_sub_ps(iy3,jy3);
1034 dz33 = _mm256_sub_ps(iz3,jz3);
1036 /* Calculate squared distance and things based on it */
1037 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1038 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1039 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1040 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
1041 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1042 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1043 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
1044 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
1045 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
1046 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
1048 rinv00 = avx256_invsqrt_f(rsq00);
1049 rinv11 = avx256_invsqrt_f(rsq11);
1050 rinv12 = avx256_invsqrt_f(rsq12);
1051 rinv13 = avx256_invsqrt_f(rsq13);
1052 rinv21 = avx256_invsqrt_f(rsq21);
1053 rinv22 = avx256_invsqrt_f(rsq22);
1054 rinv23 = avx256_invsqrt_f(rsq23);
1055 rinv31 = avx256_invsqrt_f(rsq31);
1056 rinv32 = avx256_invsqrt_f(rsq32);
1057 rinv33 = avx256_invsqrt_f(rsq33);
1059 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1060 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
1061 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
1062 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
1063 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
1064 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
1065 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
1066 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
1067 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
1068 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
1070 fjx0 = _mm256_setzero_ps();
1071 fjy0 = _mm256_setzero_ps();
1072 fjz0 = _mm256_setzero_ps();
1073 fjx1 = _mm256_setzero_ps();
1074 fjy1 = _mm256_setzero_ps();
1075 fjz1 = _mm256_setzero_ps();
1076 fjx2 = _mm256_setzero_ps();
1077 fjy2 = _mm256_setzero_ps();
1078 fjz2 = _mm256_setzero_ps();
1079 fjx3 = _mm256_setzero_ps();
1080 fjy3 = _mm256_setzero_ps();
1081 fjz3 = _mm256_setzero_ps();
1083 /**************************
1084 * CALCULATE INTERACTIONS *
1085 **************************/
1087 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1090 r00 = _mm256_mul_ps(rsq00,rinv00);
1091 r00 = _mm256_andnot_ps(dummy_mask,r00);
1093 /* LENNARD-JONES DISPERSION/REPULSION */
1095 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1096 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
1097 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1098 vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
1099 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
1101 d = _mm256_sub_ps(r00,rswitch);
1102 d = _mm256_max_ps(d,_mm256_setzero_ps());
1103 d2 = _mm256_mul_ps(d,d);
1104 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1106 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1108 /* Evaluate switch function */
1109 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1110 fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
1111 vvdw = _mm256_mul_ps(vvdw,sw);
1112 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1114 /* Update potential sum for this i atom from the interaction with this j atom. */
1115 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
1116 vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
1117 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
1121 fscal = _mm256_and_ps(fscal,cutoff_mask);
1123 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1125 /* Calculate temporary vectorial force */
1126 tx = _mm256_mul_ps(fscal,dx00);
1127 ty = _mm256_mul_ps(fscal,dy00);
1128 tz = _mm256_mul_ps(fscal,dz00);
1130 /* Update vectorial force */
1131 fix0 = _mm256_add_ps(fix0,tx);
1132 fiy0 = _mm256_add_ps(fiy0,ty);
1133 fiz0 = _mm256_add_ps(fiz0,tz);
1135 fjx0 = _mm256_add_ps(fjx0,tx);
1136 fjy0 = _mm256_add_ps(fjy0,ty);
1137 fjz0 = _mm256_add_ps(fjz0,tz);
1141 /**************************
1142 * CALCULATE INTERACTIONS *
1143 **************************/
1145 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1148 r11 = _mm256_mul_ps(rsq11,rinv11);
1149 r11 = _mm256_andnot_ps(dummy_mask,r11);
1151 /* EWALD ELECTROSTATICS */
1153 /* Analytical PME correction */
1154 zeta2 = _mm256_mul_ps(beta2,rsq11);
1155 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1156 pmecorrF = avx256_pmecorrF_f(zeta2);
1157 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1158 felec = _mm256_mul_ps(qq11,felec);
1159 pmecorrV = avx256_pmecorrV_f(zeta2);
1160 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1161 velec = _mm256_sub_ps(rinv11,pmecorrV);
1162 velec = _mm256_mul_ps(qq11,velec);
1164 d = _mm256_sub_ps(r11,rswitch);
1165 d = _mm256_max_ps(d,_mm256_setzero_ps());
1166 d2 = _mm256_mul_ps(d,d);
1167 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1169 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1171 /* Evaluate switch function */
1172 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1173 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
1174 velec = _mm256_mul_ps(velec,sw);
1175 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1177 /* Update potential sum for this i atom from the interaction with this j atom. */
1178 velec = _mm256_and_ps(velec,cutoff_mask);
1179 velec = _mm256_andnot_ps(dummy_mask,velec);
1180 velecsum = _mm256_add_ps(velecsum,velec);
1184 fscal = _mm256_and_ps(fscal,cutoff_mask);
1186 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1188 /* Calculate temporary vectorial force */
1189 tx = _mm256_mul_ps(fscal,dx11);
1190 ty = _mm256_mul_ps(fscal,dy11);
1191 tz = _mm256_mul_ps(fscal,dz11);
1193 /* Update vectorial force */
1194 fix1 = _mm256_add_ps(fix1,tx);
1195 fiy1 = _mm256_add_ps(fiy1,ty);
1196 fiz1 = _mm256_add_ps(fiz1,tz);
1198 fjx1 = _mm256_add_ps(fjx1,tx);
1199 fjy1 = _mm256_add_ps(fjy1,ty);
1200 fjz1 = _mm256_add_ps(fjz1,tz);
1204 /**************************
1205 * CALCULATE INTERACTIONS *
1206 **************************/
1208 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1211 r12 = _mm256_mul_ps(rsq12,rinv12);
1212 r12 = _mm256_andnot_ps(dummy_mask,r12);
1214 /* EWALD ELECTROSTATICS */
1216 /* Analytical PME correction */
1217 zeta2 = _mm256_mul_ps(beta2,rsq12);
1218 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1219 pmecorrF = avx256_pmecorrF_f(zeta2);
1220 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1221 felec = _mm256_mul_ps(qq12,felec);
1222 pmecorrV = avx256_pmecorrV_f(zeta2);
1223 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1224 velec = _mm256_sub_ps(rinv12,pmecorrV);
1225 velec = _mm256_mul_ps(qq12,velec);
1227 d = _mm256_sub_ps(r12,rswitch);
1228 d = _mm256_max_ps(d,_mm256_setzero_ps());
1229 d2 = _mm256_mul_ps(d,d);
1230 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1232 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1234 /* Evaluate switch function */
1235 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1236 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
1237 velec = _mm256_mul_ps(velec,sw);
1238 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1240 /* Update potential sum for this i atom from the interaction with this j atom. */
1241 velec = _mm256_and_ps(velec,cutoff_mask);
1242 velec = _mm256_andnot_ps(dummy_mask,velec);
1243 velecsum = _mm256_add_ps(velecsum,velec);
1247 fscal = _mm256_and_ps(fscal,cutoff_mask);
1249 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1251 /* Calculate temporary vectorial force */
1252 tx = _mm256_mul_ps(fscal,dx12);
1253 ty = _mm256_mul_ps(fscal,dy12);
1254 tz = _mm256_mul_ps(fscal,dz12);
1256 /* Update vectorial force */
1257 fix1 = _mm256_add_ps(fix1,tx);
1258 fiy1 = _mm256_add_ps(fiy1,ty);
1259 fiz1 = _mm256_add_ps(fiz1,tz);
1261 fjx2 = _mm256_add_ps(fjx2,tx);
1262 fjy2 = _mm256_add_ps(fjy2,ty);
1263 fjz2 = _mm256_add_ps(fjz2,tz);
1267 /**************************
1268 * CALCULATE INTERACTIONS *
1269 **************************/
1271 if (gmx_mm256_any_lt(rsq13,rcutoff2))
1274 r13 = _mm256_mul_ps(rsq13,rinv13);
1275 r13 = _mm256_andnot_ps(dummy_mask,r13);
1277 /* EWALD ELECTROSTATICS */
1279 /* Analytical PME correction */
1280 zeta2 = _mm256_mul_ps(beta2,rsq13);
1281 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
1282 pmecorrF = avx256_pmecorrF_f(zeta2);
1283 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1284 felec = _mm256_mul_ps(qq13,felec);
1285 pmecorrV = avx256_pmecorrV_f(zeta2);
1286 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1287 velec = _mm256_sub_ps(rinv13,pmecorrV);
1288 velec = _mm256_mul_ps(qq13,velec);
1290 d = _mm256_sub_ps(r13,rswitch);
1291 d = _mm256_max_ps(d,_mm256_setzero_ps());
1292 d2 = _mm256_mul_ps(d,d);
1293 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1295 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1297 /* Evaluate switch function */
1298 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1299 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
1300 velec = _mm256_mul_ps(velec,sw);
1301 cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
1303 /* Update potential sum for this i atom from the interaction with this j atom. */
1304 velec = _mm256_and_ps(velec,cutoff_mask);
1305 velec = _mm256_andnot_ps(dummy_mask,velec);
1306 velecsum = _mm256_add_ps(velecsum,velec);
1310 fscal = _mm256_and_ps(fscal,cutoff_mask);
1312 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1314 /* Calculate temporary vectorial force */
1315 tx = _mm256_mul_ps(fscal,dx13);
1316 ty = _mm256_mul_ps(fscal,dy13);
1317 tz = _mm256_mul_ps(fscal,dz13);
1319 /* Update vectorial force */
1320 fix1 = _mm256_add_ps(fix1,tx);
1321 fiy1 = _mm256_add_ps(fiy1,ty);
1322 fiz1 = _mm256_add_ps(fiz1,tz);
1324 fjx3 = _mm256_add_ps(fjx3,tx);
1325 fjy3 = _mm256_add_ps(fjy3,ty);
1326 fjz3 = _mm256_add_ps(fjz3,tz);
1330 /**************************
1331 * CALCULATE INTERACTIONS *
1332 **************************/
1334 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1337 r21 = _mm256_mul_ps(rsq21,rinv21);
1338 r21 = _mm256_andnot_ps(dummy_mask,r21);
1340 /* EWALD ELECTROSTATICS */
1342 /* Analytical PME correction */
1343 zeta2 = _mm256_mul_ps(beta2,rsq21);
1344 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
1345 pmecorrF = avx256_pmecorrF_f(zeta2);
1346 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1347 felec = _mm256_mul_ps(qq21,felec);
1348 pmecorrV = avx256_pmecorrV_f(zeta2);
1349 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1350 velec = _mm256_sub_ps(rinv21,pmecorrV);
1351 velec = _mm256_mul_ps(qq21,velec);
1353 d = _mm256_sub_ps(r21,rswitch);
1354 d = _mm256_max_ps(d,_mm256_setzero_ps());
1355 d2 = _mm256_mul_ps(d,d);
1356 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1358 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1360 /* Evaluate switch function */
1361 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1362 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
1363 velec = _mm256_mul_ps(velec,sw);
1364 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1366 /* Update potential sum for this i atom from the interaction with this j atom. */
1367 velec = _mm256_and_ps(velec,cutoff_mask);
1368 velec = _mm256_andnot_ps(dummy_mask,velec);
1369 velecsum = _mm256_add_ps(velecsum,velec);
1373 fscal = _mm256_and_ps(fscal,cutoff_mask);
1375 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1377 /* Calculate temporary vectorial force */
1378 tx = _mm256_mul_ps(fscal,dx21);
1379 ty = _mm256_mul_ps(fscal,dy21);
1380 tz = _mm256_mul_ps(fscal,dz21);
1382 /* Update vectorial force */
1383 fix2 = _mm256_add_ps(fix2,tx);
1384 fiy2 = _mm256_add_ps(fiy2,ty);
1385 fiz2 = _mm256_add_ps(fiz2,tz);
1387 fjx1 = _mm256_add_ps(fjx1,tx);
1388 fjy1 = _mm256_add_ps(fjy1,ty);
1389 fjz1 = _mm256_add_ps(fjz1,tz);
1393 /**************************
1394 * CALCULATE INTERACTIONS *
1395 **************************/
1397 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1400 r22 = _mm256_mul_ps(rsq22,rinv22);
1401 r22 = _mm256_andnot_ps(dummy_mask,r22);
1403 /* EWALD ELECTROSTATICS */
1405 /* Analytical PME correction */
1406 zeta2 = _mm256_mul_ps(beta2,rsq22);
1407 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
1408 pmecorrF = avx256_pmecorrF_f(zeta2);
1409 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1410 felec = _mm256_mul_ps(qq22,felec);
1411 pmecorrV = avx256_pmecorrV_f(zeta2);
1412 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1413 velec = _mm256_sub_ps(rinv22,pmecorrV);
1414 velec = _mm256_mul_ps(qq22,velec);
1416 d = _mm256_sub_ps(r22,rswitch);
1417 d = _mm256_max_ps(d,_mm256_setzero_ps());
1418 d2 = _mm256_mul_ps(d,d);
1419 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1421 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1423 /* Evaluate switch function */
1424 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1425 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
1426 velec = _mm256_mul_ps(velec,sw);
1427 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1429 /* Update potential sum for this i atom from the interaction with this j atom. */
1430 velec = _mm256_and_ps(velec,cutoff_mask);
1431 velec = _mm256_andnot_ps(dummy_mask,velec);
1432 velecsum = _mm256_add_ps(velecsum,velec);
1436 fscal = _mm256_and_ps(fscal,cutoff_mask);
1438 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1440 /* Calculate temporary vectorial force */
1441 tx = _mm256_mul_ps(fscal,dx22);
1442 ty = _mm256_mul_ps(fscal,dy22);
1443 tz = _mm256_mul_ps(fscal,dz22);
1445 /* Update vectorial force */
1446 fix2 = _mm256_add_ps(fix2,tx);
1447 fiy2 = _mm256_add_ps(fiy2,ty);
1448 fiz2 = _mm256_add_ps(fiz2,tz);
1450 fjx2 = _mm256_add_ps(fjx2,tx);
1451 fjy2 = _mm256_add_ps(fjy2,ty);
1452 fjz2 = _mm256_add_ps(fjz2,tz);
1456 /**************************
1457 * CALCULATE INTERACTIONS *
1458 **************************/
1460 if (gmx_mm256_any_lt(rsq23,rcutoff2))
1463 r23 = _mm256_mul_ps(rsq23,rinv23);
1464 r23 = _mm256_andnot_ps(dummy_mask,r23);
1466 /* EWALD ELECTROSTATICS */
1468 /* Analytical PME correction */
1469 zeta2 = _mm256_mul_ps(beta2,rsq23);
1470 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
1471 pmecorrF = avx256_pmecorrF_f(zeta2);
1472 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1473 felec = _mm256_mul_ps(qq23,felec);
1474 pmecorrV = avx256_pmecorrV_f(zeta2);
1475 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1476 velec = _mm256_sub_ps(rinv23,pmecorrV);
1477 velec = _mm256_mul_ps(qq23,velec);
1479 d = _mm256_sub_ps(r23,rswitch);
1480 d = _mm256_max_ps(d,_mm256_setzero_ps());
1481 d2 = _mm256_mul_ps(d,d);
1482 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1484 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1486 /* Evaluate switch function */
1487 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1488 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
1489 velec = _mm256_mul_ps(velec,sw);
1490 cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
1492 /* Update potential sum for this i atom from the interaction with this j atom. */
1493 velec = _mm256_and_ps(velec,cutoff_mask);
1494 velec = _mm256_andnot_ps(dummy_mask,velec);
1495 velecsum = _mm256_add_ps(velecsum,velec);
1499 fscal = _mm256_and_ps(fscal,cutoff_mask);
1501 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1503 /* Calculate temporary vectorial force */
1504 tx = _mm256_mul_ps(fscal,dx23);
1505 ty = _mm256_mul_ps(fscal,dy23);
1506 tz = _mm256_mul_ps(fscal,dz23);
1508 /* Update vectorial force */
1509 fix2 = _mm256_add_ps(fix2,tx);
1510 fiy2 = _mm256_add_ps(fiy2,ty);
1511 fiz2 = _mm256_add_ps(fiz2,tz);
1513 fjx3 = _mm256_add_ps(fjx3,tx);
1514 fjy3 = _mm256_add_ps(fjy3,ty);
1515 fjz3 = _mm256_add_ps(fjz3,tz);
1519 /**************************
1520 * CALCULATE INTERACTIONS *
1521 **************************/
1523 if (gmx_mm256_any_lt(rsq31,rcutoff2))
1526 r31 = _mm256_mul_ps(rsq31,rinv31);
1527 r31 = _mm256_andnot_ps(dummy_mask,r31);
1529 /* EWALD ELECTROSTATICS */
1531 /* Analytical PME correction */
1532 zeta2 = _mm256_mul_ps(beta2,rsq31);
1533 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
1534 pmecorrF = avx256_pmecorrF_f(zeta2);
1535 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1536 felec = _mm256_mul_ps(qq31,felec);
1537 pmecorrV = avx256_pmecorrV_f(zeta2);
1538 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1539 velec = _mm256_sub_ps(rinv31,pmecorrV);
1540 velec = _mm256_mul_ps(qq31,velec);
1542 d = _mm256_sub_ps(r31,rswitch);
1543 d = _mm256_max_ps(d,_mm256_setzero_ps());
1544 d2 = _mm256_mul_ps(d,d);
1545 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1547 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1549 /* Evaluate switch function */
1550 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1551 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
1552 velec = _mm256_mul_ps(velec,sw);
1553 cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
1555 /* Update potential sum for this i atom from the interaction with this j atom. */
1556 velec = _mm256_and_ps(velec,cutoff_mask);
1557 velec = _mm256_andnot_ps(dummy_mask,velec);
1558 velecsum = _mm256_add_ps(velecsum,velec);
1562 fscal = _mm256_and_ps(fscal,cutoff_mask);
1564 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1566 /* Calculate temporary vectorial force */
1567 tx = _mm256_mul_ps(fscal,dx31);
1568 ty = _mm256_mul_ps(fscal,dy31);
1569 tz = _mm256_mul_ps(fscal,dz31);
1571 /* Update vectorial force */
1572 fix3 = _mm256_add_ps(fix3,tx);
1573 fiy3 = _mm256_add_ps(fiy3,ty);
1574 fiz3 = _mm256_add_ps(fiz3,tz);
1576 fjx1 = _mm256_add_ps(fjx1,tx);
1577 fjy1 = _mm256_add_ps(fjy1,ty);
1578 fjz1 = _mm256_add_ps(fjz1,tz);
1582 /**************************
1583 * CALCULATE INTERACTIONS *
1584 **************************/
1586 if (gmx_mm256_any_lt(rsq32,rcutoff2))
1589 r32 = _mm256_mul_ps(rsq32,rinv32);
1590 r32 = _mm256_andnot_ps(dummy_mask,r32);
1592 /* EWALD ELECTROSTATICS */
1594 /* Analytical PME correction */
1595 zeta2 = _mm256_mul_ps(beta2,rsq32);
1596 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
1597 pmecorrF = avx256_pmecorrF_f(zeta2);
1598 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1599 felec = _mm256_mul_ps(qq32,felec);
1600 pmecorrV = avx256_pmecorrV_f(zeta2);
1601 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1602 velec = _mm256_sub_ps(rinv32,pmecorrV);
1603 velec = _mm256_mul_ps(qq32,velec);
1605 d = _mm256_sub_ps(r32,rswitch);
1606 d = _mm256_max_ps(d,_mm256_setzero_ps());
1607 d2 = _mm256_mul_ps(d,d);
1608 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1610 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1612 /* Evaluate switch function */
1613 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1614 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
1615 velec = _mm256_mul_ps(velec,sw);
1616 cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
1618 /* Update potential sum for this i atom from the interaction with this j atom. */
1619 velec = _mm256_and_ps(velec,cutoff_mask);
1620 velec = _mm256_andnot_ps(dummy_mask,velec);
1621 velecsum = _mm256_add_ps(velecsum,velec);
1625 fscal = _mm256_and_ps(fscal,cutoff_mask);
1627 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1629 /* Calculate temporary vectorial force */
1630 tx = _mm256_mul_ps(fscal,dx32);
1631 ty = _mm256_mul_ps(fscal,dy32);
1632 tz = _mm256_mul_ps(fscal,dz32);
1634 /* Update vectorial force */
1635 fix3 = _mm256_add_ps(fix3,tx);
1636 fiy3 = _mm256_add_ps(fiy3,ty);
1637 fiz3 = _mm256_add_ps(fiz3,tz);
1639 fjx2 = _mm256_add_ps(fjx2,tx);
1640 fjy2 = _mm256_add_ps(fjy2,ty);
1641 fjz2 = _mm256_add_ps(fjz2,tz);
1645 /**************************
1646 * CALCULATE INTERACTIONS *
1647 **************************/
1649 if (gmx_mm256_any_lt(rsq33,rcutoff2))
1652 r33 = _mm256_mul_ps(rsq33,rinv33);
1653 r33 = _mm256_andnot_ps(dummy_mask,r33);
1655 /* EWALD ELECTROSTATICS */
1657 /* Analytical PME correction */
1658 zeta2 = _mm256_mul_ps(beta2,rsq33);
1659 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
1660 pmecorrF = avx256_pmecorrF_f(zeta2);
1661 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1662 felec = _mm256_mul_ps(qq33,felec);
1663 pmecorrV = avx256_pmecorrV_f(zeta2);
1664 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1665 velec = _mm256_sub_ps(rinv33,pmecorrV);
1666 velec = _mm256_mul_ps(qq33,velec);
1668 d = _mm256_sub_ps(r33,rswitch);
1669 d = _mm256_max_ps(d,_mm256_setzero_ps());
1670 d2 = _mm256_mul_ps(d,d);
1671 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1673 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1675 /* Evaluate switch function */
1676 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1677 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
1678 velec = _mm256_mul_ps(velec,sw);
1679 cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
1681 /* Update potential sum for this i atom from the interaction with this j atom. */
1682 velec = _mm256_and_ps(velec,cutoff_mask);
1683 velec = _mm256_andnot_ps(dummy_mask,velec);
1684 velecsum = _mm256_add_ps(velecsum,velec);
1686 fscal = felec;
1688 fscal = _mm256_and_ps(fscal,cutoff_mask);
1690 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1692 /* Calculate temporary vectorial force */
1693 tx = _mm256_mul_ps(fscal,dx33);
1694 ty = _mm256_mul_ps(fscal,dy33);
1695 tz = _mm256_mul_ps(fscal,dz33);
1697 /* Update vectorial force */
1698 fix3 = _mm256_add_ps(fix3,tx);
1699 fiy3 = _mm256_add_ps(fiy3,ty);
1700 fiz3 = _mm256_add_ps(fiz3,tz);
1702 fjx3 = _mm256_add_ps(fjx3,tx);
1703 fjy3 = _mm256_add_ps(fjy3,ty);
1704 fjz3 = _mm256_add_ps(fjz3,tz);
1708 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1709 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1710 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1711 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1712 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1713 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1714 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1715 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1717 gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1718 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1719 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
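/* Newton's third law: the per-lane j forces accumulated in fjx0..fjz3 are transposed
 * back to per-atom layout and subtracted from the force array through the eight fjptr
 * pointers; lanes that correspond to padded list entries point at the local scratch
 * buffer, so only real atoms are updated.
 */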
1721 /* Inner loop uses 1044 flops */
1724 /* End of innermost loop */
1726 gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1727 f+i_coord_offset,fshift+i_shift_offset);
1730 /* Update potential energies */
1731 gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1732 gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
1734 /* Increment number of inner iterations */
1735 inneriter += j_index_end - j_index_start;
1737 /* Outer loop uses 26 flops */
1740 /* Increment number of outer iterations */
1741 outeriter += nri;
1743 /* Update outer/inner flops */
1745 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*1044);
1746 }
1747 /*
1748 * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_single
1749 * Electrostatics interaction: Ewald
1750 * VdW interaction: LennardJones
1751 * Geometry: Water4-Water4
1752 * Calculate force/pot: Force
1753 */
1754 void
1755 nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_single
1756 (t_nblist * gmx_restrict nlist,
1757 rvec * gmx_restrict xx,
1758 rvec * gmx_restrict ff,
1759 struct t_forcerec * gmx_restrict fr,
1760 t_mdatoms * gmx_restrict mdatoms,
1761 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1762 t_nrnb * gmx_restrict nrnb)
1764 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1765 * just 0 for non-waters.
1766 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1767 * jnr indices corresponding to data put in the four positions in the SIMD register.
1768 */
1769 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1770 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1771 int jnrA,jnrB,jnrC,jnrD;
1772 int jnrE,jnrF,jnrG,jnrH;
1773 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1774 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1775 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1776 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1777 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1778 real rcutoff_scalar;
1779 real *shiftvec,*fshift,*x,*f;
1780 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1781 real scratch[4*DIM];
1782 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1783 real * vdwioffsetptr0;
1784 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1785 real * vdwioffsetptr1;
1786 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1787 real * vdwioffsetptr2;
1788 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1789 real * vdwioffsetptr3;
1790 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1791 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1792 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1793 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1794 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1795 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1796 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1797 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
1798 __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1799 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1800 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1801 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1802 __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1803 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1804 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1805 __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1806 __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1807 __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1808 __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1809 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
1810 real *charge;
1811 int nvdwtype;
1812 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1813 int *vdwtype;
1814 real *vdwparam;
1815 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
1816 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
1817 __m256i ewitab;
1818 __m128i ewitab_lo,ewitab_hi;
1819 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1820 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1821 real *ewtab;
1822 __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
1823 real rswitch_scalar,d_scalar;
1824 __m256 dummy_mask,cutoff_mask;
1825 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1826 __m256 one = _mm256_set1_ps(1.0);
1827 __m256 two = _mm256_set1_ps(2.0);
1828 x = xx[0];
1829 f = ff[0];
1831 nri = nlist->nri;
1832 iinr = nlist->iinr;
1833 jindex = nlist->jindex;
1834 jjnr = nlist->jjnr;
1835 shiftidx = nlist->shift;
1836 gid = nlist->gid;
1837 shiftvec = fr->shift_vec[0];
1838 fshift = fr->fshift[0];
1839 facel = _mm256_set1_ps(fr->ic->epsfac);
1840 charge = mdatoms->chargeA;
1841 nvdwtype = fr->ntype;
1842 vdwparam = fr->nbfp;
1843 vdwtype = mdatoms->typeA;
1845 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
1846 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1847 beta2 = _mm256_mul_ps(beta,beta);
1848 beta3 = _mm256_mul_ps(beta,beta2);
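/* The analytical PME correction used in the loops below avoids table lookups: for z = beta*r,
 * avx256_pmecorrV_f(z*z) approximates erf(z)/z and avx256_pmecorrF_f(z*z) the matching force
 * term, so that (in effect)
 *   velec = qq*(1/r - beta*pmecorrV)      ~ qq*erfc(beta*r)/r
 *   felec = qq*(beta^3*pmecorrF + 1/r^3)  ~ qq*(erfc(beta*r)/r + 2*beta*exp(-(beta*r)^2)/sqrt(pi))/r^2
 * i.e. the short-range (real-space) part of the Ewald/PME electrostatics.
 */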
1850 ewtab = fr->ic->tabq_coul_FDV0;
1851 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
1852 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1854 /* Setup water-specific parameters */
1855 inr = nlist->iinr[0];
1856 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1857 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1858 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1859 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1861 jq1 = _mm256_set1_ps(charge[inr+1]);
1862 jq2 = _mm256_set1_ps(charge[inr+2]);
1863 jq3 = _mm256_set1_ps(charge[inr+3]);
1864 vdwjidx0A = 2*vdwtype[inr+0];
1865 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1866 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1867 qq11 = _mm256_mul_ps(iq1,jq1);
1868 qq12 = _mm256_mul_ps(iq1,jq2);
1869 qq13 = _mm256_mul_ps(iq1,jq3);
1870 qq21 = _mm256_mul_ps(iq2,jq1);
1871 qq22 = _mm256_mul_ps(iq2,jq2);
1872 qq23 = _mm256_mul_ps(iq2,jq3);
1873 qq31 = _mm256_mul_ps(iq3,jq1);
1874 qq32 = _mm256_mul_ps(iq3,jq2);
1875 qq33 = _mm256_mul_ps(iq3,jq3);
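/* Because both i and j particles are four-site waters with identical topology, all ten
 * charge products qq11..qq33 and the single O-O Lennard-Jones pair (c6_00/c12_00) can be
 * formed once here, outside the neighborlist loops, instead of once per pair.
 */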
1877 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1878 rcutoff_scalar = fr->ic->rcoulomb;
1879 rcutoff = _mm256_set1_ps(rcutoff_scalar);
1880 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
1882 rswitch_scalar = fr->ic->rcoulomb_switch;
1883 rswitch = _mm256_set1_ps(rswitch_scalar);
1884 /* Setup switch parameters */
1885 d_scalar = rcutoff_scalar-rswitch_scalar;
1886 d = _mm256_set1_ps(d_scalar);
1887 swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
1888 swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1889 swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1890 swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
1891 swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1892 swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
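/* With t = (r - rswitch)/(rcutoff - rswitch), the coefficients above give the standard
 * quintic potential-switch polynomial
 *   sw(r)  = 1 - 10*t^3 + 15*t^4 - 6*t^5
 *   dsw(r) = d(sw)/dr = (-30*t^2 + 60*t^3 - 30*t^4)/(rcutoff - rswitch)
 * so sw and its first two derivatives vanish at the cutoff, while sw = 1 and dsw = 0 at the
 * switching radius. The inner loops evaluate these via swV3..swV5 and swF2..swF4.
 */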
1894 /* Avoid stupid compiler warnings */
1895 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1896 j_coord_offsetA = 0;
1897 j_coord_offsetB = 0;
1898 j_coord_offsetC = 0;
1899 j_coord_offsetD = 0;
1900 j_coord_offsetE = 0;
1901 j_coord_offsetF = 0;
1902 j_coord_offsetG = 0;
1903 j_coord_offsetH = 0;
1905 outeriter = 0;
1906 inneriter = 0;
1908 for(iidx=0;iidx<4*DIM;iidx++)
1910 scratch[iidx] = 0.0;
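/* The scratch buffer acts as a harmless dump target: force contributions of the padded
 * (dummy) j entries in the final, masked iteration are written here instead of into the
 * real force array f[].
 */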
1913 /* Start outer loop over neighborlists */
1914 for(iidx=0; iidx<nri; iidx++)
1916 /* Load shift vector for this list */
1917 i_shift_offset = DIM*shiftidx[iidx];
1919 /* Load limits for loop over neighbors */
1920 j_index_start = jindex[iidx];
1921 j_index_end = jindex[iidx+1];
1923 /* Get outer coordinate index */
1924 inr = iinr[iidx];
1925 i_coord_offset = DIM*inr;
1927 /* Load i particle coords and add shift vector */
1928 gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1929 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
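/* The four i-water sites are loaded once per list, offset by the periodic shift vector for
 * this list, and broadcast across all eight SIMD lanes so they can be combined with eight
 * different j waters at a time.
 */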
1931 fix0 = _mm256_setzero_ps();
1932 fiy0 = _mm256_setzero_ps();
1933 fiz0 = _mm256_setzero_ps();
1934 fix1 = _mm256_setzero_ps();
1935 fiy1 = _mm256_setzero_ps();
1936 fiz1 = _mm256_setzero_ps();
1937 fix2 = _mm256_setzero_ps();
1938 fiy2 = _mm256_setzero_ps();
1939 fiz2 = _mm256_setzero_ps();
1940 fix3 = _mm256_setzero_ps();
1941 fiy3 = _mm256_setzero_ps();
1942 fiz3 = _mm256_setzero_ps();
1944 /* Start inner kernel loop */
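/* The inner loop processes eight j waters per iteration (one per SIMD lane) as long as a
 * full unroll of valid indices is available; padded entries in jjnr are negative, so the
 * condition jjnr[jidx+7]>=0 hands any partially filled final chunk over to the masked
 * epilogue further down.
 */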
1945 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1948 /* Get j neighbor index, and coordinate index */
1949 jnrA = jjnr[jidx];
1950 jnrB = jjnr[jidx+1];
1951 jnrC = jjnr[jidx+2];
1952 jnrD = jjnr[jidx+3];
1953 jnrE = jjnr[jidx+4];
1954 jnrF = jjnr[jidx+5];
1955 jnrG = jjnr[jidx+6];
1956 jnrH = jjnr[jidx+7];
1957 j_coord_offsetA = DIM*jnrA;
1958 j_coord_offsetB = DIM*jnrB;
1959 j_coord_offsetC = DIM*jnrC;
1960 j_coord_offsetD = DIM*jnrD;
1961 j_coord_offsetE = DIM*jnrE;
1962 j_coord_offsetF = DIM*jnrF;
1963 j_coord_offsetG = DIM*jnrG;
1964 j_coord_offsetH = DIM*jnrH;
1966 /* load j atom coordinates */
1967 gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1968 x+j_coord_offsetC,x+j_coord_offsetD,
1969 x+j_coord_offsetE,x+j_coord_offsetF,
1970 x+j_coord_offsetG,x+j_coord_offsetH,
1971 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1972 &jy2,&jz2,&jx3,&jy3,&jz3);
1974 /* Calculate displacement vector */
1975 dx00 = _mm256_sub_ps(ix0,jx0);
1976 dy00 = _mm256_sub_ps(iy0,jy0);
1977 dz00 = _mm256_sub_ps(iz0,jz0);
1978 dx11 = _mm256_sub_ps(ix1,jx1);
1979 dy11 = _mm256_sub_ps(iy1,jy1);
1980 dz11 = _mm256_sub_ps(iz1,jz1);
1981 dx12 = _mm256_sub_ps(ix1,jx2);
1982 dy12 = _mm256_sub_ps(iy1,jy2);
1983 dz12 = _mm256_sub_ps(iz1,jz2);
1984 dx13 = _mm256_sub_ps(ix1,jx3);
1985 dy13 = _mm256_sub_ps(iy1,jy3);
1986 dz13 = _mm256_sub_ps(iz1,jz3);
1987 dx21 = _mm256_sub_ps(ix2,jx1);
1988 dy21 = _mm256_sub_ps(iy2,jy1);
1989 dz21 = _mm256_sub_ps(iz2,jz1);
1990 dx22 = _mm256_sub_ps(ix2,jx2);
1991 dy22 = _mm256_sub_ps(iy2,jy2);
1992 dz22 = _mm256_sub_ps(iz2,jz2);
1993 dx23 = _mm256_sub_ps(ix2,jx3);
1994 dy23 = _mm256_sub_ps(iy2,jy3);
1995 dz23 = _mm256_sub_ps(iz2,jz3);
1996 dx31 = _mm256_sub_ps(ix3,jx1);
1997 dy31 = _mm256_sub_ps(iy3,jy1);
1998 dz31 = _mm256_sub_ps(iz3,jz1);
1999 dx32 = _mm256_sub_ps(ix3,jx2);
2000 dy32 = _mm256_sub_ps(iy3,jy2);
2001 dz32 = _mm256_sub_ps(iz3,jz2);
2002 dx33 = _mm256_sub_ps(ix3,jx3);
2003 dy33 = _mm256_sub_ps(iy3,jy3);
2004 dz33 = _mm256_sub_ps(iz3,jz3);
2006 /* Calculate squared distance and things based on it */
2007 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2008 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2009 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2010 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
2011 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2012 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2013 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
2014 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
2015 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
2016 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
2018 rinv00 = avx256_invsqrt_f(rsq00);
2019 rinv11 = avx256_invsqrt_f(rsq11);
2020 rinv12 = avx256_invsqrt_f(rsq12);
2021 rinv13 = avx256_invsqrt_f(rsq13);
2022 rinv21 = avx256_invsqrt_f(rsq21);
2023 rinv22 = avx256_invsqrt_f(rsq22);
2024 rinv23 = avx256_invsqrt_f(rsq23);
2025 rinv31 = avx256_invsqrt_f(rsq31);
2026 rinv32 = avx256_invsqrt_f(rsq32);
2027 rinv33 = avx256_invsqrt_f(rsq33);
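/* avx256_invsqrt_f computes 1/sqrt(rsq) for all eight lanes at once, typically as a hardware
 * rsqrt estimate refined by a Newton-Raphson step, which is sufficient in single precision.
 */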
2029 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
2030 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
2031 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
2032 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
2033 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
2034 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
2035 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
2036 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
2037 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
2038 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
2040 fjx0 = _mm256_setzero_ps();
2041 fjy0 = _mm256_setzero_ps();
2042 fjz0 = _mm256_setzero_ps();
2043 fjx1 = _mm256_setzero_ps();
2044 fjy1 = _mm256_setzero_ps();
2045 fjz1 = _mm256_setzero_ps();
2046 fjx2 = _mm256_setzero_ps();
2047 fjy2 = _mm256_setzero_ps();
2048 fjz2 = _mm256_setzero_ps();
2049 fjx3 = _mm256_setzero_ps();
2050 fjy3 = _mm256_setzero_ps();
2051 fjz3 = _mm256_setzero_ps();
2053 /**************************
2054 * CALCULATE INTERACTIONS *
2055 **************************/
2057 if (gmx_mm256_any_lt(rsq00,rcutoff2))
2060 r00 = _mm256_mul_ps(rsq00,rinv00);
2062 /* LENNARD-JONES DISPERSION/REPULSION */
2064 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2065 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
2066 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
2067 vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
2068 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
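/* Standard Lennard-Jones evaluation: vvdw6 = c6/r^6 and vvdw12 = c12/r^12, where the stored
 * c6_00/c12_00 parameters already carry the derivative prefactors 6 and 12, so the scalar
 * force is fvdw = (vvdw12 - vvdw6)/r^2 and the potential is recovered with the 1/12 and 1/6
 * factors above.
 */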
2070 d = _mm256_sub_ps(r00,rswitch);
2071 d = _mm256_max_ps(d,_mm256_setzero_ps());
2072 d2 = _mm256_mul_ps(d,d);
2073 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2075 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2077 /* Evaluate switch function */
2078 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2079 fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
2080 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2082 fscal = fvdw;
2084 fscal = _mm256_and_ps(fscal,cutoff_mask);
2086 /* Calculate temporary vectorial force */
2087 tx = _mm256_mul_ps(fscal,dx00);
2088 ty = _mm256_mul_ps(fscal,dy00);
2089 tz = _mm256_mul_ps(fscal,dz00);
2091 /* Update vectorial force */
2092 fix0 = _mm256_add_ps(fix0,tx);
2093 fiy0 = _mm256_add_ps(fiy0,ty);
2094 fiz0 = _mm256_add_ps(fiz0,tz);
2096 fjx0 = _mm256_add_ps(fjx0,tx);
2097 fjy0 = _mm256_add_ps(fjy0,ty);
2098 fjz0 = _mm256_add_ps(fjz0,tz);
2102 /**************************
2103 * CALCULATE INTERACTIONS *
2104 **************************/
2106 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2109 r11 = _mm256_mul_ps(rsq11,rinv11);
2111 /* EWALD ELECTROSTATICS */
2113 /* Analytical PME correction */
2114 zeta2 = _mm256_mul_ps(beta2,rsq11);
2115 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
2116 pmecorrF = avx256_pmecorrF_f(zeta2);
2117 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2118 felec = _mm256_mul_ps(qq11,felec);
2119 pmecorrV = avx256_pmecorrV_f(zeta2);
2120 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2121 velec = _mm256_sub_ps(rinv11,pmecorrV);
2122 velec = _mm256_mul_ps(qq11,velec);
2124 d = _mm256_sub_ps(r11,rswitch);
2125 d = _mm256_max_ps(d,_mm256_setzero_ps());
2126 d2 = _mm256_mul_ps(d,d);
2127 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2129 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2131 /* Evaluate switch function */
2132 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2133 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
2134 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2136 fscal = felec;
2138 fscal = _mm256_and_ps(fscal,cutoff_mask);
2140 /* Calculate temporary vectorial force */
2141 tx = _mm256_mul_ps(fscal,dx11);
2142 ty = _mm256_mul_ps(fscal,dy11);
2143 tz = _mm256_mul_ps(fscal,dz11);
2145 /* Update vectorial force */
2146 fix1 = _mm256_add_ps(fix1,tx);
2147 fiy1 = _mm256_add_ps(fiy1,ty);
2148 fiz1 = _mm256_add_ps(fiz1,tz);
2150 fjx1 = _mm256_add_ps(fjx1,tx);
2151 fjy1 = _mm256_add_ps(fjy1,ty);
2152 fjz1 = _mm256_add_ps(fjz1,tz);
2156 /**************************
2157 * CALCULATE INTERACTIONS *
2158 **************************/
2160 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2163 r12 = _mm256_mul_ps(rsq12,rinv12);
2165 /* EWALD ELECTROSTATICS */
2167 /* Analytical PME correction */
2168 zeta2 = _mm256_mul_ps(beta2,rsq12);
2169 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
2170 pmecorrF = avx256_pmecorrF_f(zeta2);
2171 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2172 felec = _mm256_mul_ps(qq12,felec);
2173 pmecorrV = avx256_pmecorrV_f(zeta2);
2174 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2175 velec = _mm256_sub_ps(rinv12,pmecorrV);
2176 velec = _mm256_mul_ps(qq12,velec);
2178 d = _mm256_sub_ps(r12,rswitch);
2179 d = _mm256_max_ps(d,_mm256_setzero_ps());
2180 d2 = _mm256_mul_ps(d,d);
2181 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2183 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2185 /* Evaluate switch function */
2186 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2187 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
2188 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2190 fscal = felec;
2192 fscal = _mm256_and_ps(fscal,cutoff_mask);
2194 /* Calculate temporary vectorial force */
2195 tx = _mm256_mul_ps(fscal,dx12);
2196 ty = _mm256_mul_ps(fscal,dy12);
2197 tz = _mm256_mul_ps(fscal,dz12);
2199 /* Update vectorial force */
2200 fix1 = _mm256_add_ps(fix1,tx);
2201 fiy1 = _mm256_add_ps(fiy1,ty);
2202 fiz1 = _mm256_add_ps(fiz1,tz);
2204 fjx2 = _mm256_add_ps(fjx2,tx);
2205 fjy2 = _mm256_add_ps(fjy2,ty);
2206 fjz2 = _mm256_add_ps(fjz2,tz);
2210 /**************************
2211 * CALCULATE INTERACTIONS *
2212 **************************/
2214 if (gmx_mm256_any_lt(rsq13,rcutoff2))
2217 r13 = _mm256_mul_ps(rsq13,rinv13);
2219 /* EWALD ELECTROSTATICS */
2221 /* Analytical PME correction */
2222 zeta2 = _mm256_mul_ps(beta2,rsq13);
2223 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
2224 pmecorrF = avx256_pmecorrF_f(zeta2);
2225 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2226 felec = _mm256_mul_ps(qq13,felec);
2227 pmecorrV = avx256_pmecorrV_f(zeta2);
2228 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2229 velec = _mm256_sub_ps(rinv13,pmecorrV);
2230 velec = _mm256_mul_ps(qq13,velec);
2232 d = _mm256_sub_ps(r13,rswitch);
2233 d = _mm256_max_ps(d,_mm256_setzero_ps());
2234 d2 = _mm256_mul_ps(d,d);
2235 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2237 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2239 /* Evaluate switch function */
2240 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2241 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
2242 cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
2244 fscal = felec;
2246 fscal = _mm256_and_ps(fscal,cutoff_mask);
2248 /* Calculate temporary vectorial force */
2249 tx = _mm256_mul_ps(fscal,dx13);
2250 ty = _mm256_mul_ps(fscal,dy13);
2251 tz = _mm256_mul_ps(fscal,dz13);
2253 /* Update vectorial force */
2254 fix1 = _mm256_add_ps(fix1,tx);
2255 fiy1 = _mm256_add_ps(fiy1,ty);
2256 fiz1 = _mm256_add_ps(fiz1,tz);
2258 fjx3 = _mm256_add_ps(fjx3,tx);
2259 fjy3 = _mm256_add_ps(fjy3,ty);
2260 fjz3 = _mm256_add_ps(fjz3,tz);
2264 /**************************
2265 * CALCULATE INTERACTIONS *
2266 **************************/
2268 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2271 r21 = _mm256_mul_ps(rsq21,rinv21);
2273 /* EWALD ELECTROSTATICS */
2275 /* Analytical PME correction */
2276 zeta2 = _mm256_mul_ps(beta2,rsq21);
2277 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
2278 pmecorrF = avx256_pmecorrF_f(zeta2);
2279 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2280 felec = _mm256_mul_ps(qq21,felec);
2281 pmecorrV = avx256_pmecorrV_f(zeta2);
2282 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2283 velec = _mm256_sub_ps(rinv21,pmecorrV);
2284 velec = _mm256_mul_ps(qq21,velec);
2286 d = _mm256_sub_ps(r21,rswitch);
2287 d = _mm256_max_ps(d,_mm256_setzero_ps());
2288 d2 = _mm256_mul_ps(d,d);
2289 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2291 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2293 /* Evaluate switch function */
2294 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2295 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
2296 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2298 fscal = felec;
2300 fscal = _mm256_and_ps(fscal,cutoff_mask);
2302 /* Calculate temporary vectorial force */
2303 tx = _mm256_mul_ps(fscal,dx21);
2304 ty = _mm256_mul_ps(fscal,dy21);
2305 tz = _mm256_mul_ps(fscal,dz21);
2307 /* Update vectorial force */
2308 fix2 = _mm256_add_ps(fix2,tx);
2309 fiy2 = _mm256_add_ps(fiy2,ty);
2310 fiz2 = _mm256_add_ps(fiz2,tz);
2312 fjx1 = _mm256_add_ps(fjx1,tx);
2313 fjy1 = _mm256_add_ps(fjy1,ty);
2314 fjz1 = _mm256_add_ps(fjz1,tz);
2318 /**************************
2319 * CALCULATE INTERACTIONS *
2320 **************************/
2322 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2325 r22 = _mm256_mul_ps(rsq22,rinv22);
2327 /* EWALD ELECTROSTATICS */
2329 /* Analytical PME correction */
2330 zeta2 = _mm256_mul_ps(beta2,rsq22);
2331 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
2332 pmecorrF = avx256_pmecorrF_f(zeta2);
2333 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2334 felec = _mm256_mul_ps(qq22,felec);
2335 pmecorrV = avx256_pmecorrV_f(zeta2);
2336 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2337 velec = _mm256_sub_ps(rinv22,pmecorrV);
2338 velec = _mm256_mul_ps(qq22,velec);
2340 d = _mm256_sub_ps(r22,rswitch);
2341 d = _mm256_max_ps(d,_mm256_setzero_ps());
2342 d2 = _mm256_mul_ps(d,d);
2343 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2345 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2347 /* Evaluate switch function */
2348 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2349 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
2350 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2352 fscal = felec;
2354 fscal = _mm256_and_ps(fscal,cutoff_mask);
2356 /* Calculate temporary vectorial force */
2357 tx = _mm256_mul_ps(fscal,dx22);
2358 ty = _mm256_mul_ps(fscal,dy22);
2359 tz = _mm256_mul_ps(fscal,dz22);
2361 /* Update vectorial force */
2362 fix2 = _mm256_add_ps(fix2,tx);
2363 fiy2 = _mm256_add_ps(fiy2,ty);
2364 fiz2 = _mm256_add_ps(fiz2,tz);
2366 fjx2 = _mm256_add_ps(fjx2,tx);
2367 fjy2 = _mm256_add_ps(fjy2,ty);
2368 fjz2 = _mm256_add_ps(fjz2,tz);
2372 /**************************
2373 * CALCULATE INTERACTIONS *
2374 **************************/
2376 if (gmx_mm256_any_lt(rsq23,rcutoff2))
2379 r23 = _mm256_mul_ps(rsq23,rinv23);
2381 /* EWALD ELECTROSTATICS */
2383 /* Analytical PME correction */
2384 zeta2 = _mm256_mul_ps(beta2,rsq23);
2385 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
2386 pmecorrF = avx256_pmecorrF_f(zeta2);
2387 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2388 felec = _mm256_mul_ps(qq23,felec);
2389 pmecorrV = avx256_pmecorrV_f(zeta2);
2390 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2391 velec = _mm256_sub_ps(rinv23,pmecorrV);
2392 velec = _mm256_mul_ps(qq23,velec);
2394 d = _mm256_sub_ps(r23,rswitch);
2395 d = _mm256_max_ps(d,_mm256_setzero_ps());
2396 d2 = _mm256_mul_ps(d,d);
2397 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2399 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2401 /* Evaluate switch function */
2402 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2403 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
2404 cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
2406 fscal = felec;
2408 fscal = _mm256_and_ps(fscal,cutoff_mask);
2410 /* Calculate temporary vectorial force */
2411 tx = _mm256_mul_ps(fscal,dx23);
2412 ty = _mm256_mul_ps(fscal,dy23);
2413 tz = _mm256_mul_ps(fscal,dz23);
2415 /* Update vectorial force */
2416 fix2 = _mm256_add_ps(fix2,tx);
2417 fiy2 = _mm256_add_ps(fiy2,ty);
2418 fiz2 = _mm256_add_ps(fiz2,tz);
2420 fjx3 = _mm256_add_ps(fjx3,tx);
2421 fjy3 = _mm256_add_ps(fjy3,ty);
2422 fjz3 = _mm256_add_ps(fjz3,tz);
2426 /**************************
2427 * CALCULATE INTERACTIONS *
2428 **************************/
2430 if (gmx_mm256_any_lt(rsq31,rcutoff2))
2433 r31 = _mm256_mul_ps(rsq31,rinv31);
2435 /* EWALD ELECTROSTATICS */
2437 /* Analytical PME correction */
2438 zeta2 = _mm256_mul_ps(beta2,rsq31);
2439 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
2440 pmecorrF = avx256_pmecorrF_f(zeta2);
2441 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2442 felec = _mm256_mul_ps(qq31,felec);
2443 pmecorrV = avx256_pmecorrV_f(zeta2);
2444 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2445 velec = _mm256_sub_ps(rinv31,pmecorrV);
2446 velec = _mm256_mul_ps(qq31,velec);
2448 d = _mm256_sub_ps(r31,rswitch);
2449 d = _mm256_max_ps(d,_mm256_setzero_ps());
2450 d2 = _mm256_mul_ps(d,d);
2451 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2453 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2455 /* Evaluate switch function */
2456 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2457 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
2458 cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
2460 fscal = felec;
2462 fscal = _mm256_and_ps(fscal,cutoff_mask);
2464 /* Calculate temporary vectorial force */
2465 tx = _mm256_mul_ps(fscal,dx31);
2466 ty = _mm256_mul_ps(fscal,dy31);
2467 tz = _mm256_mul_ps(fscal,dz31);
2469 /* Update vectorial force */
2470 fix3 = _mm256_add_ps(fix3,tx);
2471 fiy3 = _mm256_add_ps(fiy3,ty);
2472 fiz3 = _mm256_add_ps(fiz3,tz);
2474 fjx1 = _mm256_add_ps(fjx1,tx);
2475 fjy1 = _mm256_add_ps(fjy1,ty);
2476 fjz1 = _mm256_add_ps(fjz1,tz);
2480 /**************************
2481 * CALCULATE INTERACTIONS *
2482 **************************/
2484 if (gmx_mm256_any_lt(rsq32,rcutoff2))
2487 r32 = _mm256_mul_ps(rsq32,rinv32);
2489 /* EWALD ELECTROSTATICS */
2491 /* Analytical PME correction */
2492 zeta2 = _mm256_mul_ps(beta2,rsq32);
2493 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
2494 pmecorrF = avx256_pmecorrF_f(zeta2);
2495 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2496 felec = _mm256_mul_ps(qq32,felec);
2497 pmecorrV = avx256_pmecorrV_f(zeta2);
2498 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2499 velec = _mm256_sub_ps(rinv32,pmecorrV);
2500 velec = _mm256_mul_ps(qq32,velec);
2502 d = _mm256_sub_ps(r32,rswitch);
2503 d = _mm256_max_ps(d,_mm256_setzero_ps());
2504 d2 = _mm256_mul_ps(d,d);
2505 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2507 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2509 /* Evaluate switch function */
2510 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2511 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
2512 cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
2514 fscal = felec;
2516 fscal = _mm256_and_ps(fscal,cutoff_mask);
2518 /* Calculate temporary vectorial force */
2519 tx = _mm256_mul_ps(fscal,dx32);
2520 ty = _mm256_mul_ps(fscal,dy32);
2521 tz = _mm256_mul_ps(fscal,dz32);
2523 /* Update vectorial force */
2524 fix3 = _mm256_add_ps(fix3,tx);
2525 fiy3 = _mm256_add_ps(fiy3,ty);
2526 fiz3 = _mm256_add_ps(fiz3,tz);
2528 fjx2 = _mm256_add_ps(fjx2,tx);
2529 fjy2 = _mm256_add_ps(fjy2,ty);
2530 fjz2 = _mm256_add_ps(fjz2,tz);
2534 /**************************
2535 * CALCULATE INTERACTIONS *
2536 **************************/
2538 if (gmx_mm256_any_lt(rsq33,rcutoff2))
2541 r33 = _mm256_mul_ps(rsq33,rinv33);
2543 /* EWALD ELECTROSTATICS */
2545 /* Analytical PME correction */
2546 zeta2 = _mm256_mul_ps(beta2,rsq33);
2547 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
2548 pmecorrF = avx256_pmecorrF_f(zeta2);
2549 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2550 felec = _mm256_mul_ps(qq33,felec);
2551 pmecorrV = avx256_pmecorrV_f(zeta2);
2552 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2553 velec = _mm256_sub_ps(rinv33,pmecorrV);
2554 velec = _mm256_mul_ps(qq33,velec);
2556 d = _mm256_sub_ps(r33,rswitch);
2557 d = _mm256_max_ps(d,_mm256_setzero_ps());
2558 d2 = _mm256_mul_ps(d,d);
2559 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2561 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2563 /* Evaluate switch function */
2564 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2565 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
2566 cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
2568 fscal = felec;
2570 fscal = _mm256_and_ps(fscal,cutoff_mask);
2572 /* Calculate temporary vectorial force */
2573 tx = _mm256_mul_ps(fscal,dx33);
2574 ty = _mm256_mul_ps(fscal,dy33);
2575 tz = _mm256_mul_ps(fscal,dz33);
2577 /* Update vectorial force */
2578 fix3 = _mm256_add_ps(fix3,tx);
2579 fiy3 = _mm256_add_ps(fiy3,ty);
2580 fiz3 = _mm256_add_ps(fiz3,tz);
2582 fjx3 = _mm256_add_ps(fjx3,tx);
2583 fjy3 = _mm256_add_ps(fjy3,ty);
2584 fjz3 = _mm256_add_ps(fjz3,tz);
2588 fjptrA = f+j_coord_offsetA;
2589 fjptrB = f+j_coord_offsetB;
2590 fjptrC = f+j_coord_offsetC;
2591 fjptrD = f+j_coord_offsetD;
2592 fjptrE = f+j_coord_offsetE;
2593 fjptrF = f+j_coord_offsetF;
2594 fjptrG = f+j_coord_offsetG;
2595 fjptrH = f+j_coord_offsetH;
2597 gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2598 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2599 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2601 /* Inner loop uses 1004 flops */
2604 if(jidx<j_index_end)
2607 /* Get j neighbor index, and coordinate index */
2608 jnrlistA = jjnr[jidx];
2609 jnrlistB = jjnr[jidx+1];
2610 jnrlistC = jjnr[jidx+2];
2611 jnrlistD = jjnr[jidx+3];
2612 jnrlistE = jjnr[jidx+4];
2613 jnrlistF = jjnr[jidx+5];
2614 jnrlistG = jjnr[jidx+6];
2615 jnrlistH = jjnr[jidx+7];
2616 /* Sign of each element will be negative for non-real atoms.
2617 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2618 * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
2619 */
2620 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
2621 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
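/* Example: if the list tail were jjnr = {12, 57, 94, -1, -1, -1, -1, -1}, the signed compare
 * against zero would set lanes 3-7 of dummy_mask to all ones and lanes 0-2 to zero; the two
 * 128-bit halves are then stacked into one 256-bit mask. Distances and forces computed in
 * the padded lanes are cleared with _mm256_andnot_ps before they are used.
 */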
2623 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
2624 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
2625 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
2626 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
2627 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
2628 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
2629 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
2630 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
2631 j_coord_offsetA = DIM*jnrA;
2632 j_coord_offsetB = DIM*jnrB;
2633 j_coord_offsetC = DIM*jnrC;
2634 j_coord_offsetD = DIM*jnrD;
2635 j_coord_offsetE = DIM*jnrE;
2636 j_coord_offsetF = DIM*jnrF;
2637 j_coord_offsetG = DIM*jnrG;
2638 j_coord_offsetH = DIM*jnrH;
2640 /* load j atom coordinates */
2641 gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2642 x+j_coord_offsetC,x+j_coord_offsetD,
2643 x+j_coord_offsetE,x+j_coord_offsetF,
2644 x+j_coord_offsetG,x+j_coord_offsetH,
2645 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
2646 &jy2,&jz2,&jx3,&jy3,&jz3);
2648 /* Calculate displacement vector */
2649 dx00 = _mm256_sub_ps(ix0,jx0);
2650 dy00 = _mm256_sub_ps(iy0,jy0);
2651 dz00 = _mm256_sub_ps(iz0,jz0);
2652 dx11 = _mm256_sub_ps(ix1,jx1);
2653 dy11 = _mm256_sub_ps(iy1,jy1);
2654 dz11 = _mm256_sub_ps(iz1,jz1);
2655 dx12 = _mm256_sub_ps(ix1,jx2);
2656 dy12 = _mm256_sub_ps(iy1,jy2);
2657 dz12 = _mm256_sub_ps(iz1,jz2);
2658 dx13 = _mm256_sub_ps(ix1,jx3);
2659 dy13 = _mm256_sub_ps(iy1,jy3);
2660 dz13 = _mm256_sub_ps(iz1,jz3);
2661 dx21 = _mm256_sub_ps(ix2,jx1);
2662 dy21 = _mm256_sub_ps(iy2,jy1);
2663 dz21 = _mm256_sub_ps(iz2,jz1);
2664 dx22 = _mm256_sub_ps(ix2,jx2);
2665 dy22 = _mm256_sub_ps(iy2,jy2);
2666 dz22 = _mm256_sub_ps(iz2,jz2);
2667 dx23 = _mm256_sub_ps(ix2,jx3);
2668 dy23 = _mm256_sub_ps(iy2,jy3);
2669 dz23 = _mm256_sub_ps(iz2,jz3);
2670 dx31 = _mm256_sub_ps(ix3,jx1);
2671 dy31 = _mm256_sub_ps(iy3,jy1);
2672 dz31 = _mm256_sub_ps(iz3,jz1);
2673 dx32 = _mm256_sub_ps(ix3,jx2);
2674 dy32 = _mm256_sub_ps(iy3,jy2);
2675 dz32 = _mm256_sub_ps(iz3,jz2);
2676 dx33 = _mm256_sub_ps(ix3,jx3);
2677 dy33 = _mm256_sub_ps(iy3,jy3);
2678 dz33 = _mm256_sub_ps(iz3,jz3);
2680 /* Calculate squared distance and things based on it */
2681 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2682 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2683 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2684 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
2685 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2686 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2687 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
2688 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
2689 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
2690 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
2692 rinv00 = avx256_invsqrt_f(rsq00);
2693 rinv11 = avx256_invsqrt_f(rsq11);
2694 rinv12 = avx256_invsqrt_f(rsq12);
2695 rinv13 = avx256_invsqrt_f(rsq13);
2696 rinv21 = avx256_invsqrt_f(rsq21);
2697 rinv22 = avx256_invsqrt_f(rsq22);
2698 rinv23 = avx256_invsqrt_f(rsq23);
2699 rinv31 = avx256_invsqrt_f(rsq31);
2700 rinv32 = avx256_invsqrt_f(rsq32);
2701 rinv33 = avx256_invsqrt_f(rsq33);
2703 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
2704 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
2705 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
2706 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
2707 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
2708 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
2709 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
2710 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
2711 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
2712 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
2714 fjx0 = _mm256_setzero_ps();
2715 fjy0 = _mm256_setzero_ps();
2716 fjz0 = _mm256_setzero_ps();
2717 fjx1 = _mm256_setzero_ps();
2718 fjy1 = _mm256_setzero_ps();
2719 fjz1 = _mm256_setzero_ps();
2720 fjx2 = _mm256_setzero_ps();
2721 fjy2 = _mm256_setzero_ps();
2722 fjz2 = _mm256_setzero_ps();
2723 fjx3 = _mm256_setzero_ps();
2724 fjy3 = _mm256_setzero_ps();
2725 fjz3 = _mm256_setzero_ps();
2727 /**************************
2728 * CALCULATE INTERACTIONS *
2729 **************************/
2731 if (gmx_mm256_any_lt(rsq00,rcutoff2))
2734 r00 = _mm256_mul_ps(rsq00,rinv00);
2735 r00 = _mm256_andnot_ps(dummy_mask,r00);
2737 /* LENNARD-JONES DISPERSION/REPULSION */
2739 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2740 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
2741 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
2742 vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
2743 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
2745 d = _mm256_sub_ps(r00,rswitch);
2746 d = _mm256_max_ps(d,_mm256_setzero_ps());
2747 d2 = _mm256_mul_ps(d,d);
2748 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2750 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2752 /* Evaluate switch function */
2753 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2754 fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
2755 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2757 fscal = fvdw;
2759 fscal = _mm256_and_ps(fscal,cutoff_mask);
2761 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2763 /* Calculate temporary vectorial force */
2764 tx = _mm256_mul_ps(fscal,dx00);
2765 ty = _mm256_mul_ps(fscal,dy00);
2766 tz = _mm256_mul_ps(fscal,dz00);
2768 /* Update vectorial force */
2769 fix0 = _mm256_add_ps(fix0,tx);
2770 fiy0 = _mm256_add_ps(fiy0,ty);
2771 fiz0 = _mm256_add_ps(fiz0,tz);
2773 fjx0 = _mm256_add_ps(fjx0,tx);
2774 fjy0 = _mm256_add_ps(fjy0,ty);
2775 fjz0 = _mm256_add_ps(fjz0,tz);
2779 /**************************
2780 * CALCULATE INTERACTIONS *
2781 **************************/
2783 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2786 r11 = _mm256_mul_ps(rsq11,rinv11);
2787 r11 = _mm256_andnot_ps(dummy_mask,r11);
2789 /* EWALD ELECTROSTATICS */
2791 /* Analytical PME correction */
2792 zeta2 = _mm256_mul_ps(beta2,rsq11);
2793 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
2794 pmecorrF = avx256_pmecorrF_f(zeta2);
2795 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2796 felec = _mm256_mul_ps(qq11,felec);
2797 pmecorrV = avx256_pmecorrV_f(zeta2);
2798 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2799 velec = _mm256_sub_ps(rinv11,pmecorrV);
2800 velec = _mm256_mul_ps(qq11,velec);
2802 d = _mm256_sub_ps(r11,rswitch);
2803 d = _mm256_max_ps(d,_mm256_setzero_ps());
2804 d2 = _mm256_mul_ps(d,d);
2805 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2807 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2809 /* Evaluate switch function */
2810 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2811 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
2812 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2814 fscal = felec;
2816 fscal = _mm256_and_ps(fscal,cutoff_mask);
2818 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2820 /* Calculate temporary vectorial force */
2821 tx = _mm256_mul_ps(fscal,dx11);
2822 ty = _mm256_mul_ps(fscal,dy11);
2823 tz = _mm256_mul_ps(fscal,dz11);
2825 /* Update vectorial force */
2826 fix1 = _mm256_add_ps(fix1,tx);
2827 fiy1 = _mm256_add_ps(fiy1,ty);
2828 fiz1 = _mm256_add_ps(fiz1,tz);
2830 fjx1 = _mm256_add_ps(fjx1,tx);
2831 fjy1 = _mm256_add_ps(fjy1,ty);
2832 fjz1 = _mm256_add_ps(fjz1,tz);
2836 /**************************
2837 * CALCULATE INTERACTIONS *
2838 **************************/
2840 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2843 r12 = _mm256_mul_ps(rsq12,rinv12);
2844 r12 = _mm256_andnot_ps(dummy_mask,r12);
2846 /* EWALD ELECTROSTATICS */
2848 /* Analytical PME correction */
2849 zeta2 = _mm256_mul_ps(beta2,rsq12);
2850 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
2851 pmecorrF = avx256_pmecorrF_f(zeta2);
2852 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2853 felec = _mm256_mul_ps(qq12,felec);
2854 pmecorrV = avx256_pmecorrV_f(zeta2);
2855 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2856 velec = _mm256_sub_ps(rinv12,pmecorrV);
2857 velec = _mm256_mul_ps(qq12,velec);
2859 d = _mm256_sub_ps(r12,rswitch);
2860 d = _mm256_max_ps(d,_mm256_setzero_ps());
2861 d2 = _mm256_mul_ps(d,d);
2862 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2864 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2866 /* Evaluate switch function */
2867 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2868 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
2869 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2871 fscal = felec;
2873 fscal = _mm256_and_ps(fscal,cutoff_mask);
2875 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2877 /* Calculate temporary vectorial force */
2878 tx = _mm256_mul_ps(fscal,dx12);
2879 ty = _mm256_mul_ps(fscal,dy12);
2880 tz = _mm256_mul_ps(fscal,dz12);
2882 /* Update vectorial force */
2883 fix1 = _mm256_add_ps(fix1,tx);
2884 fiy1 = _mm256_add_ps(fiy1,ty);
2885 fiz1 = _mm256_add_ps(fiz1,tz);
2887 fjx2 = _mm256_add_ps(fjx2,tx);
2888 fjy2 = _mm256_add_ps(fjy2,ty);
2889 fjz2 = _mm256_add_ps(fjz2,tz);
2893 /**************************
2894 * CALCULATE INTERACTIONS *
2895 **************************/
2897 if (gmx_mm256_any_lt(rsq13,rcutoff2))
2900 r13 = _mm256_mul_ps(rsq13,rinv13);
2901 r13 = _mm256_andnot_ps(dummy_mask,r13);
2903 /* EWALD ELECTROSTATICS */
2905 /* Analytical PME correction */
2906 zeta2 = _mm256_mul_ps(beta2,rsq13);
2907 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
2908 pmecorrF = avx256_pmecorrF_f(zeta2);
2909 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2910 felec = _mm256_mul_ps(qq13,felec);
2911 pmecorrV = avx256_pmecorrV_f(zeta2);
2912 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2913 velec = _mm256_sub_ps(rinv13,pmecorrV);
2914 velec = _mm256_mul_ps(qq13,velec);
2916 d = _mm256_sub_ps(r13,rswitch);
2917 d = _mm256_max_ps(d,_mm256_setzero_ps());
2918 d2 = _mm256_mul_ps(d,d);
2919 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2921 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2923 /* Evaluate switch function */
2924 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2925 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
2926 cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
2928 fscal = felec;
2930 fscal = _mm256_and_ps(fscal,cutoff_mask);
2932 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2934 /* Calculate temporary vectorial force */
2935 tx = _mm256_mul_ps(fscal,dx13);
2936 ty = _mm256_mul_ps(fscal,dy13);
2937 tz = _mm256_mul_ps(fscal,dz13);
2939 /* Update vectorial force */
2940 fix1 = _mm256_add_ps(fix1,tx);
2941 fiy1 = _mm256_add_ps(fiy1,ty);
2942 fiz1 = _mm256_add_ps(fiz1,tz);
2944 fjx3 = _mm256_add_ps(fjx3,tx);
2945 fjy3 = _mm256_add_ps(fjy3,ty);
2946 fjz3 = _mm256_add_ps(fjz3,tz);
2950 /**************************
2951 * CALCULATE INTERACTIONS *
2952 **************************/
2954 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2957 r21 = _mm256_mul_ps(rsq21,rinv21);
2958 r21 = _mm256_andnot_ps(dummy_mask,r21);
2960 /* EWALD ELECTROSTATICS */
2962 /* Analytical PME correction */
2963 zeta2 = _mm256_mul_ps(beta2,rsq21);
2964 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
2965 pmecorrF = avx256_pmecorrF_f(zeta2);
2966 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2967 felec = _mm256_mul_ps(qq21,felec);
2968 pmecorrV = avx256_pmecorrV_f(zeta2);
2969 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
2970 velec = _mm256_sub_ps(rinv21,pmecorrV);
2971 velec = _mm256_mul_ps(qq21,velec);
2973 d = _mm256_sub_ps(r21,rswitch);
2974 d = _mm256_max_ps(d,_mm256_setzero_ps());
2975 d2 = _mm256_mul_ps(d,d);
2976 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2978 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2980 /* Evaluate switch function */
2981 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2982 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
2983 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2985 fscal = felec;
2987 fscal = _mm256_and_ps(fscal,cutoff_mask);
2989 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2991 /* Calculate temporary vectorial force */
2992 tx = _mm256_mul_ps(fscal,dx21);
2993 ty = _mm256_mul_ps(fscal,dy21);
2994 tz = _mm256_mul_ps(fscal,dz21);
2996 /* Update vectorial force */
2997 fix2 = _mm256_add_ps(fix2,tx);
2998 fiy2 = _mm256_add_ps(fiy2,ty);
2999 fiz2 = _mm256_add_ps(fiz2,tz);
3001 fjx1 = _mm256_add_ps(fjx1,tx);
3002 fjy1 = _mm256_add_ps(fjy1,ty);
3003 fjz1 = _mm256_add_ps(fjz1,tz);
3007 /**************************
3008 * CALCULATE INTERACTIONS *
3009 **************************/
3011 if (gmx_mm256_any_lt(rsq22,rcutoff2))
3014 r22 = _mm256_mul_ps(rsq22,rinv22);
3015 r22 = _mm256_andnot_ps(dummy_mask,r22);
3017 /* EWALD ELECTROSTATICS */
3019 /* Analytical PME correction */
3020 zeta2 = _mm256_mul_ps(beta2,rsq22);
3021 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
3022 pmecorrF = avx256_pmecorrF_f(zeta2);
3023 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3024 felec = _mm256_mul_ps(qq22,felec);
3025 pmecorrV = avx256_pmecorrV_f(zeta2);
3026 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
3027 velec = _mm256_sub_ps(rinv22,pmecorrV);
3028 velec = _mm256_mul_ps(qq22,velec);
3030 d = _mm256_sub_ps(r22,rswitch);
3031 d = _mm256_max_ps(d,_mm256_setzero_ps());
3032 d2 = _mm256_mul_ps(d,d);
3033 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3035 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3037 /* Evaluate switch function */
3038 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
3039 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
3040 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
3042 fscal = felec;
3044 fscal = _mm256_and_ps(fscal,cutoff_mask);
3046 fscal = _mm256_andnot_ps(dummy_mask,fscal);
3048 /* Calculate temporary vectorial force */
3049 tx = _mm256_mul_ps(fscal,dx22);
3050 ty = _mm256_mul_ps(fscal,dy22);
3051 tz = _mm256_mul_ps(fscal,dz22);
3053 /* Update vectorial force */
3054 fix2 = _mm256_add_ps(fix2,tx);
3055 fiy2 = _mm256_add_ps(fiy2,ty);
3056 fiz2 = _mm256_add_ps(fiz2,tz);
3058 fjx2 = _mm256_add_ps(fjx2,tx);
3059 fjy2 = _mm256_add_ps(fjy2,ty);
3060 fjz2 = _mm256_add_ps(fjz2,tz);
3064 /**************************
3065 * CALCULATE INTERACTIONS *
3066 **************************/
3068 if (gmx_mm256_any_lt(rsq23,rcutoff2))
3071 r23 = _mm256_mul_ps(rsq23,rinv23);
3072 r23 = _mm256_andnot_ps(dummy_mask,r23);
3074 /* EWALD ELECTROSTATICS */
3076 /* Analytical PME correction */
3077 zeta2 = _mm256_mul_ps(beta2,rsq23);
3078 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
3079 pmecorrF = avx256_pmecorrF_f(zeta2);
3080 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3081 felec = _mm256_mul_ps(qq23,felec);
3082 pmecorrV = avx256_pmecorrV_f(zeta2);
3083 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
3084 velec = _mm256_sub_ps(rinv23,pmecorrV);
3085 velec = _mm256_mul_ps(qq23,velec);
3087 d = _mm256_sub_ps(r23,rswitch);
3088 d = _mm256_max_ps(d,_mm256_setzero_ps());
3089 d2 = _mm256_mul_ps(d,d);
3090 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3092 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3094 /* Evaluate switch function */
3095 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
3096 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
3097 cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
3099 fscal = felec;
3101 fscal = _mm256_and_ps(fscal,cutoff_mask);
3103 fscal = _mm256_andnot_ps(dummy_mask,fscal);
3105 /* Calculate temporary vectorial force */
3106 tx = _mm256_mul_ps(fscal,dx23);
3107 ty = _mm256_mul_ps(fscal,dy23);
3108 tz = _mm256_mul_ps(fscal,dz23);
3110 /* Update vectorial force */
3111 fix2 = _mm256_add_ps(fix2,tx);
3112 fiy2 = _mm256_add_ps(fiy2,ty);
3113 fiz2 = _mm256_add_ps(fiz2,tz);
3115 fjx3 = _mm256_add_ps(fjx3,tx);
3116 fjy3 = _mm256_add_ps(fjy3,ty);
3117 fjz3 = _mm256_add_ps(fjz3,tz);
3121 /**************************
3122 * CALCULATE INTERACTIONS *
3123 **************************/
3125 if (gmx_mm256_any_lt(rsq31,rcutoff2))
3128 r31 = _mm256_mul_ps(rsq31,rinv31);
3129 r31 = _mm256_andnot_ps(dummy_mask,r31);
3131 /* EWALD ELECTROSTATICS */
3133 /* Analytical PME correction */
3134 zeta2 = _mm256_mul_ps(beta2,rsq31);
3135 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
3136 pmecorrF = avx256_pmecorrF_f(zeta2);
3137 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3138 felec = _mm256_mul_ps(qq31,felec);
3139 pmecorrV = avx256_pmecorrV_f(zeta2);
3140 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
3141 velec = _mm256_sub_ps(rinv31,pmecorrV);
3142 velec = _mm256_mul_ps(qq31,velec);
3144 d = _mm256_sub_ps(r31,rswitch);
3145 d = _mm256_max_ps(d,_mm256_setzero_ps());
3146 d2 = _mm256_mul_ps(d,d);
3147 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3149 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3151 /* Evaluate switch function */
3152 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
3153 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
3154 cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
3156 fscal = felec;
3158 fscal = _mm256_and_ps(fscal,cutoff_mask);
3160 fscal = _mm256_andnot_ps(dummy_mask,fscal);
3162 /* Calculate temporary vectorial force */
3163 tx = _mm256_mul_ps(fscal,dx31);
3164 ty = _mm256_mul_ps(fscal,dy31);
3165 tz = _mm256_mul_ps(fscal,dz31);
3167 /* Update vectorial force */
3168 fix3 = _mm256_add_ps(fix3,tx);
3169 fiy3 = _mm256_add_ps(fiy3,ty);
3170 fiz3 = _mm256_add_ps(fiz3,tz);
3172 fjx1 = _mm256_add_ps(fjx1,tx);
3173 fjy1 = _mm256_add_ps(fjy1,ty);
3174 fjz1 = _mm256_add_ps(fjz1,tz);
3178 /**************************
3179 * CALCULATE INTERACTIONS *
3180 **************************/
3182 if (gmx_mm256_any_lt(rsq32,rcutoff2))
3185 r32 = _mm256_mul_ps(rsq32,rinv32);
3186 r32 = _mm256_andnot_ps(dummy_mask,r32);
3188 /* EWALD ELECTROSTATICS */
3190 /* Analytical PME correction */
3191 zeta2 = _mm256_mul_ps(beta2,rsq32);
3192 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
3193 pmecorrF = avx256_pmecorrF_f(zeta2);
3194 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3195 felec = _mm256_mul_ps(qq32,felec);
3196 pmecorrV = avx256_pmecorrV_f(zeta2);
3197 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
3198 velec = _mm256_sub_ps(rinv32,pmecorrV);
3199 velec = _mm256_mul_ps(qq32,velec);
3201 d = _mm256_sub_ps(r32,rswitch);
3202 d = _mm256_max_ps(d,_mm256_setzero_ps());
3203 d2 = _mm256_mul_ps(d,d);
3204 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3206 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3208 /* Evaluate switch function */
3209 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
3210 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
3211 cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
3213 fscal = felec;
3215 fscal = _mm256_and_ps(fscal,cutoff_mask);
3217 fscal = _mm256_andnot_ps(dummy_mask,fscal);
3219 /* Calculate temporary vectorial force */
3220 tx = _mm256_mul_ps(fscal,dx32);
3221 ty = _mm256_mul_ps(fscal,dy32);
3222 tz = _mm256_mul_ps(fscal,dz32);
3224 /* Update vectorial force */
3225 fix3 = _mm256_add_ps(fix3,tx);
3226 fiy3 = _mm256_add_ps(fiy3,ty);
3227 fiz3 = _mm256_add_ps(fiz3,tz);
3229 fjx2 = _mm256_add_ps(fjx2,tx);
3230 fjy2 = _mm256_add_ps(fjy2,ty);
3231 fjz2 = _mm256_add_ps(fjz2,tz);
3235 /**************************
3236 * CALCULATE INTERACTIONS *
3237 **************************/
3239 if (gmx_mm256_any_lt(rsq33,rcutoff2))
3242 r33 = _mm256_mul_ps(rsq33,rinv33);
3243 r33 = _mm256_andnot_ps(dummy_mask,r33);
3245 /* EWALD ELECTROSTATICS */
3247 /* Analytical PME correction */
3248 zeta2 = _mm256_mul_ps(beta2,rsq33);
3249 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
3250 pmecorrF = avx256_pmecorrF_f(zeta2);
3251 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3252 felec = _mm256_mul_ps(qq33,felec);
3253 pmecorrV = avx256_pmecorrV_f(zeta2);
3254 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
3255 velec = _mm256_sub_ps(rinv33,pmecorrV);
3256 velec = _mm256_mul_ps(qq33,velec);
3258 d = _mm256_sub_ps(r33,rswitch);
3259 d = _mm256_max_ps(d,_mm256_setzero_ps());
3260 d2 = _mm256_mul_ps(d,d);
3261 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3263 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3265 /* Evaluate switch function */
3266 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
3267 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
3268 cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
3270 fscal = felec;
3272 fscal = _mm256_and_ps(fscal,cutoff_mask);
3274 fscal = _mm256_andnot_ps(dummy_mask,fscal);
3276 /* Calculate temporary vectorial force */
3277 tx = _mm256_mul_ps(fscal,dx33);
3278 ty = _mm256_mul_ps(fscal,dy33);
3279 tz = _mm256_mul_ps(fscal,dz33);
3281 /* Update vectorial force */
3282 fix3 = _mm256_add_ps(fix3,tx);
3283 fiy3 = _mm256_add_ps(fiy3,ty);
3284 fiz3 = _mm256_add_ps(fiz3,tz);
3286 fjx3 = _mm256_add_ps(fjx3,tx);
3287 fjy3 = _mm256_add_ps(fjy3,ty);
3288 fjz3 = _mm256_add_ps(fjz3,tz);
3292 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
3293 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
3294 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
3295 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
3296 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
3297 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
3298 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
3299 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
3301 gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
3302 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
3303 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
3305 /* Inner loop uses 1014 flops */
3308 /* End of innermost loop */
3310 gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
3311 f+i_coord_offset,fshift+i_shift_offset);
3313 /* Increment number of inner iterations */
3314 inneriter += j_index_end - j_index_start;
3316 /* Outer loop uses 24 flops */
3319 /* Increment number of outer iterations */
3320 outeriter += nri;
3322 /* Update outer/inner flops */
3324 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*1014);
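/* The counts accumulated above feed GROMACS' flop accounting: each outer iteration is
 * charged 24 flops and each inner (pair) iteration 1014 flops under the
 * ELEC_VDW_W4W4_F counter, which ends up in the performance summary of the run log.
 */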