2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
44 #include "../nb_kernel.h"
45 #include "types/simple.h"
49 #include "gromacs/simd/math_x86_avx_256_single.h"
50 #include "kernelutil_x86_avx_256_single.h"
53 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_VF_avx_256_single
54 * Electrostatics interaction: Ewald
55 * VdW interaction: LJEwald
56 * Geometry: Water4-Water4
57 * Calculate force/pot: PotentialAndForce
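 * ElecEwSh denotes Ewald (PME) real-space electrostatics with the potential shifted to zero
 * at the cut-off, VdwLJEwSh denotes LJ-PME (Lennard-Jones Ewald) dispersion with the same
 * potential shift, GeomW4W4 means both i and j particles are four-site waters, and VF means
 * both potential energies and forces are computed.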
60 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_VF_avx_256_single
61 (t_nblist * gmx_restrict nlist,
62 rvec * gmx_restrict xx,
63 rvec * gmx_restrict ff,
64 t_forcerec * gmx_restrict fr,
65 t_mdatoms * gmx_restrict mdatoms,
66 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67 t_nrnb * gmx_restrict nrnb)
69 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
70 * just 0 for non-waters.
71 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
72 * jnr indices corresponding to data put in the eight positions in the SIMD register.
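 * For the Water4-Water4 geometry only site 0 (the vdW-only site) carries Lennard-Jones
 * parameters, so the single 0-0 pair below gets the LJ-PME interaction, while the nine pairs
 * among the charge sites 1-3 get Ewald electrostatics. With 256-bit single precision each
 * SIMD register holds eight j waters at once (lanes A-H).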
74 int i_shift_offset,i_coord_offset,outeriter,inneriter;
75 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76 int jnrA,jnrB,jnrC,jnrD;
77 int jnrE,jnrF,jnrG,jnrH;
78 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
79 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
80 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
81 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
82 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
84 real *shiftvec,*fshift,*x,*f;
85 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
87 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
88 real * vdwioffsetptr0;
89 real * vdwgridioffsetptr0;
90 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
91 real * vdwioffsetptr1;
92 real * vdwgridioffsetptr1;
93 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
94 real * vdwioffsetptr2;
95 real * vdwgridioffsetptr2;
96 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
97 real * vdwioffsetptr3;
98 real * vdwgridioffsetptr3;
99 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
100 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
101 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
102 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
103 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
104 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
105 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
106 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
107 __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
108 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
109 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
110 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
111 __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
112 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
113 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
114 __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
115 __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
116 __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
117 __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
118 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
121 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
124 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
125 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
137 __m256 ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
138 __m256 one_half = _mm256_set1_ps(0.5);
139 __m256 minus_one = _mm256_set1_ps(-1.0);
141 __m128i ewitab_lo,ewitab_hi;
142 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
143 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
145 __m256 dummy_mask,cutoff_mask;
146 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
147 __m256 one = _mm256_set1_ps(1.0);
148 __m256 two = _mm256_set1_ps(2.0);
154 jindex = nlist->jindex;
156 shiftidx = nlist->shift;
158 shiftvec = fr->shift_vec[0];
159 fshift = fr->fshift[0];
160 facel = _mm256_set1_ps(fr->epsfac);
161 charge = mdatoms->chargeA;
162 nvdwtype = fr->ntype;
164 vdwtype = mdatoms->typeA;
165 vdwgridparam = fr->ljpme_c6grid;
166 sh_lj_ewald = _mm256_set1_ps(fr->ic->sh_lj_ewald);
167 ewclj = _mm256_set1_ps(fr->ewaldcoeff_lj);
168 ewclj2 = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
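/* Note that ewclj2 holds minus the square of the LJ-PME coefficient, so that
 * exp(ewclj2*rsq) in the inner loop evaluates exp(-(beta_lj*r)^2) directly, and
 * ewclj6 = ewclj2^3 equals -beta_lj^6.
 */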
170 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
171 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
172 beta2 = _mm256_mul_ps(beta,beta);
173 beta3 = _mm256_mul_ps(beta,beta2);
175 ewtab = fr->ic->tabq_coul_FDV0;
176 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
177 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
179 /* Setup water-specific parameters */
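/* Since every i water and every j water in this list has identical topology, the charge
 * products for all nine electrostatic site pairs (with the prefactor facel folded into
 * iq1-iq3) and the single 0-0 LJ parameter set are broadcast once here, outside both loops.
 */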
180 inr = nlist->iinr[0];
181 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
182 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
183 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
184 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
185 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
187 jq1 = _mm256_set1_ps(charge[inr+1]);
188 jq2 = _mm256_set1_ps(charge[inr+2]);
189 jq3 = _mm256_set1_ps(charge[inr+3]);
190 vdwjidx0A = 2*vdwtype[inr+0];
191 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
192 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
193 c6grid_00 = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
194 qq11 = _mm256_mul_ps(iq1,jq1);
195 qq12 = _mm256_mul_ps(iq1,jq2);
196 qq13 = _mm256_mul_ps(iq1,jq3);
197 qq21 = _mm256_mul_ps(iq2,jq1);
198 qq22 = _mm256_mul_ps(iq2,jq2);
199 qq23 = _mm256_mul_ps(iq2,jq3);
200 qq31 = _mm256_mul_ps(iq3,jq1);
201 qq32 = _mm256_mul_ps(iq3,jq2);
202 qq33 = _mm256_mul_ps(iq3,jq3);
204 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
205 rcutoff_scalar = fr->rcoulomb;
206 rcutoff = _mm256_set1_ps(rcutoff_scalar);
207 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
209 sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
210 rvdw = _mm256_set1_ps(fr->rvdw);
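/* Potential-shift constants: sh_vdw_invrcut6 is 1/rvdw^6 and sh_lj_ewald the corresponding
 * LJ-PME shift term; they are used below to make the dispersion potential vanish at r = rvdw.
 */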
212 /* Avoid stupid compiler warnings */
213 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
226 for(iidx=0;iidx<4*DIM;iidx++)
231 /* Start outer loop over neighborlists */
232 for(iidx=0; iidx<nri; iidx++)
234 /* Load shift vector for this list */
235 i_shift_offset = DIM*shiftidx[iidx];
237 /* Load limits for loop over neighbors */
238 j_index_start = jindex[iidx];
239 j_index_end = jindex[iidx+1];
241 /* Get outer coordinate index */
243 i_coord_offset = DIM*inr;
245 /* Load i particle coords and add shift vector */
246 gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
247 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
249 fix0 = _mm256_setzero_ps();
250 fiy0 = _mm256_setzero_ps();
251 fiz0 = _mm256_setzero_ps();
252 fix1 = _mm256_setzero_ps();
253 fiy1 = _mm256_setzero_ps();
254 fiz1 = _mm256_setzero_ps();
255 fix2 = _mm256_setzero_ps();
256 fiy2 = _mm256_setzero_ps();
257 fiz2 = _mm256_setzero_ps();
258 fix3 = _mm256_setzero_ps();
259 fiy3 = _mm256_setzero_ps();
260 fiz3 = _mm256_setzero_ps();
262 /* Reset potential sums */
263 velecsum = _mm256_setzero_ps();
264 vvdwsum = _mm256_setzero_ps();
266 /* Start inner kernel loop */
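/* This loop handles complete groups of eight j waters; once a padded (negative) jnr entry
 * would appear in the last lane, control falls through to a second loop further down that
 * applies dummy_mask to the padded lanes of the final, partially filled group.
 */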
267 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
270 /* Get j neighbor index, and coordinate index */
279 j_coord_offsetA = DIM*jnrA;
280 j_coord_offsetB = DIM*jnrB;
281 j_coord_offsetC = DIM*jnrC;
282 j_coord_offsetD = DIM*jnrD;
283 j_coord_offsetE = DIM*jnrE;
284 j_coord_offsetF = DIM*jnrF;
285 j_coord_offsetG = DIM*jnrG;
286 j_coord_offsetH = DIM*jnrH;
288 /* load j atom coordinates */
289 gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
290 x+j_coord_offsetC,x+j_coord_offsetD,
291 x+j_coord_offsetE,x+j_coord_offsetF,
292 x+j_coord_offsetG,x+j_coord_offsetH,
293 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
294 &jy2,&jz2,&jx3,&jy3,&jz3);
296 /* Calculate displacement vector */
297 dx00 = _mm256_sub_ps(ix0,jx0);
298 dy00 = _mm256_sub_ps(iy0,jy0);
299 dz00 = _mm256_sub_ps(iz0,jz0);
300 dx11 = _mm256_sub_ps(ix1,jx1);
301 dy11 = _mm256_sub_ps(iy1,jy1);
302 dz11 = _mm256_sub_ps(iz1,jz1);
303 dx12 = _mm256_sub_ps(ix1,jx2);
304 dy12 = _mm256_sub_ps(iy1,jy2);
305 dz12 = _mm256_sub_ps(iz1,jz2);
306 dx13 = _mm256_sub_ps(ix1,jx3);
307 dy13 = _mm256_sub_ps(iy1,jy3);
308 dz13 = _mm256_sub_ps(iz1,jz3);
309 dx21 = _mm256_sub_ps(ix2,jx1);
310 dy21 = _mm256_sub_ps(iy2,jy1);
311 dz21 = _mm256_sub_ps(iz2,jz1);
312 dx22 = _mm256_sub_ps(ix2,jx2);
313 dy22 = _mm256_sub_ps(iy2,jy2);
314 dz22 = _mm256_sub_ps(iz2,jz2);
315 dx23 = _mm256_sub_ps(ix2,jx3);
316 dy23 = _mm256_sub_ps(iy2,jy3);
317 dz23 = _mm256_sub_ps(iz2,jz3);
318 dx31 = _mm256_sub_ps(ix3,jx1);
319 dy31 = _mm256_sub_ps(iy3,jy1);
320 dz31 = _mm256_sub_ps(iz3,jz1);
321 dx32 = _mm256_sub_ps(ix3,jx2);
322 dy32 = _mm256_sub_ps(iy3,jy2);
323 dz32 = _mm256_sub_ps(iz3,jz2);
324 dx33 = _mm256_sub_ps(ix3,jx3);
325 dy33 = _mm256_sub_ps(iy3,jy3);
326 dz33 = _mm256_sub_ps(iz3,jz3);
328 /* Calculate squared distance and things based on it */
329 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
330 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
331 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
332 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
333 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
334 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
335 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
336 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
337 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
338 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
340 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
341 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
342 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
343 rinv13 = gmx_mm256_invsqrt_ps(rsq13);
344 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
345 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
346 rinv23 = gmx_mm256_invsqrt_ps(rsq23);
347 rinv31 = gmx_mm256_invsqrt_ps(rsq31);
348 rinv32 = gmx_mm256_invsqrt_ps(rsq32);
349 rinv33 = gmx_mm256_invsqrt_ps(rsq33);
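/* gmx_mm256_invsqrt_ps returns 1/sqrt(rsq), typically as the hardware rsqrt estimate refined
 * by a Newton-Raphson step, which is sufficient for single precision.
 */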
351 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
352 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
353 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
354 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
355 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
356 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
357 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
358 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
359 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
360 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
362 fjx0 = _mm256_setzero_ps();
363 fjy0 = _mm256_setzero_ps();
364 fjz0 = _mm256_setzero_ps();
365 fjx1 = _mm256_setzero_ps();
366 fjy1 = _mm256_setzero_ps();
367 fjz1 = _mm256_setzero_ps();
368 fjx2 = _mm256_setzero_ps();
369 fjy2 = _mm256_setzero_ps();
370 fjz2 = _mm256_setzero_ps();
371 fjx3 = _mm256_setzero_ps();
372 fjy3 = _mm256_setzero_ps();
373 fjz3 = _mm256_setzero_ps();
375 /**************************
376 * CALCULATE INTERACTIONS *
377 **************************/
379 if (gmx_mm256_any_lt(rsq00,rcutoff2))
382 r00 = _mm256_mul_ps(rsq00,rinv00);
384 /* Analytical LJ-PME */
385 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
386 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
387 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
388 exponent = gmx_simd_exp_r(ewcljrsq);
389 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
390 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
391 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
392 vvdw6 = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
393 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
394 vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
395 _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
396 /* fvdw = [vvdw12 - vvdw6 + (C6grid * exponent * ewclj6)/6] / r^2, with ewclj6 = -beta^6 */
397 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
399 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
401 /* Update potential sum for this i atom from the interaction with this j atom. */
402 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
403 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
407 fscal = _mm256_and_ps(fscal,cutoff_mask);
409 /* Calculate temporary vectorial force */
410 tx = _mm256_mul_ps(fscal,dx00);
411 ty = _mm256_mul_ps(fscal,dy00);
412 tz = _mm256_mul_ps(fscal,dz00);
414 /* Update vectorial force */
415 fix0 = _mm256_add_ps(fix0,tx);
416 fiy0 = _mm256_add_ps(fiy0,ty);
417 fiz0 = _mm256_add_ps(fiz0,tz);
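/* By Newton's third law the same tx/ty/tz contribution accumulated on the i site is also
 * collected in fj*, which is later subtracted from the eight j atoms in one swizzled store.
 */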
419 fjx0 = _mm256_add_ps(fjx0,tx);
420 fjy0 = _mm256_add_ps(fjy0,ty);
421 fjz0 = _mm256_add_ps(fjz0,tz);
425 /**************************
426 * CALCULATE INTERACTIONS *
427 **************************/
429 if (gmx_mm256_any_lt(rsq11,rcutoff2))
432 r11 = _mm256_mul_ps(rsq11,rinv11);
434 /* EWALD ELECTROSTATICS */
436 /* Analytical PME correction */
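/* gmx_mm256_pmecorrF_ps/gmx_mm256_pmecorrV_ps are polynomial approximations of the erf-based
 * correction terms, so felec and velec below evaluate the Ewald real-space interaction
 * qq*erfc(beta*r)/r (and its force) without table lookups; subtracting sh_ewald shifts the
 * potential so it vanishes at the cut-off.
 */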
437 zeta2 = _mm256_mul_ps(beta2,rsq11);
438 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
439 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
440 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
441 felec = _mm256_mul_ps(qq11,felec);
442 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
443 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
444 velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
445 velec = _mm256_mul_ps(qq11,velec);
447 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
449 /* Update potential sum for this i atom from the interaction with this j atom. */
450 velec = _mm256_and_ps(velec,cutoff_mask);
451 velecsum = _mm256_add_ps(velecsum,velec);
455 fscal = _mm256_and_ps(fscal,cutoff_mask);
457 /* Calculate temporary vectorial force */
458 tx = _mm256_mul_ps(fscal,dx11);
459 ty = _mm256_mul_ps(fscal,dy11);
460 tz = _mm256_mul_ps(fscal,dz11);
462 /* Update vectorial force */
463 fix1 = _mm256_add_ps(fix1,tx);
464 fiy1 = _mm256_add_ps(fiy1,ty);
465 fiz1 = _mm256_add_ps(fiz1,tz);
467 fjx1 = _mm256_add_ps(fjx1,tx);
468 fjy1 = _mm256_add_ps(fjy1,ty);
469 fjz1 = _mm256_add_ps(fjz1,tz);
473 /**************************
474 * CALCULATE INTERACTIONS *
475 **************************/
477 if (gmx_mm256_any_lt(rsq12,rcutoff2))
480 r12 = _mm256_mul_ps(rsq12,rinv12);
482 /* EWALD ELECTROSTATICS */
484 /* Analytical PME correction */
485 zeta2 = _mm256_mul_ps(beta2,rsq12);
486 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
487 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
488 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
489 felec = _mm256_mul_ps(qq12,felec);
490 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
491 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
492 velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
493 velec = _mm256_mul_ps(qq12,velec);
495 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
497 /* Update potential sum for this i atom from the interaction with this j atom. */
498 velec = _mm256_and_ps(velec,cutoff_mask);
499 velecsum = _mm256_add_ps(velecsum,velec);
503 fscal = _mm256_and_ps(fscal,cutoff_mask);
505 /* Calculate temporary vectorial force */
506 tx = _mm256_mul_ps(fscal,dx12);
507 ty = _mm256_mul_ps(fscal,dy12);
508 tz = _mm256_mul_ps(fscal,dz12);
510 /* Update vectorial force */
511 fix1 = _mm256_add_ps(fix1,tx);
512 fiy1 = _mm256_add_ps(fiy1,ty);
513 fiz1 = _mm256_add_ps(fiz1,tz);
515 fjx2 = _mm256_add_ps(fjx2,tx);
516 fjy2 = _mm256_add_ps(fjy2,ty);
517 fjz2 = _mm256_add_ps(fjz2,tz);
521 /**************************
522 * CALCULATE INTERACTIONS *
523 **************************/
525 if (gmx_mm256_any_lt(rsq13,rcutoff2))
528 r13 = _mm256_mul_ps(rsq13,rinv13);
530 /* EWALD ELECTROSTATICS */
532 /* Analytical PME correction */
533 zeta2 = _mm256_mul_ps(beta2,rsq13);
534 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
535 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
536 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
537 felec = _mm256_mul_ps(qq13,felec);
538 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
539 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
540 velec = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
541 velec = _mm256_mul_ps(qq13,velec);
543 cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
545 /* Update potential sum for this i atom from the interaction with this j atom. */
546 velec = _mm256_and_ps(velec,cutoff_mask);
547 velecsum = _mm256_add_ps(velecsum,velec);
551 fscal = _mm256_and_ps(fscal,cutoff_mask);
553 /* Calculate temporary vectorial force */
554 tx = _mm256_mul_ps(fscal,dx13);
555 ty = _mm256_mul_ps(fscal,dy13);
556 tz = _mm256_mul_ps(fscal,dz13);
558 /* Update vectorial force */
559 fix1 = _mm256_add_ps(fix1,tx);
560 fiy1 = _mm256_add_ps(fiy1,ty);
561 fiz1 = _mm256_add_ps(fiz1,tz);
563 fjx3 = _mm256_add_ps(fjx3,tx);
564 fjy3 = _mm256_add_ps(fjy3,ty);
565 fjz3 = _mm256_add_ps(fjz3,tz);
569 /**************************
570 * CALCULATE INTERACTIONS *
571 **************************/
573 if (gmx_mm256_any_lt(rsq21,rcutoff2))
576 r21 = _mm256_mul_ps(rsq21,rinv21);
578 /* EWALD ELECTROSTATICS */
580 /* Analytical PME correction */
581 zeta2 = _mm256_mul_ps(beta2,rsq21);
582 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
583 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
584 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
585 felec = _mm256_mul_ps(qq21,felec);
586 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
587 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
588 velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
589 velec = _mm256_mul_ps(qq21,velec);
591 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
593 /* Update potential sum for this i atom from the interaction with this j atom. */
594 velec = _mm256_and_ps(velec,cutoff_mask);
595 velecsum = _mm256_add_ps(velecsum,velec);
599 fscal = _mm256_and_ps(fscal,cutoff_mask);
601 /* Calculate temporary vectorial force */
602 tx = _mm256_mul_ps(fscal,dx21);
603 ty = _mm256_mul_ps(fscal,dy21);
604 tz = _mm256_mul_ps(fscal,dz21);
606 /* Update vectorial force */
607 fix2 = _mm256_add_ps(fix2,tx);
608 fiy2 = _mm256_add_ps(fiy2,ty);
609 fiz2 = _mm256_add_ps(fiz2,tz);
611 fjx1 = _mm256_add_ps(fjx1,tx);
612 fjy1 = _mm256_add_ps(fjy1,ty);
613 fjz1 = _mm256_add_ps(fjz1,tz);
617 /**************************
618 * CALCULATE INTERACTIONS *
619 **************************/
621 if (gmx_mm256_any_lt(rsq22,rcutoff2))
624 r22 = _mm256_mul_ps(rsq22,rinv22);
626 /* EWALD ELECTROSTATICS */
628 /* Analytical PME correction */
629 zeta2 = _mm256_mul_ps(beta2,rsq22);
630 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
631 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
632 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
633 felec = _mm256_mul_ps(qq22,felec);
634 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
635 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
636 velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
637 velec = _mm256_mul_ps(qq22,velec);
639 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
641 /* Update potential sum for this i atom from the interaction with this j atom. */
642 velec = _mm256_and_ps(velec,cutoff_mask);
643 velecsum = _mm256_add_ps(velecsum,velec);
647 fscal = _mm256_and_ps(fscal,cutoff_mask);
649 /* Calculate temporary vectorial force */
650 tx = _mm256_mul_ps(fscal,dx22);
651 ty = _mm256_mul_ps(fscal,dy22);
652 tz = _mm256_mul_ps(fscal,dz22);
654 /* Update vectorial force */
655 fix2 = _mm256_add_ps(fix2,tx);
656 fiy2 = _mm256_add_ps(fiy2,ty);
657 fiz2 = _mm256_add_ps(fiz2,tz);
659 fjx2 = _mm256_add_ps(fjx2,tx);
660 fjy2 = _mm256_add_ps(fjy2,ty);
661 fjz2 = _mm256_add_ps(fjz2,tz);
665 /**************************
666 * CALCULATE INTERACTIONS *
667 **************************/
669 if (gmx_mm256_any_lt(rsq23,rcutoff2))
672 r23 = _mm256_mul_ps(rsq23,rinv23);
674 /* EWALD ELECTROSTATICS */
676 /* Analytical PME correction */
677 zeta2 = _mm256_mul_ps(beta2,rsq23);
678 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
679 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
680 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
681 felec = _mm256_mul_ps(qq23,felec);
682 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
683 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
684 velec = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
685 velec = _mm256_mul_ps(qq23,velec);
687 cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
689 /* Update potential sum for this i atom from the interaction with this j atom. */
690 velec = _mm256_and_ps(velec,cutoff_mask);
691 velecsum = _mm256_add_ps(velecsum,velec);
695 fscal = _mm256_and_ps(fscal,cutoff_mask);
697 /* Calculate temporary vectorial force */
698 tx = _mm256_mul_ps(fscal,dx23);
699 ty = _mm256_mul_ps(fscal,dy23);
700 tz = _mm256_mul_ps(fscal,dz23);
702 /* Update vectorial force */
703 fix2 = _mm256_add_ps(fix2,tx);
704 fiy2 = _mm256_add_ps(fiy2,ty);
705 fiz2 = _mm256_add_ps(fiz2,tz);
707 fjx3 = _mm256_add_ps(fjx3,tx);
708 fjy3 = _mm256_add_ps(fjy3,ty);
709 fjz3 = _mm256_add_ps(fjz3,tz);
713 /**************************
714 * CALCULATE INTERACTIONS *
715 **************************/
717 if (gmx_mm256_any_lt(rsq31,rcutoff2))
720 r31 = _mm256_mul_ps(rsq31,rinv31);
722 /* EWALD ELECTROSTATICS */
724 /* Analytical PME correction */
725 zeta2 = _mm256_mul_ps(beta2,rsq31);
726 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
727 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
728 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
729 felec = _mm256_mul_ps(qq31,felec);
730 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
731 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
732 velec = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
733 velec = _mm256_mul_ps(qq31,velec);
735 cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
737 /* Update potential sum for this i atom from the interaction with this j atom. */
738 velec = _mm256_and_ps(velec,cutoff_mask);
739 velecsum = _mm256_add_ps(velecsum,velec);
743 fscal = _mm256_and_ps(fscal,cutoff_mask);
745 /* Calculate temporary vectorial force */
746 tx = _mm256_mul_ps(fscal,dx31);
747 ty = _mm256_mul_ps(fscal,dy31);
748 tz = _mm256_mul_ps(fscal,dz31);
750 /* Update vectorial force */
751 fix3 = _mm256_add_ps(fix3,tx);
752 fiy3 = _mm256_add_ps(fiy3,ty);
753 fiz3 = _mm256_add_ps(fiz3,tz);
755 fjx1 = _mm256_add_ps(fjx1,tx);
756 fjy1 = _mm256_add_ps(fjy1,ty);
757 fjz1 = _mm256_add_ps(fjz1,tz);
761 /**************************
762 * CALCULATE INTERACTIONS *
763 **************************/
765 if (gmx_mm256_any_lt(rsq32,rcutoff2))
768 r32 = _mm256_mul_ps(rsq32,rinv32);
770 /* EWALD ELECTROSTATICS */
772 /* Analytical PME correction */
773 zeta2 = _mm256_mul_ps(beta2,rsq32);
774 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
775 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
776 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
777 felec = _mm256_mul_ps(qq32,felec);
778 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
779 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
780 velec = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
781 velec = _mm256_mul_ps(qq32,velec);
783 cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
785 /* Update potential sum for this i atom from the interaction with this j atom. */
786 velec = _mm256_and_ps(velec,cutoff_mask);
787 velecsum = _mm256_add_ps(velecsum,velec);
791 fscal = _mm256_and_ps(fscal,cutoff_mask);
793 /* Calculate temporary vectorial force */
794 tx = _mm256_mul_ps(fscal,dx32);
795 ty = _mm256_mul_ps(fscal,dy32);
796 tz = _mm256_mul_ps(fscal,dz32);
798 /* Update vectorial force */
799 fix3 = _mm256_add_ps(fix3,tx);
800 fiy3 = _mm256_add_ps(fiy3,ty);
801 fiz3 = _mm256_add_ps(fiz3,tz);
803 fjx2 = _mm256_add_ps(fjx2,tx);
804 fjy2 = _mm256_add_ps(fjy2,ty);
805 fjz2 = _mm256_add_ps(fjz2,tz);
809 /**************************
810 * CALCULATE INTERACTIONS *
811 **************************/
813 if (gmx_mm256_any_lt(rsq33,rcutoff2))
816 r33 = _mm256_mul_ps(rsq33,rinv33);
818 /* EWALD ELECTROSTATICS */
820 /* Analytical PME correction */
821 zeta2 = _mm256_mul_ps(beta2,rsq33);
822 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
823 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
824 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
825 felec = _mm256_mul_ps(qq33,felec);
826 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
827 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
828 velec = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
829 velec = _mm256_mul_ps(qq33,velec);
831 cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
833 /* Update potential sum for this i atom from the interaction with this j atom. */
834 velec = _mm256_and_ps(velec,cutoff_mask);
835 velecsum = _mm256_add_ps(velecsum,velec);
839 fscal = _mm256_and_ps(fscal,cutoff_mask);
841 /* Calculate temporary vectorial force */
842 tx = _mm256_mul_ps(fscal,dx33);
843 ty = _mm256_mul_ps(fscal,dy33);
844 tz = _mm256_mul_ps(fscal,dz33);
846 /* Update vectorial force */
847 fix3 = _mm256_add_ps(fix3,tx);
848 fiy3 = _mm256_add_ps(fiy3,ty);
849 fiz3 = _mm256_add_ps(fiz3,tz);
851 fjx3 = _mm256_add_ps(fjx3,tx);
852 fjy3 = _mm256_add_ps(fjy3,ty);
853 fjz3 = _mm256_add_ps(fjz3,tz);
857 fjptrA = f+j_coord_offsetA;
858 fjptrB = f+j_coord_offsetB;
859 fjptrC = f+j_coord_offsetC;
860 fjptrD = f+j_coord_offsetD;
861 fjptrE = f+j_coord_offsetE;
862 fjptrF = f+j_coord_offsetF;
863 fjptrG = f+j_coord_offsetG;
864 fjptrH = f+j_coord_offsetH;
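/* Transpose the accumulated fj* registers back to per-atom layout and subtract them from the
 * force arrays of the eight j waters in a single pass.
 */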
866 gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
867 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
868 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
870 /* Inner loop uses 1046 flops */
876 /* Get j neighbor index, and coordinate index */
877 jnrlistA = jjnr[jidx];
878 jnrlistB = jjnr[jidx+1];
879 jnrlistC = jjnr[jidx+2];
880 jnrlistD = jjnr[jidx+3];
881 jnrlistE = jjnr[jidx+4];
882 jnrlistF = jjnr[jidx+5];
883 jnrlistG = jjnr[jidx+6];
884 jnrlistH = jjnr[jidx+7];
885 /* Sign of each element will be negative for non-real atoms.
886 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
887 * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
889 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
890 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
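/* The padded (negative) indices are clamped to 0 below so the coordinate loads stay within
 * bounds; their bogus contributions are removed later by dummy_mask.
 */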
892 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
893 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
894 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
895 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
896 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
897 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
898 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
899 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
900 j_coord_offsetA = DIM*jnrA;
901 j_coord_offsetB = DIM*jnrB;
902 j_coord_offsetC = DIM*jnrC;
903 j_coord_offsetD = DIM*jnrD;
904 j_coord_offsetE = DIM*jnrE;
905 j_coord_offsetF = DIM*jnrF;
906 j_coord_offsetG = DIM*jnrG;
907 j_coord_offsetH = DIM*jnrH;
909 /* load j atom coordinates */
910 gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
911 x+j_coord_offsetC,x+j_coord_offsetD,
912 x+j_coord_offsetE,x+j_coord_offsetF,
913 x+j_coord_offsetG,x+j_coord_offsetH,
914 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
915 &jy2,&jz2,&jx3,&jy3,&jz3);
917 /* Calculate displacement vector */
918 dx00 = _mm256_sub_ps(ix0,jx0);
919 dy00 = _mm256_sub_ps(iy0,jy0);
920 dz00 = _mm256_sub_ps(iz0,jz0);
921 dx11 = _mm256_sub_ps(ix1,jx1);
922 dy11 = _mm256_sub_ps(iy1,jy1);
923 dz11 = _mm256_sub_ps(iz1,jz1);
924 dx12 = _mm256_sub_ps(ix1,jx2);
925 dy12 = _mm256_sub_ps(iy1,jy2);
926 dz12 = _mm256_sub_ps(iz1,jz2);
927 dx13 = _mm256_sub_ps(ix1,jx3);
928 dy13 = _mm256_sub_ps(iy1,jy3);
929 dz13 = _mm256_sub_ps(iz1,jz3);
930 dx21 = _mm256_sub_ps(ix2,jx1);
931 dy21 = _mm256_sub_ps(iy2,jy1);
932 dz21 = _mm256_sub_ps(iz2,jz1);
933 dx22 = _mm256_sub_ps(ix2,jx2);
934 dy22 = _mm256_sub_ps(iy2,jy2);
935 dz22 = _mm256_sub_ps(iz2,jz2);
936 dx23 = _mm256_sub_ps(ix2,jx3);
937 dy23 = _mm256_sub_ps(iy2,jy3);
938 dz23 = _mm256_sub_ps(iz2,jz3);
939 dx31 = _mm256_sub_ps(ix3,jx1);
940 dy31 = _mm256_sub_ps(iy3,jy1);
941 dz31 = _mm256_sub_ps(iz3,jz1);
942 dx32 = _mm256_sub_ps(ix3,jx2);
943 dy32 = _mm256_sub_ps(iy3,jy2);
944 dz32 = _mm256_sub_ps(iz3,jz2);
945 dx33 = _mm256_sub_ps(ix3,jx3);
946 dy33 = _mm256_sub_ps(iy3,jy3);
947 dz33 = _mm256_sub_ps(iz3,jz3);
949 /* Calculate squared distance and things based on it */
950 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
951 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
952 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
953 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
954 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
955 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
956 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
957 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
958 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
959 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
961 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
962 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
963 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
964 rinv13 = gmx_mm256_invsqrt_ps(rsq13);
965 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
966 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
967 rinv23 = gmx_mm256_invsqrt_ps(rsq23);
968 rinv31 = gmx_mm256_invsqrt_ps(rsq31);
969 rinv32 = gmx_mm256_invsqrt_ps(rsq32);
970 rinv33 = gmx_mm256_invsqrt_ps(rsq33);
972 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
973 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
974 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
975 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
976 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
977 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
978 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
979 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
980 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
981 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
983 fjx0 = _mm256_setzero_ps();
984 fjy0 = _mm256_setzero_ps();
985 fjz0 = _mm256_setzero_ps();
986 fjx1 = _mm256_setzero_ps();
987 fjy1 = _mm256_setzero_ps();
988 fjz1 = _mm256_setzero_ps();
989 fjx2 = _mm256_setzero_ps();
990 fjy2 = _mm256_setzero_ps();
991 fjz2 = _mm256_setzero_ps();
992 fjx3 = _mm256_setzero_ps();
993 fjy3 = _mm256_setzero_ps();
994 fjz3 = _mm256_setzero_ps();
996 /**************************
997 * CALCULATE INTERACTIONS *
998 **************************/
1000 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1003 r00 = _mm256_mul_ps(rsq00,rinv00);
1004 r00 = _mm256_andnot_ps(dummy_mask,r00);
1006 /* Analytical LJ-PME */
1007 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1008 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
1009 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1010 exponent = gmx_simd_exp_r(ewcljrsq);
1011 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1012 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1013 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
1014 vvdw6 = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
1015 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1016 vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
1017 _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
1018 /* fvdw = [vvdw12 - vvdw6 + (C6grid * exponent * ewclj6)/6] / r^2, with ewclj6 = -beta^6 */
1019 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
1021 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1023 /* Update potential sum for this i atom from the interaction with this j atom. */
1024 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
1025 vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
1026 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
1030 fscal = _mm256_and_ps(fscal,cutoff_mask);
1032 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1034 /* Calculate temporary vectorial force */
1035 tx = _mm256_mul_ps(fscal,dx00);
1036 ty = _mm256_mul_ps(fscal,dy00);
1037 tz = _mm256_mul_ps(fscal,dz00);
1039 /* Update vectorial force */
1040 fix0 = _mm256_add_ps(fix0,tx);
1041 fiy0 = _mm256_add_ps(fiy0,ty);
1042 fiz0 = _mm256_add_ps(fiz0,tz);
1044 fjx0 = _mm256_add_ps(fjx0,tx);
1045 fjy0 = _mm256_add_ps(fjy0,ty);
1046 fjz0 = _mm256_add_ps(fjz0,tz);
1050 /**************************
1051 * CALCULATE INTERACTIONS *
1052 **************************/
1054 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1057 r11 = _mm256_mul_ps(rsq11,rinv11);
1058 r11 = _mm256_andnot_ps(dummy_mask,r11);
1060 /* EWALD ELECTROSTATICS */
1062 /* Analytical PME correction */
1063 zeta2 = _mm256_mul_ps(beta2,rsq11);
1064 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1065 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1066 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1067 felec = _mm256_mul_ps(qq11,felec);
1068 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1069 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1070 velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
1071 velec = _mm256_mul_ps(qq11,velec);
1073 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1075 /* Update potential sum for this i atom from the interaction with this j atom. */
1076 velec = _mm256_and_ps(velec,cutoff_mask);
1077 velec = _mm256_andnot_ps(dummy_mask,velec);
1078 velecsum = _mm256_add_ps(velecsum,velec);
1082 fscal = _mm256_and_ps(fscal,cutoff_mask);
1084 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1086 /* Calculate temporary vectorial force */
1087 tx = _mm256_mul_ps(fscal,dx11);
1088 ty = _mm256_mul_ps(fscal,dy11);
1089 tz = _mm256_mul_ps(fscal,dz11);
1091 /* Update vectorial force */
1092 fix1 = _mm256_add_ps(fix1,tx);
1093 fiy1 = _mm256_add_ps(fiy1,ty);
1094 fiz1 = _mm256_add_ps(fiz1,tz);
1096 fjx1 = _mm256_add_ps(fjx1,tx);
1097 fjy1 = _mm256_add_ps(fjy1,ty);
1098 fjz1 = _mm256_add_ps(fjz1,tz);
1102 /**************************
1103 * CALCULATE INTERACTIONS *
1104 **************************/
1106 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1109 r12 = _mm256_mul_ps(rsq12,rinv12);
1110 r12 = _mm256_andnot_ps(dummy_mask,r12);
1112 /* EWALD ELECTROSTATICS */
1114 /* Analytical PME correction */
1115 zeta2 = _mm256_mul_ps(beta2,rsq12);
1116 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1117 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1118 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1119 felec = _mm256_mul_ps(qq12,felec);
1120 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1121 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1122 velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
1123 velec = _mm256_mul_ps(qq12,velec);
1125 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1127 /* Update potential sum for this i atom from the interaction with this j atom. */
1128 velec = _mm256_and_ps(velec,cutoff_mask);
1129 velec = _mm256_andnot_ps(dummy_mask,velec);
1130 velecsum = _mm256_add_ps(velecsum,velec);
1134 fscal = _mm256_and_ps(fscal,cutoff_mask);
1136 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1138 /* Calculate temporary vectorial force */
1139 tx = _mm256_mul_ps(fscal,dx12);
1140 ty = _mm256_mul_ps(fscal,dy12);
1141 tz = _mm256_mul_ps(fscal,dz12);
1143 /* Update vectorial force */
1144 fix1 = _mm256_add_ps(fix1,tx);
1145 fiy1 = _mm256_add_ps(fiy1,ty);
1146 fiz1 = _mm256_add_ps(fiz1,tz);
1148 fjx2 = _mm256_add_ps(fjx2,tx);
1149 fjy2 = _mm256_add_ps(fjy2,ty);
1150 fjz2 = _mm256_add_ps(fjz2,tz);
1154 /**************************
1155 * CALCULATE INTERACTIONS *
1156 **************************/
1158 if (gmx_mm256_any_lt(rsq13,rcutoff2))
1161 r13 = _mm256_mul_ps(rsq13,rinv13);
1162 r13 = _mm256_andnot_ps(dummy_mask,r13);
1164 /* EWALD ELECTROSTATICS */
1166 /* Analytical PME correction */
1167 zeta2 = _mm256_mul_ps(beta2,rsq13);
1168 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
1169 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1170 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1171 felec = _mm256_mul_ps(qq13,felec);
1172 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1173 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1174 velec = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
1175 velec = _mm256_mul_ps(qq13,velec);
1177 cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
1179 /* Update potential sum for this i atom from the interaction with this j atom. */
1180 velec = _mm256_and_ps(velec,cutoff_mask);
1181 velec = _mm256_andnot_ps(dummy_mask,velec);
1182 velecsum = _mm256_add_ps(velecsum,velec);
1186 fscal = _mm256_and_ps(fscal,cutoff_mask);
1188 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1190 /* Calculate temporary vectorial force */
1191 tx = _mm256_mul_ps(fscal,dx13);
1192 ty = _mm256_mul_ps(fscal,dy13);
1193 tz = _mm256_mul_ps(fscal,dz13);
1195 /* Update vectorial force */
1196 fix1 = _mm256_add_ps(fix1,tx);
1197 fiy1 = _mm256_add_ps(fiy1,ty);
1198 fiz1 = _mm256_add_ps(fiz1,tz);
1200 fjx3 = _mm256_add_ps(fjx3,tx);
1201 fjy3 = _mm256_add_ps(fjy3,ty);
1202 fjz3 = _mm256_add_ps(fjz3,tz);
1206 /**************************
1207 * CALCULATE INTERACTIONS *
1208 **************************/
1210 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1213 r21 = _mm256_mul_ps(rsq21,rinv21);
1214 r21 = _mm256_andnot_ps(dummy_mask,r21);
1216 /* EWALD ELECTROSTATICS */
1218 /* Analytical PME correction */
1219 zeta2 = _mm256_mul_ps(beta2,rsq21);
1220 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
1221 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1222 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1223 felec = _mm256_mul_ps(qq21,felec);
1224 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1225 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1226 velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
1227 velec = _mm256_mul_ps(qq21,velec);
1229 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1231 /* Update potential sum for this i atom from the interaction with this j atom. */
1232 velec = _mm256_and_ps(velec,cutoff_mask);
1233 velec = _mm256_andnot_ps(dummy_mask,velec);
1234 velecsum = _mm256_add_ps(velecsum,velec);
1238 fscal = _mm256_and_ps(fscal,cutoff_mask);
1240 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1242 /* Calculate temporary vectorial force */
1243 tx = _mm256_mul_ps(fscal,dx21);
1244 ty = _mm256_mul_ps(fscal,dy21);
1245 tz = _mm256_mul_ps(fscal,dz21);
1247 /* Update vectorial force */
1248 fix2 = _mm256_add_ps(fix2,tx);
1249 fiy2 = _mm256_add_ps(fiy2,ty);
1250 fiz2 = _mm256_add_ps(fiz2,tz);
1252 fjx1 = _mm256_add_ps(fjx1,tx);
1253 fjy1 = _mm256_add_ps(fjy1,ty);
1254 fjz1 = _mm256_add_ps(fjz1,tz);
1258 /**************************
1259 * CALCULATE INTERACTIONS *
1260 **************************/
1262 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1265 r22 = _mm256_mul_ps(rsq22,rinv22);
1266 r22 = _mm256_andnot_ps(dummy_mask,r22);
1268 /* EWALD ELECTROSTATICS */
1270 /* Analytical PME correction */
1271 zeta2 = _mm256_mul_ps(beta2,rsq22);
1272 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
1273 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1274 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1275 felec = _mm256_mul_ps(qq22,felec);
1276 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1277 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1278 velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
1279 velec = _mm256_mul_ps(qq22,velec);
1281 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1283 /* Update potential sum for this i atom from the interaction with this j atom. */
1284 velec = _mm256_and_ps(velec,cutoff_mask);
1285 velec = _mm256_andnot_ps(dummy_mask,velec);
1286 velecsum = _mm256_add_ps(velecsum,velec);
1290 fscal = _mm256_and_ps(fscal,cutoff_mask);
1292 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1294 /* Calculate temporary vectorial force */
1295 tx = _mm256_mul_ps(fscal,dx22);
1296 ty = _mm256_mul_ps(fscal,dy22);
1297 tz = _mm256_mul_ps(fscal,dz22);
1299 /* Update vectorial force */
1300 fix2 = _mm256_add_ps(fix2,tx);
1301 fiy2 = _mm256_add_ps(fiy2,ty);
1302 fiz2 = _mm256_add_ps(fiz2,tz);
1304 fjx2 = _mm256_add_ps(fjx2,tx);
1305 fjy2 = _mm256_add_ps(fjy2,ty);
1306 fjz2 = _mm256_add_ps(fjz2,tz);
1310 /**************************
1311 * CALCULATE INTERACTIONS *
1312 **************************/
1314 if (gmx_mm256_any_lt(rsq23,rcutoff2))
1317 r23 = _mm256_mul_ps(rsq23,rinv23);
1318 r23 = _mm256_andnot_ps(dummy_mask,r23);
1320 /* EWALD ELECTROSTATICS */
1322 /* Analytical PME correction */
1323 zeta2 = _mm256_mul_ps(beta2,rsq23);
1324 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
1325 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1326 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1327 felec = _mm256_mul_ps(qq23,felec);
1328 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1329 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1330 velec = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
1331 velec = _mm256_mul_ps(qq23,velec);
1333 cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
1335 /* Update potential sum for this i atom from the interaction with this j atom. */
1336 velec = _mm256_and_ps(velec,cutoff_mask);
1337 velec = _mm256_andnot_ps(dummy_mask,velec);
1338 velecsum = _mm256_add_ps(velecsum,velec);
1342 fscal = _mm256_and_ps(fscal,cutoff_mask);
1344 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1346 /* Calculate temporary vectorial force */
1347 tx = _mm256_mul_ps(fscal,dx23);
1348 ty = _mm256_mul_ps(fscal,dy23);
1349 tz = _mm256_mul_ps(fscal,dz23);
1351 /* Update vectorial force */
1352 fix2 = _mm256_add_ps(fix2,tx);
1353 fiy2 = _mm256_add_ps(fiy2,ty);
1354 fiz2 = _mm256_add_ps(fiz2,tz);
1356 fjx3 = _mm256_add_ps(fjx3,tx);
1357 fjy3 = _mm256_add_ps(fjy3,ty);
1358 fjz3 = _mm256_add_ps(fjz3,tz);
1362 /**************************
1363 * CALCULATE INTERACTIONS *
1364 **************************/
1366 if (gmx_mm256_any_lt(rsq31,rcutoff2))
1369 r31 = _mm256_mul_ps(rsq31,rinv31);
1370 r31 = _mm256_andnot_ps(dummy_mask,r31);
1372 /* EWALD ELECTROSTATICS */
1374 /* Analytical PME correction */
1375 zeta2 = _mm256_mul_ps(beta2,rsq31);
1376 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
1377 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1378 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1379 felec = _mm256_mul_ps(qq31,felec);
1380 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1381 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1382 velec = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
1383 velec = _mm256_mul_ps(qq31,velec);
1385 cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
1387 /* Update potential sum for this i atom from the interaction with this j atom. */
1388 velec = _mm256_and_ps(velec,cutoff_mask);
1389 velec = _mm256_andnot_ps(dummy_mask,velec);
1390 velecsum = _mm256_add_ps(velecsum,velec);
1394 fscal = _mm256_and_ps(fscal,cutoff_mask);
1396 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1398 /* Calculate temporary vectorial force */
1399 tx = _mm256_mul_ps(fscal,dx31);
1400 ty = _mm256_mul_ps(fscal,dy31);
1401 tz = _mm256_mul_ps(fscal,dz31);
1403 /* Update vectorial force */
1404 fix3 = _mm256_add_ps(fix3,tx);
1405 fiy3 = _mm256_add_ps(fiy3,ty);
1406 fiz3 = _mm256_add_ps(fiz3,tz);
1408 fjx1 = _mm256_add_ps(fjx1,tx);
1409 fjy1 = _mm256_add_ps(fjy1,ty);
1410 fjz1 = _mm256_add_ps(fjz1,tz);
1414 /**************************
1415 * CALCULATE INTERACTIONS *
1416 **************************/
1418 if (gmx_mm256_any_lt(rsq32,rcutoff2))
1421 r32 = _mm256_mul_ps(rsq32,rinv32);
1422 r32 = _mm256_andnot_ps(dummy_mask,r32);
1424 /* EWALD ELECTROSTATICS */
1426 /* Analytical PME correction */
1427 zeta2 = _mm256_mul_ps(beta2,rsq32);
1428 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
1429 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1430 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1431 felec = _mm256_mul_ps(qq32,felec);
1432 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1433 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1434 velec = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
1435 velec = _mm256_mul_ps(qq32,velec);
1437 cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
1439 /* Update potential sum for this i atom from the interaction with this j atom. */
1440 velec = _mm256_and_ps(velec,cutoff_mask);
1441 velec = _mm256_andnot_ps(dummy_mask,velec);
1442 velecsum = _mm256_add_ps(velecsum,velec);
1446 fscal = _mm256_and_ps(fscal,cutoff_mask);
1448 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1450 /* Calculate temporary vectorial force */
1451 tx = _mm256_mul_ps(fscal,dx32);
1452 ty = _mm256_mul_ps(fscal,dy32);
1453 tz = _mm256_mul_ps(fscal,dz32);
1455 /* Update vectorial force */
1456 fix3 = _mm256_add_ps(fix3,tx);
1457 fiy3 = _mm256_add_ps(fiy3,ty);
1458 fiz3 = _mm256_add_ps(fiz3,tz);
1460 fjx2 = _mm256_add_ps(fjx2,tx);
1461 fjy2 = _mm256_add_ps(fjy2,ty);
1462 fjz2 = _mm256_add_ps(fjz2,tz);
1466 /**************************
1467 * CALCULATE INTERACTIONS *
1468 **************************/
1470 if (gmx_mm256_any_lt(rsq33,rcutoff2))
1473 r33 = _mm256_mul_ps(rsq33,rinv33);
1474 r33 = _mm256_andnot_ps(dummy_mask,r33);
1476 /* EWALD ELECTROSTATICS */
1478 /* Analytical PME correction */
1479 zeta2 = _mm256_mul_ps(beta2,rsq33);
1480 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
1481 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1482 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1483 felec = _mm256_mul_ps(qq33,felec);
1484 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1485 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1486 velec = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
1487 velec = _mm256_mul_ps(qq33,velec);
1489 cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
1491 /* Update potential sum for this i atom from the interaction with this j atom. */
1492 velec = _mm256_and_ps(velec,cutoff_mask);
1493 velec = _mm256_andnot_ps(dummy_mask,velec);
1494 velecsum = _mm256_add_ps(velecsum,velec);
1498 fscal = _mm256_and_ps(fscal,cutoff_mask);
1500 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1502 /* Calculate temporary vectorial force */
1503 tx = _mm256_mul_ps(fscal,dx33);
1504 ty = _mm256_mul_ps(fscal,dy33);
1505 tz = _mm256_mul_ps(fscal,dz33);
1507 /* Update vectorial force */
1508 fix3 = _mm256_add_ps(fix3,tx);
1509 fiy3 = _mm256_add_ps(fiy3,ty);
1510 fiz3 = _mm256_add_ps(fiz3,tz);
1512 fjx3 = _mm256_add_ps(fjx3,tx);
1513 fjy3 = _mm256_add_ps(fjy3,ty);
1514 fjz3 = _mm256_add_ps(fjz3,tz);
1518 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1519 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1520 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1521 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1522 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1523 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1524 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1525 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
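/* Forces for padded lanes are routed to the local scratch buffer instead of f, so the
 * swizzled decrement below can store all eight lanes unconditionally.
 */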
1527 gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1528 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1529 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1531 /* Inner loop uses 1056 flops */
1534 /* End of innermost loop */
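/* Reduce the eight lanes of each i-site force accumulator, add the result to the i atoms in
 * f, and add the total to fshift for the shift-force (virial) bookkeeping.
 */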
1536 gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1537 f+i_coord_offset,fshift+i_shift_offset);
1540 /* Update potential energies */
1541 gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1542 gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
1544 /* Increment number of inner iterations */
1545 inneriter += j_index_end - j_index_start;
1547 /* Outer loop uses 26 flops */
1550 /* Increment number of outer iterations */
1553 /* Update outer/inner flops */
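/* inc_nrnb records these operation counts for the performance accounting reported at the end
 * of the run.
 */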
1555 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*1056);
1558 * Gromacs nonbonded kernel: nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_F_avx_256_single
1559 * Electrostatics interaction: Ewald
1560 * VdW interaction: LJEwald
1561 * Geometry: Water4-Water4
1562 * Calculate force/pot: Force
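 * This kernel repeats the structure of the VF kernel above, but only forces are accumulated;
 * the potential sums and the energy-group updates are omitted.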
1565 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_F_avx_256_single
1566 (t_nblist * gmx_restrict nlist,
1567 rvec * gmx_restrict xx,
1568 rvec * gmx_restrict ff,
1569 t_forcerec * gmx_restrict fr,
1570 t_mdatoms * gmx_restrict mdatoms,
1571 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1572 t_nrnb * gmx_restrict nrnb)
1574 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1575 * just 0 for non-waters.
1576 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1577 * jnr indices corresponding to data put in the eight positions in the SIMD register.
1579 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1580 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1581 int jnrA,jnrB,jnrC,jnrD;
1582 int jnrE,jnrF,jnrG,jnrH;
1583 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1584 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1585 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1586 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1587 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1588 real rcutoff_scalar;
1589 real *shiftvec,*fshift,*x,*f;
1590 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1591 real scratch[4*DIM];
1592 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1593 real * vdwioffsetptr0;
1594 real * vdwgridioffsetptr0;
1595 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1596 real * vdwioffsetptr1;
1597 real * vdwgridioffsetptr1;
1598 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1599 real * vdwioffsetptr2;
1600 real * vdwgridioffsetptr2;
1601 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1602 real * vdwioffsetptr3;
1603 real * vdwgridioffsetptr3;
1604 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1605 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1606 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1607 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1608 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1609 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1610 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1611 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
1612 __m256 jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1613 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1614 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1615 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1616 __m256 dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1617 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1618 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1619 __m256 dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1620 __m256 dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1621 __m256 dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1622 __m256 dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1623 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
1626 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1629 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
1630 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
1642 __m256 ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
1643 __m256 one_half = _mm256_set1_ps(0.5);
1644 __m256 minus_one = _mm256_set1_ps(-1.0);
1646 __m128i ewitab_lo,ewitab_hi;
1647 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1648 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1650 __m256 dummy_mask,cutoff_mask;
1651 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1652 __m256 one = _mm256_set1_ps(1.0);
1653 __m256 two = _mm256_set1_ps(2.0);
1659 jindex = nlist->jindex;
1661 shiftidx = nlist->shift;
1663 shiftvec = fr->shift_vec[0];
1664 fshift = fr->fshift[0];
1665 facel = _mm256_set1_ps(fr->epsfac);
1666 charge = mdatoms->chargeA;
1667 nvdwtype = fr->ntype;
1668 vdwparam = fr->nbfp;
1669 vdwtype = mdatoms->typeA;
1670 vdwgridparam = fr->ljpme_c6grid;
1671 sh_lj_ewald = _mm256_set1_ps(fr->ic->sh_lj_ewald);
1672 ewclj = _mm256_set1_ps(fr->ewaldcoeff_lj);
1673 ewclj2 = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
1675 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
1676 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1677 beta2 = _mm256_mul_ps(beta,beta);
1678 beta3 = _mm256_mul_ps(beta,beta2);
1680 ewtab = fr->ic->tabq_coul_F;
1681 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
1682 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1684 /* Setup water-specific parameters */
1685 inr = nlist->iinr[0];
1686 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1687 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1688 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1689 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1690 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
1692 jq1 = _mm256_set1_ps(charge[inr+1]);
1693 jq2 = _mm256_set1_ps(charge[inr+2]);
1694 jq3 = _mm256_set1_ps(charge[inr+3]);
1695 vdwjidx0A = 2*vdwtype[inr+0];
1696 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1697 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1698 c6grid_00 = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
1699 qq11 = _mm256_mul_ps(iq1,jq1);
1700 qq12 = _mm256_mul_ps(iq1,jq2);
1701 qq13 = _mm256_mul_ps(iq1,jq3);
1702 qq21 = _mm256_mul_ps(iq2,jq1);
1703 qq22 = _mm256_mul_ps(iq2,jq2);
1704 qq23 = _mm256_mul_ps(iq2,jq3);
1705 qq31 = _mm256_mul_ps(iq3,jq1);
1706 qq32 = _mm256_mul_ps(iq3,jq2);
1707 qq33 = _mm256_mul_ps(iq3,jq3);
1709 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1710 rcutoff_scalar = fr->rcoulomb;
1711 rcutoff = _mm256_set1_ps(rcutoff_scalar);
1712 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
1714 sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
1715 rvdw = _mm256_set1_ps(fr->rvdw);
1717 /* Avoid stupid compiler warnings */
1718 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1719 j_coord_offsetA = 0;
1720 j_coord_offsetB = 0;
1721 j_coord_offsetC = 0;
1722 j_coord_offsetD = 0;
1723 j_coord_offsetE = 0;
1724 j_coord_offsetF = 0;
1725 j_coord_offsetG = 0;
1726 j_coord_offsetH = 0;
1731 for(iidx=0;iidx<4*DIM;iidx++)
1733 scratch[iidx] = 0.0;
1736 /* Start outer loop over neighborlists */
1737 for(iidx=0; iidx<nri; iidx++)
1739 /* Load shift vector for this list */
1740 i_shift_offset = DIM*shiftidx[iidx];
1742 /* Load limits for loop over neighbors */
1743 j_index_start = jindex[iidx];
1744 j_index_end = jindex[iidx+1];
1746 /* Get outer coordinate index */
1748 i_coord_offset = DIM*inr;
1750 /* Load i particle coords and add shift vector */
1751 gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1752 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1754 fix0 = _mm256_setzero_ps();
1755 fiy0 = _mm256_setzero_ps();
1756 fiz0 = _mm256_setzero_ps();
1757 fix1 = _mm256_setzero_ps();
1758 fiy1 = _mm256_setzero_ps();
1759 fiz1 = _mm256_setzero_ps();
1760 fix2 = _mm256_setzero_ps();
1761 fiy2 = _mm256_setzero_ps();
1762 fiz2 = _mm256_setzero_ps();
1763 fix3 = _mm256_setzero_ps();
1764 fiy3 = _mm256_setzero_ps();
1765 fiz3 = _mm256_setzero_ps();
1767 /* Start inner kernel loop */
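/* Note added for clarity (not part of the generated kernel): this unrolled loop
 * runs only while all eight jjnr entries are real (non-negative); a padded tail
 * of the neighbor list is handled by the masked epilogue in the
 * if(jidx<j_index_end) block further down.
 */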
1768 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1769 {
1771 /* Get j neighbor index, and coordinate index */
1772 jnrA = jjnr[jidx];
1773 jnrB = jjnr[jidx+1];
1774 jnrC = jjnr[jidx+2];
1775 jnrD = jjnr[jidx+3];
1776 jnrE = jjnr[jidx+4];
1777 jnrF = jjnr[jidx+5];
1778 jnrG = jjnr[jidx+6];
1779 jnrH = jjnr[jidx+7];
1780 j_coord_offsetA = DIM*jnrA;
1781 j_coord_offsetB = DIM*jnrB;
1782 j_coord_offsetC = DIM*jnrC;
1783 j_coord_offsetD = DIM*jnrD;
1784 j_coord_offsetE = DIM*jnrE;
1785 j_coord_offsetF = DIM*jnrF;
1786 j_coord_offsetG = DIM*jnrG;
1787 j_coord_offsetH = DIM*jnrH;
1789 /* load j atom coordinates */
1790 gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1791 x+j_coord_offsetC,x+j_coord_offsetD,
1792 x+j_coord_offsetE,x+j_coord_offsetF,
1793 x+j_coord_offsetG,x+j_coord_offsetH,
1794 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1795 &jy2,&jz2,&jx3,&jy3,&jz3);
1797 /* Calculate displacement vector */
1798 dx00 = _mm256_sub_ps(ix0,jx0);
1799 dy00 = _mm256_sub_ps(iy0,jy0);
1800 dz00 = _mm256_sub_ps(iz0,jz0);
1801 dx11 = _mm256_sub_ps(ix1,jx1);
1802 dy11 = _mm256_sub_ps(iy1,jy1);
1803 dz11 = _mm256_sub_ps(iz1,jz1);
1804 dx12 = _mm256_sub_ps(ix1,jx2);
1805 dy12 = _mm256_sub_ps(iy1,jy2);
1806 dz12 = _mm256_sub_ps(iz1,jz2);
1807 dx13 = _mm256_sub_ps(ix1,jx3);
1808 dy13 = _mm256_sub_ps(iy1,jy3);
1809 dz13 = _mm256_sub_ps(iz1,jz3);
1810 dx21 = _mm256_sub_ps(ix2,jx1);
1811 dy21 = _mm256_sub_ps(iy2,jy1);
1812 dz21 = _mm256_sub_ps(iz2,jz1);
1813 dx22 = _mm256_sub_ps(ix2,jx2);
1814 dy22 = _mm256_sub_ps(iy2,jy2);
1815 dz22 = _mm256_sub_ps(iz2,jz2);
1816 dx23 = _mm256_sub_ps(ix2,jx3);
1817 dy23 = _mm256_sub_ps(iy2,jy3);
1818 dz23 = _mm256_sub_ps(iz2,jz3);
1819 dx31 = _mm256_sub_ps(ix3,jx1);
1820 dy31 = _mm256_sub_ps(iy3,jy1);
1821 dz31 = _mm256_sub_ps(iz3,jz1);
1822 dx32 = _mm256_sub_ps(ix3,jx2);
1823 dy32 = _mm256_sub_ps(iy3,jy2);
1824 dz32 = _mm256_sub_ps(iz3,jz2);
1825 dx33 = _mm256_sub_ps(ix3,jx3);
1826 dy33 = _mm256_sub_ps(iy3,jy3);
1827 dz33 = _mm256_sub_ps(iz3,jz3);
1829 /* Calculate squared distance and things based on it */
1830 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1831 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1832 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1833 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
1834 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1835 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1836 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
1837 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
1838 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
1839 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
1841 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
1842 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
1843 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
1844 rinv13 = gmx_mm256_invsqrt_ps(rsq13);
1845 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
1846 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
1847 rinv23 = gmx_mm256_invsqrt_ps(rsq23);
1848 rinv31 = gmx_mm256_invsqrt_ps(rsq31);
1849 rinv32 = gmx_mm256_invsqrt_ps(rsq32);
1850 rinv33 = gmx_mm256_invsqrt_ps(rsq33);
1852 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1853 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
1854 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
1855 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
1856 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
1857 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
1858 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
1859 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
1860 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
1861 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
1863 fjx0 = _mm256_setzero_ps();
1864 fjy0 = _mm256_setzero_ps();
1865 fjz0 = _mm256_setzero_ps();
1866 fjx1 = _mm256_setzero_ps();
1867 fjy1 = _mm256_setzero_ps();
1868 fjz1 = _mm256_setzero_ps();
1869 fjx2 = _mm256_setzero_ps();
1870 fjy2 = _mm256_setzero_ps();
1871 fjz2 = _mm256_setzero_ps();
1872 fjx3 = _mm256_setzero_ps();
1873 fjy3 = _mm256_setzero_ps();
1874 fjz3 = _mm256_setzero_ps();
1876 /**************************
1877 * CALCULATE INTERACTIONS *
1878 **************************/
1880 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1883 r00 = _mm256_mul_ps(rsq00,rinv00);
1885 /* Analytical LJ-PME */
1886 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1887 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
1888 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1889 exponent = gmx_simd_exp_r(ewcljrsq);
1890 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1891 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1892 /* f6A = 6 * C6grid * (1 - poly) */
1893 f6A = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1894 /* f6B = C6grid * exponent * beta^6 */
1895 f6B = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1896 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1897 fvdw = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1899 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1901 fscal = fvdw;
1903 fscal = _mm256_and_ps(fscal,cutoff_mask);
1905 /* Calculate temporary vectorial force */
1906 tx = _mm256_mul_ps(fscal,dx00);
1907 ty = _mm256_mul_ps(fscal,dy00);
1908 tz = _mm256_mul_ps(fscal,dz00);
1910 /* Update vectorial force */
1911 fix0 = _mm256_add_ps(fix0,tx);
1912 fiy0 = _mm256_add_ps(fiy0,ty);
1913 fiz0 = _mm256_add_ps(fiz0,tz);
1915 fjx0 = _mm256_add_ps(fjx0,tx);
1916 fjy0 = _mm256_add_ps(fjy0,ty);
1917 fjz0 = _mm256_add_ps(fjz0,tz);
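/* Reference note, added for clarity (assumes c6_00, c12_00 and c6grid_00 store
 * 6*C6, 12*C12 and 6*C6grid as usual for these kernels): with
 *     g(x) = exp(-x^2)*(1 + x^2 + x^4/2),   x = beta_lj*r,
 * the real-space LJ-PME pair potential handled above is
 *     V(r) = C12/r^12 - C6/r^6 + C6grid*(1 - g(x))/r^6,
 * and fvdw is -dV/dr divided by r:
 *     fvdw = ( 12*C12/r^13 - (6*C6 - 6*C6grid*(1 - g(x)))/r^7
 *              - C6grid*beta_lj^6*exp(-x^2)/r ) / r.
 */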
1921 /**************************
1922 * CALCULATE INTERACTIONS *
1923 **************************/
1925 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1928 r11 = _mm256_mul_ps(rsq11,rinv11);
1930 /* EWALD ELECTROSTATICS */
1932 /* Analytical PME correction */
1933 zeta2 = _mm256_mul_ps(beta2,rsq11);
1934 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1935 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1936 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1937 felec = _mm256_mul_ps(qq11,felec);
1939 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1941 fscal = felec;
1943 fscal = _mm256_and_ps(fscal,cutoff_mask);
1945 /* Calculate temporary vectorial force */
1946 tx = _mm256_mul_ps(fscal,dx11);
1947 ty = _mm256_mul_ps(fscal,dy11);
1948 tz = _mm256_mul_ps(fscal,dz11);
1950 /* Update vectorial force */
1951 fix1 = _mm256_add_ps(fix1,tx);
1952 fiy1 = _mm256_add_ps(fiy1,ty);
1953 fiz1 = _mm256_add_ps(fiz1,tz);
1955 fjx1 = _mm256_add_ps(fjx1,tx);
1956 fjy1 = _mm256_add_ps(fjy1,ty);
1957 fjz1 = _mm256_add_ps(fjz1,tz);
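/* Reference note, added for clarity (not generated code): felec above is the
 * short-range Ewald force divided by r,
 *     felec = qq * ( erfc(beta*r)/r + (2*beta/sqrt(pi))*exp(-(beta*r)^2) ) / r^2,
 * expressed as the bare Coulomb term qq/r^3 plus an analytical correction;
 * gmx_mm256_pmecorrF_ps(z^2) approximates (2/sqrt(pi))*exp(-z^2)/z^2 - erf(z)/z^3
 * with z = beta*r, which is why it is scaled by beta^3 before multiplying by qq.
 */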
1961 /**************************
1962 * CALCULATE INTERACTIONS *
1963 **************************/
1965 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1968 r12 = _mm256_mul_ps(rsq12,rinv12);
1970 /* EWALD ELECTROSTATICS */
1972 /* Analytical PME correction */
1973 zeta2 = _mm256_mul_ps(beta2,rsq12);
1974 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1975 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1976 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1977 felec = _mm256_mul_ps(qq12,felec);
1979 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1981 fscal = felec;
1983 fscal = _mm256_and_ps(fscal,cutoff_mask);
1985 /* Calculate temporary vectorial force */
1986 tx = _mm256_mul_ps(fscal,dx12);
1987 ty = _mm256_mul_ps(fscal,dy12);
1988 tz = _mm256_mul_ps(fscal,dz12);
1990 /* Update vectorial force */
1991 fix1 = _mm256_add_ps(fix1,tx);
1992 fiy1 = _mm256_add_ps(fiy1,ty);
1993 fiz1 = _mm256_add_ps(fiz1,tz);
1995 fjx2 = _mm256_add_ps(fjx2,tx);
1996 fjy2 = _mm256_add_ps(fjy2,ty);
1997 fjz2 = _mm256_add_ps(fjz2,tz);
2001 /**************************
2002 * CALCULATE INTERACTIONS *
2003 **************************/
2005 if (gmx_mm256_any_lt(rsq13,rcutoff2))
2008 r13 = _mm256_mul_ps(rsq13,rinv13);
2010 /* EWALD ELECTROSTATICS */
2012 /* Analytical PME correction */
2013 zeta2 = _mm256_mul_ps(beta2,rsq13);
2014 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
2015 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2016 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2017 felec = _mm256_mul_ps(qq13,felec);
2019 cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
2021 fscal = felec;
2023 fscal = _mm256_and_ps(fscal,cutoff_mask);
2025 /* Calculate temporary vectorial force */
2026 tx = _mm256_mul_ps(fscal,dx13);
2027 ty = _mm256_mul_ps(fscal,dy13);
2028 tz = _mm256_mul_ps(fscal,dz13);
2030 /* Update vectorial force */
2031 fix1 = _mm256_add_ps(fix1,tx);
2032 fiy1 = _mm256_add_ps(fiy1,ty);
2033 fiz1 = _mm256_add_ps(fiz1,tz);
2035 fjx3 = _mm256_add_ps(fjx3,tx);
2036 fjy3 = _mm256_add_ps(fjy3,ty);
2037 fjz3 = _mm256_add_ps(fjz3,tz);
2041 /**************************
2042 * CALCULATE INTERACTIONS *
2043 **************************/
2045 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2048 r21 = _mm256_mul_ps(rsq21,rinv21);
2050 /* EWALD ELECTROSTATICS */
2052 /* Analytical PME correction */
2053 zeta2 = _mm256_mul_ps(beta2,rsq21);
2054 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
2055 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2056 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2057 felec = _mm256_mul_ps(qq21,felec);
2059 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2061 fscal = felec;
2063 fscal = _mm256_and_ps(fscal,cutoff_mask);
2065 /* Calculate temporary vectorial force */
2066 tx = _mm256_mul_ps(fscal,dx21);
2067 ty = _mm256_mul_ps(fscal,dy21);
2068 tz = _mm256_mul_ps(fscal,dz21);
2070 /* Update vectorial force */
2071 fix2 = _mm256_add_ps(fix2,tx);
2072 fiy2 = _mm256_add_ps(fiy2,ty);
2073 fiz2 = _mm256_add_ps(fiz2,tz);
2075 fjx1 = _mm256_add_ps(fjx1,tx);
2076 fjy1 = _mm256_add_ps(fjy1,ty);
2077 fjz1 = _mm256_add_ps(fjz1,tz);
2081 /**************************
2082 * CALCULATE INTERACTIONS *
2083 **************************/
2085 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2088 r22 = _mm256_mul_ps(rsq22,rinv22);
2090 /* EWALD ELECTROSTATICS */
2092 /* Analytical PME correction */
2093 zeta2 = _mm256_mul_ps(beta2,rsq22);
2094 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
2095 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2096 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2097 felec = _mm256_mul_ps(qq22,felec);
2099 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2101 fscal = felec;
2103 fscal = _mm256_and_ps(fscal,cutoff_mask);
2105 /* Calculate temporary vectorial force */
2106 tx = _mm256_mul_ps(fscal,dx22);
2107 ty = _mm256_mul_ps(fscal,dy22);
2108 tz = _mm256_mul_ps(fscal,dz22);
2110 /* Update vectorial force */
2111 fix2 = _mm256_add_ps(fix2,tx);
2112 fiy2 = _mm256_add_ps(fiy2,ty);
2113 fiz2 = _mm256_add_ps(fiz2,tz);
2115 fjx2 = _mm256_add_ps(fjx2,tx);
2116 fjy2 = _mm256_add_ps(fjy2,ty);
2117 fjz2 = _mm256_add_ps(fjz2,tz);
2121 /**************************
2122 * CALCULATE INTERACTIONS *
2123 **************************/
2125 if (gmx_mm256_any_lt(rsq23,rcutoff2))
2128 r23 = _mm256_mul_ps(rsq23,rinv23);
2130 /* EWALD ELECTROSTATICS */
2132 /* Analytical PME correction */
2133 zeta2 = _mm256_mul_ps(beta2,rsq23);
2134 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
2135 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2136 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2137 felec = _mm256_mul_ps(qq23,felec);
2139 cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
2141 fscal = felec;
2143 fscal = _mm256_and_ps(fscal,cutoff_mask);
2145 /* Calculate temporary vectorial force */
2146 tx = _mm256_mul_ps(fscal,dx23);
2147 ty = _mm256_mul_ps(fscal,dy23);
2148 tz = _mm256_mul_ps(fscal,dz23);
2150 /* Update vectorial force */
2151 fix2 = _mm256_add_ps(fix2,tx);
2152 fiy2 = _mm256_add_ps(fiy2,ty);
2153 fiz2 = _mm256_add_ps(fiz2,tz);
2155 fjx3 = _mm256_add_ps(fjx3,tx);
2156 fjy3 = _mm256_add_ps(fjy3,ty);
2157 fjz3 = _mm256_add_ps(fjz3,tz);
2161 /**************************
2162 * CALCULATE INTERACTIONS *
2163 **************************/
2165 if (gmx_mm256_any_lt(rsq31,rcutoff2))
2168 r31 = _mm256_mul_ps(rsq31,rinv31);
2170 /* EWALD ELECTROSTATICS */
2172 /* Analytical PME correction */
2173 zeta2 = _mm256_mul_ps(beta2,rsq31);
2174 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
2175 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2176 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2177 felec = _mm256_mul_ps(qq31,felec);
2179 cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
2181 fscal = felec;
2183 fscal = _mm256_and_ps(fscal,cutoff_mask);
2185 /* Calculate temporary vectorial force */
2186 tx = _mm256_mul_ps(fscal,dx31);
2187 ty = _mm256_mul_ps(fscal,dy31);
2188 tz = _mm256_mul_ps(fscal,dz31);
2190 /* Update vectorial force */
2191 fix3 = _mm256_add_ps(fix3,tx);
2192 fiy3 = _mm256_add_ps(fiy3,ty);
2193 fiz3 = _mm256_add_ps(fiz3,tz);
2195 fjx1 = _mm256_add_ps(fjx1,tx);
2196 fjy1 = _mm256_add_ps(fjy1,ty);
2197 fjz1 = _mm256_add_ps(fjz1,tz);
2201 /**************************
2202 * CALCULATE INTERACTIONS *
2203 **************************/
2205 if (gmx_mm256_any_lt(rsq32,rcutoff2))
2208 r32 = _mm256_mul_ps(rsq32,rinv32);
2210 /* EWALD ELECTROSTATICS */
2212 /* Analytical PME correction */
2213 zeta2 = _mm256_mul_ps(beta2,rsq32);
2214 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
2215 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2216 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2217 felec = _mm256_mul_ps(qq32,felec);
2219 cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
2221 fscal = felec;
2223 fscal = _mm256_and_ps(fscal,cutoff_mask);
2225 /* Calculate temporary vectorial force */
2226 tx = _mm256_mul_ps(fscal,dx32);
2227 ty = _mm256_mul_ps(fscal,dy32);
2228 tz = _mm256_mul_ps(fscal,dz32);
2230 /* Update vectorial force */
2231 fix3 = _mm256_add_ps(fix3,tx);
2232 fiy3 = _mm256_add_ps(fiy3,ty);
2233 fiz3 = _mm256_add_ps(fiz3,tz);
2235 fjx2 = _mm256_add_ps(fjx2,tx);
2236 fjy2 = _mm256_add_ps(fjy2,ty);
2237 fjz2 = _mm256_add_ps(fjz2,tz);
2241 /**************************
2242 * CALCULATE INTERACTIONS *
2243 **************************/
2245 if (gmx_mm256_any_lt(rsq33,rcutoff2))
2248 r33 = _mm256_mul_ps(rsq33,rinv33);
2250 /* EWALD ELECTROSTATICS */
2252 /* Analytical PME correction */
2253 zeta2 = _mm256_mul_ps(beta2,rsq33);
2254 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
2255 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2256 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2257 felec = _mm256_mul_ps(qq33,felec);
2259 cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
2261 fscal = felec;
2263 fscal = _mm256_and_ps(fscal,cutoff_mask);
2265 /* Calculate temporary vectorial force */
2266 tx = _mm256_mul_ps(fscal,dx33);
2267 ty = _mm256_mul_ps(fscal,dy33);
2268 tz = _mm256_mul_ps(fscal,dz33);
2270 /* Update vectorial force */
2271 fix3 = _mm256_add_ps(fix3,tx);
2272 fiy3 = _mm256_add_ps(fiy3,ty);
2273 fiz3 = _mm256_add_ps(fiz3,tz);
2275 fjx3 = _mm256_add_ps(fjx3,tx);
2276 fjy3 = _mm256_add_ps(fjy3,ty);
2277 fjz3 = _mm256_add_ps(fjz3,tz);
2281 fjptrA = f+j_coord_offsetA;
2282 fjptrB = f+j_coord_offsetB;
2283 fjptrC = f+j_coord_offsetC;
2284 fjptrD = f+j_coord_offsetD;
2285 fjptrE = f+j_coord_offsetE;
2286 fjptrF = f+j_coord_offsetF;
2287 fjptrG = f+j_coord_offsetG;
2288 fjptrH = f+j_coord_offsetH;
2290 gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2291 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2292 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2294 /* Inner loop uses 583 flops */
2295 }
2297 if(jidx<j_index_end)
2298 {
2300 /* Get j neighbor index, and coordinate index */
2301 jnrlistA = jjnr[jidx];
2302 jnrlistB = jjnr[jidx+1];
2303 jnrlistC = jjnr[jidx+2];
2304 jnrlistD = jjnr[jidx+3];
2305 jnrlistE = jjnr[jidx+4];
2306 jnrlistF = jjnr[jidx+5];
2307 jnrlistG = jjnr[jidx+6];
2308 jnrlistH = jjnr[jidx+7];
2309 /* Sign of each element will be negative for non-real atoms.
2310 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2311 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
2312 */
2313 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
2314 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
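/* Illustration, added for clarity (not generated code): each 128-bit half
 * compares four packed j indices against zero, so a padding entry such as
 * jjnr[jidx+k] == -1 produces an all-ones lane (0xFFFFFFFF) while a real index
 * produces 0x0; the two halves are then combined into the 256-bit dummy_mask
 * consumed by the _mm256_andnot_ps calls below.
 */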
2316 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
2317 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
2318 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
2319 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
2320 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
2321 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
2322 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
2323 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
2324 j_coord_offsetA = DIM*jnrA;
2325 j_coord_offsetB = DIM*jnrB;
2326 j_coord_offsetC = DIM*jnrC;
2327 j_coord_offsetD = DIM*jnrD;
2328 j_coord_offsetE = DIM*jnrE;
2329 j_coord_offsetF = DIM*jnrF;
2330 j_coord_offsetG = DIM*jnrG;
2331 j_coord_offsetH = DIM*jnrH;
2333 /* load j atom coordinates */
2334 gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2335 x+j_coord_offsetC,x+j_coord_offsetD,
2336 x+j_coord_offsetE,x+j_coord_offsetF,
2337 x+j_coord_offsetG,x+j_coord_offsetH,
2338 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
2339 &jy2,&jz2,&jx3,&jy3,&jz3);
2341 /* Calculate displacement vector */
2342 dx00 = _mm256_sub_ps(ix0,jx0);
2343 dy00 = _mm256_sub_ps(iy0,jy0);
2344 dz00 = _mm256_sub_ps(iz0,jz0);
2345 dx11 = _mm256_sub_ps(ix1,jx1);
2346 dy11 = _mm256_sub_ps(iy1,jy1);
2347 dz11 = _mm256_sub_ps(iz1,jz1);
2348 dx12 = _mm256_sub_ps(ix1,jx2);
2349 dy12 = _mm256_sub_ps(iy1,jy2);
2350 dz12 = _mm256_sub_ps(iz1,jz2);
2351 dx13 = _mm256_sub_ps(ix1,jx3);
2352 dy13 = _mm256_sub_ps(iy1,jy3);
2353 dz13 = _mm256_sub_ps(iz1,jz3);
2354 dx21 = _mm256_sub_ps(ix2,jx1);
2355 dy21 = _mm256_sub_ps(iy2,jy1);
2356 dz21 = _mm256_sub_ps(iz2,jz1);
2357 dx22 = _mm256_sub_ps(ix2,jx2);
2358 dy22 = _mm256_sub_ps(iy2,jy2);
2359 dz22 = _mm256_sub_ps(iz2,jz2);
2360 dx23 = _mm256_sub_ps(ix2,jx3);
2361 dy23 = _mm256_sub_ps(iy2,jy3);
2362 dz23 = _mm256_sub_ps(iz2,jz3);
2363 dx31 = _mm256_sub_ps(ix3,jx1);
2364 dy31 = _mm256_sub_ps(iy3,jy1);
2365 dz31 = _mm256_sub_ps(iz3,jz1);
2366 dx32 = _mm256_sub_ps(ix3,jx2);
2367 dy32 = _mm256_sub_ps(iy3,jy2);
2368 dz32 = _mm256_sub_ps(iz3,jz2);
2369 dx33 = _mm256_sub_ps(ix3,jx3);
2370 dy33 = _mm256_sub_ps(iy3,jy3);
2371 dz33 = _mm256_sub_ps(iz3,jz3);
2373 /* Calculate squared distance and things based on it */
2374 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2375 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2376 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2377 rsq13 = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
2378 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2379 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2380 rsq23 = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
2381 rsq31 = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
2382 rsq32 = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
2383 rsq33 = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
2385 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
2386 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
2387 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
2388 rinv13 = gmx_mm256_invsqrt_ps(rsq13);
2389 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
2390 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
2391 rinv23 = gmx_mm256_invsqrt_ps(rsq23);
2392 rinv31 = gmx_mm256_invsqrt_ps(rsq31);
2393 rinv32 = gmx_mm256_invsqrt_ps(rsq32);
2394 rinv33 = gmx_mm256_invsqrt_ps(rsq33);
2396 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
2397 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
2398 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
2399 rinvsq13 = _mm256_mul_ps(rinv13,rinv13);
2400 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
2401 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
2402 rinvsq23 = _mm256_mul_ps(rinv23,rinv23);
2403 rinvsq31 = _mm256_mul_ps(rinv31,rinv31);
2404 rinvsq32 = _mm256_mul_ps(rinv32,rinv32);
2405 rinvsq33 = _mm256_mul_ps(rinv33,rinv33);
2407 fjx0 = _mm256_setzero_ps();
2408 fjy0 = _mm256_setzero_ps();
2409 fjz0 = _mm256_setzero_ps();
2410 fjx1 = _mm256_setzero_ps();
2411 fjy1 = _mm256_setzero_ps();
2412 fjz1 = _mm256_setzero_ps();
2413 fjx2 = _mm256_setzero_ps();
2414 fjy2 = _mm256_setzero_ps();
2415 fjz2 = _mm256_setzero_ps();
2416 fjx3 = _mm256_setzero_ps();
2417 fjy3 = _mm256_setzero_ps();
2418 fjz3 = _mm256_setzero_ps();
2420 /**************************
2421 * CALCULATE INTERACTIONS *
2422 **************************/
2424 if (gmx_mm256_any_lt(rsq00,rcutoff2))
2427 r00 = _mm256_mul_ps(rsq00,rinv00);
2428 r00 = _mm256_andnot_ps(dummy_mask,r00);
2430 /* Analytical LJ-PME */
2431 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2432 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
2433 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
2434 exponent = gmx_simd_exp_r(ewcljrsq);
2435 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
2436 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
2437 /* f6A = 6 * C6grid * (1 - poly) */
2438 f6A = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
2439 /* f6B = C6grid * exponent * beta^6 */
2440 f6B = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
2441 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
2442 fvdw = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
2444 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2446 fscal = fvdw;
2448 fscal = _mm256_and_ps(fscal,cutoff_mask);
2450 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2452 /* Calculate temporary vectorial force */
2453 tx = _mm256_mul_ps(fscal,dx00);
2454 ty = _mm256_mul_ps(fscal,dy00);
2455 tz = _mm256_mul_ps(fscal,dz00);
2457 /* Update vectorial force */
2458 fix0 = _mm256_add_ps(fix0,tx);
2459 fiy0 = _mm256_add_ps(fiy0,ty);
2460 fiz0 = _mm256_add_ps(fiz0,tz);
2462 fjx0 = _mm256_add_ps(fjx0,tx);
2463 fjy0 = _mm256_add_ps(fjy0,ty);
2464 fjz0 = _mm256_add_ps(fjz0,tz);
2468 /**************************
2469 * CALCULATE INTERACTIONS *
2470 **************************/
2472 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2475 r11 = _mm256_mul_ps(rsq11,rinv11);
2476 r11 = _mm256_andnot_ps(dummy_mask,r11);
2478 /* EWALD ELECTROSTATICS */
2480 /* Analytical PME correction */
2481 zeta2 = _mm256_mul_ps(beta2,rsq11);
2482 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
2483 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2484 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2485 felec = _mm256_mul_ps(qq11,felec);
2487 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2489 fscal = felec;
2491 fscal = _mm256_and_ps(fscal,cutoff_mask);
2493 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2495 /* Calculate temporary vectorial force */
2496 tx = _mm256_mul_ps(fscal,dx11);
2497 ty = _mm256_mul_ps(fscal,dy11);
2498 tz = _mm256_mul_ps(fscal,dz11);
2500 /* Update vectorial force */
2501 fix1 = _mm256_add_ps(fix1,tx);
2502 fiy1 = _mm256_add_ps(fiy1,ty);
2503 fiz1 = _mm256_add_ps(fiz1,tz);
2505 fjx1 = _mm256_add_ps(fjx1,tx);
2506 fjy1 = _mm256_add_ps(fjy1,ty);
2507 fjz1 = _mm256_add_ps(fjz1,tz);
2511 /**************************
2512 * CALCULATE INTERACTIONS *
2513 **************************/
2515 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2518 r12 = _mm256_mul_ps(rsq12,rinv12);
2519 r12 = _mm256_andnot_ps(dummy_mask,r12);
2521 /* EWALD ELECTROSTATICS */
2523 /* Analytical PME correction */
2524 zeta2 = _mm256_mul_ps(beta2,rsq12);
2525 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
2526 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2527 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2528 felec = _mm256_mul_ps(qq12,felec);
2530 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2532 fscal = felec;
2534 fscal = _mm256_and_ps(fscal,cutoff_mask);
2536 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2538 /* Calculate temporary vectorial force */
2539 tx = _mm256_mul_ps(fscal,dx12);
2540 ty = _mm256_mul_ps(fscal,dy12);
2541 tz = _mm256_mul_ps(fscal,dz12);
2543 /* Update vectorial force */
2544 fix1 = _mm256_add_ps(fix1,tx);
2545 fiy1 = _mm256_add_ps(fiy1,ty);
2546 fiz1 = _mm256_add_ps(fiz1,tz);
2548 fjx2 = _mm256_add_ps(fjx2,tx);
2549 fjy2 = _mm256_add_ps(fjy2,ty);
2550 fjz2 = _mm256_add_ps(fjz2,tz);
2554 /**************************
2555 * CALCULATE INTERACTIONS *
2556 **************************/
2558 if (gmx_mm256_any_lt(rsq13,rcutoff2))
2561 r13 = _mm256_mul_ps(rsq13,rinv13);
2562 r13 = _mm256_andnot_ps(dummy_mask,r13);
2564 /* EWALD ELECTROSTATICS */
2566 /* Analytical PME correction */
2567 zeta2 = _mm256_mul_ps(beta2,rsq13);
2568 rinv3 = _mm256_mul_ps(rinvsq13,rinv13);
2569 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2570 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2571 felec = _mm256_mul_ps(qq13,felec);
2573 cutoff_mask = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
2575 fscal = felec;
2577 fscal = _mm256_and_ps(fscal,cutoff_mask);
2579 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2581 /* Calculate temporary vectorial force */
2582 tx = _mm256_mul_ps(fscal,dx13);
2583 ty = _mm256_mul_ps(fscal,dy13);
2584 tz = _mm256_mul_ps(fscal,dz13);
2586 /* Update vectorial force */
2587 fix1 = _mm256_add_ps(fix1,tx);
2588 fiy1 = _mm256_add_ps(fiy1,ty);
2589 fiz1 = _mm256_add_ps(fiz1,tz);
2591 fjx3 = _mm256_add_ps(fjx3,tx);
2592 fjy3 = _mm256_add_ps(fjy3,ty);
2593 fjz3 = _mm256_add_ps(fjz3,tz);
2597 /**************************
2598 * CALCULATE INTERACTIONS *
2599 **************************/
2601 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2604 r21 = _mm256_mul_ps(rsq21,rinv21);
2605 r21 = _mm256_andnot_ps(dummy_mask,r21);
2607 /* EWALD ELECTROSTATICS */
2609 /* Analytical PME correction */
2610 zeta2 = _mm256_mul_ps(beta2,rsq21);
2611 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
2612 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2613 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2614 felec = _mm256_mul_ps(qq21,felec);
2616 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2618 fscal = felec;
2620 fscal = _mm256_and_ps(fscal,cutoff_mask);
2622 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2624 /* Calculate temporary vectorial force */
2625 tx = _mm256_mul_ps(fscal,dx21);
2626 ty = _mm256_mul_ps(fscal,dy21);
2627 tz = _mm256_mul_ps(fscal,dz21);
2629 /* Update vectorial force */
2630 fix2 = _mm256_add_ps(fix2,tx);
2631 fiy2 = _mm256_add_ps(fiy2,ty);
2632 fiz2 = _mm256_add_ps(fiz2,tz);
2634 fjx1 = _mm256_add_ps(fjx1,tx);
2635 fjy1 = _mm256_add_ps(fjy1,ty);
2636 fjz1 = _mm256_add_ps(fjz1,tz);
2640 /**************************
2641 * CALCULATE INTERACTIONS *
2642 **************************/
2644 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2647 r22 = _mm256_mul_ps(rsq22,rinv22);
2648 r22 = _mm256_andnot_ps(dummy_mask,r22);
2650 /* EWALD ELECTROSTATICS */
2652 /* Analytical PME correction */
2653 zeta2 = _mm256_mul_ps(beta2,rsq22);
2654 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
2655 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2656 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2657 felec = _mm256_mul_ps(qq22,felec);
2659 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2661 fscal = felec;
2663 fscal = _mm256_and_ps(fscal,cutoff_mask);
2665 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2667 /* Calculate temporary vectorial force */
2668 tx = _mm256_mul_ps(fscal,dx22);
2669 ty = _mm256_mul_ps(fscal,dy22);
2670 tz = _mm256_mul_ps(fscal,dz22);
2672 /* Update vectorial force */
2673 fix2 = _mm256_add_ps(fix2,tx);
2674 fiy2 = _mm256_add_ps(fiy2,ty);
2675 fiz2 = _mm256_add_ps(fiz2,tz);
2677 fjx2 = _mm256_add_ps(fjx2,tx);
2678 fjy2 = _mm256_add_ps(fjy2,ty);
2679 fjz2 = _mm256_add_ps(fjz2,tz);
2683 /**************************
2684 * CALCULATE INTERACTIONS *
2685 **************************/
2687 if (gmx_mm256_any_lt(rsq23,rcutoff2))
2690 r23 = _mm256_mul_ps(rsq23,rinv23);
2691 r23 = _mm256_andnot_ps(dummy_mask,r23);
2693 /* EWALD ELECTROSTATICS */
2695 /* Analytical PME correction */
2696 zeta2 = _mm256_mul_ps(beta2,rsq23);
2697 rinv3 = _mm256_mul_ps(rinvsq23,rinv23);
2698 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2699 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2700 felec = _mm256_mul_ps(qq23,felec);
2702 cutoff_mask = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
2704 fscal = felec;
2706 fscal = _mm256_and_ps(fscal,cutoff_mask);
2708 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2710 /* Calculate temporary vectorial force */
2711 tx = _mm256_mul_ps(fscal,dx23);
2712 ty = _mm256_mul_ps(fscal,dy23);
2713 tz = _mm256_mul_ps(fscal,dz23);
2715 /* Update vectorial force */
2716 fix2 = _mm256_add_ps(fix2,tx);
2717 fiy2 = _mm256_add_ps(fiy2,ty);
2718 fiz2 = _mm256_add_ps(fiz2,tz);
2720 fjx3 = _mm256_add_ps(fjx3,tx);
2721 fjy3 = _mm256_add_ps(fjy3,ty);
2722 fjz3 = _mm256_add_ps(fjz3,tz);
2726 /**************************
2727 * CALCULATE INTERACTIONS *
2728 **************************/
2730 if (gmx_mm256_any_lt(rsq31,rcutoff2))
2733 r31 = _mm256_mul_ps(rsq31,rinv31);
2734 r31 = _mm256_andnot_ps(dummy_mask,r31);
2736 /* EWALD ELECTROSTATICS */
2738 /* Analytical PME correction */
2739 zeta2 = _mm256_mul_ps(beta2,rsq31);
2740 rinv3 = _mm256_mul_ps(rinvsq31,rinv31);
2741 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2742 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2743 felec = _mm256_mul_ps(qq31,felec);
2745 cutoff_mask = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
2747 fscal = felec;
2749 fscal = _mm256_and_ps(fscal,cutoff_mask);
2751 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2753 /* Calculate temporary vectorial force */
2754 tx = _mm256_mul_ps(fscal,dx31);
2755 ty = _mm256_mul_ps(fscal,dy31);
2756 tz = _mm256_mul_ps(fscal,dz31);
2758 /* Update vectorial force */
2759 fix3 = _mm256_add_ps(fix3,tx);
2760 fiy3 = _mm256_add_ps(fiy3,ty);
2761 fiz3 = _mm256_add_ps(fiz3,tz);
2763 fjx1 = _mm256_add_ps(fjx1,tx);
2764 fjy1 = _mm256_add_ps(fjy1,ty);
2765 fjz1 = _mm256_add_ps(fjz1,tz);
2769 /**************************
2770 * CALCULATE INTERACTIONS *
2771 **************************/
2773 if (gmx_mm256_any_lt(rsq32,rcutoff2))
2776 r32 = _mm256_mul_ps(rsq32,rinv32);
2777 r32 = _mm256_andnot_ps(dummy_mask,r32);
2779 /* EWALD ELECTROSTATICS */
2781 /* Analytical PME correction */
2782 zeta2 = _mm256_mul_ps(beta2,rsq32);
2783 rinv3 = _mm256_mul_ps(rinvsq32,rinv32);
2784 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2785 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2786 felec = _mm256_mul_ps(qq32,felec);
2788 cutoff_mask = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
2790 fscal = felec;
2792 fscal = _mm256_and_ps(fscal,cutoff_mask);
2794 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2796 /* Calculate temporary vectorial force */
2797 tx = _mm256_mul_ps(fscal,dx32);
2798 ty = _mm256_mul_ps(fscal,dy32);
2799 tz = _mm256_mul_ps(fscal,dz32);
2801 /* Update vectorial force */
2802 fix3 = _mm256_add_ps(fix3,tx);
2803 fiy3 = _mm256_add_ps(fiy3,ty);
2804 fiz3 = _mm256_add_ps(fiz3,tz);
2806 fjx2 = _mm256_add_ps(fjx2,tx);
2807 fjy2 = _mm256_add_ps(fjy2,ty);
2808 fjz2 = _mm256_add_ps(fjz2,tz);
2812 /**************************
2813 * CALCULATE INTERACTIONS *
2814 **************************/
2816 if (gmx_mm256_any_lt(rsq33,rcutoff2))
2819 r33 = _mm256_mul_ps(rsq33,rinv33);
2820 r33 = _mm256_andnot_ps(dummy_mask,r33);
2822 /* EWALD ELECTROSTATICS */
2824 /* Analytical PME correction */
2825 zeta2 = _mm256_mul_ps(beta2,rsq33);
2826 rinv3 = _mm256_mul_ps(rinvsq33,rinv33);
2827 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2828 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2829 felec = _mm256_mul_ps(qq33,felec);
2831 cutoff_mask = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
2833 fscal = felec;
2835 fscal = _mm256_and_ps(fscal,cutoff_mask);
2837 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2839 /* Calculate temporary vectorial force */
2840 tx = _mm256_mul_ps(fscal,dx33);
2841 ty = _mm256_mul_ps(fscal,dy33);
2842 tz = _mm256_mul_ps(fscal,dz33);
2844 /* Update vectorial force */
2845 fix3 = _mm256_add_ps(fix3,tx);
2846 fiy3 = _mm256_add_ps(fiy3,ty);
2847 fiz3 = _mm256_add_ps(fiz3,tz);
2849 fjx3 = _mm256_add_ps(fjx3,tx);
2850 fjy3 = _mm256_add_ps(fjy3,ty);
2851 fjz3 = _mm256_add_ps(fjz3,tz);
2855 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2856 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2857 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2858 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2859 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2860 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2861 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2862 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
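/* Note added for clarity (not part of the generated kernel): for padding lanes
 * the j-force pointers are redirected to the local scratch buffer (zeroed before
 * the outer loop), so the eight-pointer swizzled decrement below always has a
 * valid store target and the values written for dummy atoms are simply discarded.
 */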
2864 gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2865 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2866 fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2868 /* Inner loop uses 593 flops */
2869 }
2871 /* End of innermost loop */
2873 gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
2874 f+i_coord_offset,fshift+i_shift_offset);
2876 /* Increment number of inner iterations */
2877 inneriter += j_index_end - j_index_start;
2879 /* Outer loop uses 24 flops */
2880 }
2882 /* Increment number of outer iterations */
2883 outeriter += nri;
2885 /* Update outer/inner flops */
2887 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*593);