/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "../nb_kernel.h"
#include "gromacs/legacyheaders/types/simple.h"
#include "gromacs/math/vec.h"
#include "gromacs/legacyheaders/nrnb.h"

#include "gromacs/simd/math_x86_avx_256_single.h"
#include "kernelutil_x86_avx_256_single.h"
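/* Orientation to the SIMD data layout used below (illustrative sketch, not part
 * of the generated interface): AVX-256 single precision packs eight floats, so
 * each __m256 register holds one Cartesian component for eight j atoms at once.
 * For example, after the swizzled load the register jx0 conceptually contains
 *
 *     jx0 = { xA0, xB0, xC0, xD0, xE0, xF0, xG0, xH0 }
 *
 * i.e. the x coordinate of atom 0 of the water (typically the oxygen) for the
 * eight neighbor molecules A..H, while ix0 holds the i-water atom-0 x coordinate
 * broadcast to all eight lanes.
 */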
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LJEwald
 * Geometry:                   Water3-Water3
 * Calculate force/pot:        PotentialAndForce
 */
void
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
74 int i_shift_offset,i_coord_offset,outeriter,inneriter;
75 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76 int jnrA,jnrB,jnrC,jnrD;
77 int jnrE,jnrF,jnrG,jnrH;
78 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
79 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
80 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
81 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
82 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
84 real *shiftvec,*fshift,*x,*f;
85 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
87 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
88 real * vdwioffsetptr0;
89 real * vdwgridioffsetptr0;
90 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
91 real * vdwioffsetptr1;
92 real * vdwgridioffsetptr1;
93 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
94 real * vdwioffsetptr2;
95 real * vdwgridioffsetptr2;
96 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
97 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
98 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
99 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
100 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
101 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
102 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
103 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
104 __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
105 __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
106 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
107 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
108 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
109 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
110 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
111 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
112 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
115 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
118 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
119 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
130 __m256 ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
131 __m256 one_half = _mm256_set1_ps(0.5);
132 __m256 minus_one = _mm256_set1_ps(-1.0);
134 __m128i ewitab_lo,ewitab_hi;
135 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
136 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
138 __m256 dummy_mask,cutoff_mask;
139 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
140 __m256 one = _mm256_set1_ps(1.0);
141 __m256 two = _mm256_set1_ps(2.0);
147 jindex = nlist->jindex;
149 shiftidx = nlist->shift;
151 shiftvec = fr->shift_vec[0];
152 fshift = fr->fshift[0];
153 facel = _mm256_set1_ps(fr->epsfac);
154 charge = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;
158 vdwgridparam = fr->ljpme_c6grid;
159 sh_lj_ewald = _mm256_set1_ps(fr->ic->sh_lj_ewald);
160 ewclj = _mm256_set1_ps(fr->ewaldcoeff_lj);
161 ewclj2 = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
163 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
164 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
165 beta2 = _mm256_mul_ps(beta,beta);
166 beta3 = _mm256_mul_ps(beta,beta2);
168 ewtab = fr->ic->tabq_coul_FDV0;
169 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
170 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
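    /* Note: the Ewald table pointer and scale above are part of the generic kernel
     * setup, but this kernel uses the analytical PME correction
     * (gmx_mm256_pmecorrF_ps()/gmx_mm256_pmecorrV_ps()) in the interaction blocks,
     * so the table data is not referenced below.
     */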
172 /* Setup water-specific parameters */
173 inr = nlist->iinr[0];
174 iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
175 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
176 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
177 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
178 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
180 jq0 = _mm256_set1_ps(charge[inr+0]);
181 jq1 = _mm256_set1_ps(charge[inr+1]);
182 jq2 = _mm256_set1_ps(charge[inr+2]);
183 vdwjidx0A = 2*vdwtype[inr+0];
184 qq00 = _mm256_mul_ps(iq0,jq0);
185 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
186 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
187 c6grid_00 = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
188 qq01 = _mm256_mul_ps(iq0,jq1);
189 qq02 = _mm256_mul_ps(iq0,jq2);
190 qq10 = _mm256_mul_ps(iq1,jq0);
191 qq11 = _mm256_mul_ps(iq1,jq1);
192 qq12 = _mm256_mul_ps(iq1,jq2);
193 qq20 = _mm256_mul_ps(iq2,jq0);
194 qq21 = _mm256_mul_ps(iq2,jq1);
195 qq22 = _mm256_mul_ps(iq2,jq2);
    /* With explicit cutoffs the electrostatics and VdW cutoff values must be identical, so use the electrostatics one as an arbitrary choice */
198 rcutoff_scalar = fr->rcoulomb;
199 rcutoff = _mm256_set1_ps(rcutoff_scalar);
200 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
202 sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
203 rvdw = _mm256_set1_ps(fr->rvdw);
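    /* sh_ewald and sh_vdw_invrcut6 are the potential-shift constants taken from the
     * interaction constants (presumably erfc(beta*rc)/rc and 1/rc^6, respectively);
     * they are subtracted below so both the Ewald and LJ potentials go to zero at
     * the cutoff, e.g. velec ~ qq*(erfc(beta*r)/r - sh_ewald) for r < rc.
     */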
205 /* Avoid stupid compiler warnings */
206 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
219 for(iidx=0;iidx<4*DIM;iidx++)
224 /* Start outer loop over neighborlists */
225 for(iidx=0; iidx<nri; iidx++)
227 /* Load shift vector for this list */
228 i_shift_offset = DIM*shiftidx[iidx];
230 /* Load limits for loop over neighbors */
231 j_index_start = jindex[iidx];
232 j_index_end = jindex[iidx+1];
        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;
238 /* Load i particle coords and add shift vector */
239 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
240 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
242 fix0 = _mm256_setzero_ps();
243 fiy0 = _mm256_setzero_ps();
244 fiz0 = _mm256_setzero_ps();
245 fix1 = _mm256_setzero_ps();
246 fiy1 = _mm256_setzero_ps();
247 fiz1 = _mm256_setzero_ps();
248 fix2 = _mm256_setzero_ps();
249 fiy2 = _mm256_setzero_ps();
250 fiz2 = _mm256_setzero_ps();
252 /* Reset potential sums */
253 velecsum = _mm256_setzero_ps();
254 vvdwsum = _mm256_setzero_ps();
256 /* Start inner kernel loop */
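        /* The inner loop is emitted twice: this first version assumes a full set of
         * eight real j atoms (it runs only while jjnr[jidx+7]>=0), while the second
         * version further below builds dummy_mask and masks out the padded,
         * non-real lanes in the tail of the list.
         */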
257 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
269 j_coord_offsetA = DIM*jnrA;
270 j_coord_offsetB = DIM*jnrB;
271 j_coord_offsetC = DIM*jnrC;
272 j_coord_offsetD = DIM*jnrD;
273 j_coord_offsetE = DIM*jnrE;
274 j_coord_offsetF = DIM*jnrF;
275 j_coord_offsetG = DIM*jnrG;
276 j_coord_offsetH = DIM*jnrH;
278 /* load j atom coordinates */
279 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
280 x+j_coord_offsetC,x+j_coord_offsetD,
281 x+j_coord_offsetE,x+j_coord_offsetF,
282 x+j_coord_offsetG,x+j_coord_offsetH,
283 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
285 /* Calculate displacement vector */
286 dx00 = _mm256_sub_ps(ix0,jx0);
287 dy00 = _mm256_sub_ps(iy0,jy0);
288 dz00 = _mm256_sub_ps(iz0,jz0);
289 dx01 = _mm256_sub_ps(ix0,jx1);
290 dy01 = _mm256_sub_ps(iy0,jy1);
291 dz01 = _mm256_sub_ps(iz0,jz1);
292 dx02 = _mm256_sub_ps(ix0,jx2);
293 dy02 = _mm256_sub_ps(iy0,jy2);
294 dz02 = _mm256_sub_ps(iz0,jz2);
295 dx10 = _mm256_sub_ps(ix1,jx0);
296 dy10 = _mm256_sub_ps(iy1,jy0);
297 dz10 = _mm256_sub_ps(iz1,jz0);
298 dx11 = _mm256_sub_ps(ix1,jx1);
299 dy11 = _mm256_sub_ps(iy1,jy1);
300 dz11 = _mm256_sub_ps(iz1,jz1);
301 dx12 = _mm256_sub_ps(ix1,jx2);
302 dy12 = _mm256_sub_ps(iy1,jy2);
303 dz12 = _mm256_sub_ps(iz1,jz2);
304 dx20 = _mm256_sub_ps(ix2,jx0);
305 dy20 = _mm256_sub_ps(iy2,jy0);
306 dz20 = _mm256_sub_ps(iz2,jz0);
307 dx21 = _mm256_sub_ps(ix2,jx1);
308 dy21 = _mm256_sub_ps(iy2,jy1);
309 dz21 = _mm256_sub_ps(iz2,jz1);
310 dx22 = _mm256_sub_ps(ix2,jx2);
311 dy22 = _mm256_sub_ps(iy2,jy2);
312 dz22 = _mm256_sub_ps(iz2,jz2);
314 /* Calculate squared distance and things based on it */
315 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
316 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
317 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
318 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
319 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
320 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
321 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
322 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
323 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
325 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
326 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
327 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
328 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
329 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
330 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
331 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
332 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
333 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
335 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
336 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
337 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
338 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
339 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
340 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
341 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
342 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
343 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
345 fjx0 = _mm256_setzero_ps();
346 fjy0 = _mm256_setzero_ps();
347 fjz0 = _mm256_setzero_ps();
348 fjx1 = _mm256_setzero_ps();
349 fjy1 = _mm256_setzero_ps();
350 fjz1 = _mm256_setzero_ps();
351 fjx2 = _mm256_setzero_ps();
352 fjy2 = _mm256_setzero_ps();
353 fjz2 = _mm256_setzero_ps();
355 /**************************
356 * CALCULATE INTERACTIONS *
357 **************************/
359 if (gmx_mm256_any_lt(rsq00,rcutoff2))
362 r00 = _mm256_mul_ps(rsq00,rinv00);
364 /* EWALD ELECTROSTATICS */
366 /* Analytical PME correction */
367 zeta2 = _mm256_mul_ps(beta2,rsq00);
368 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
369 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
370 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
371 felec = _mm256_mul_ps(qq00,felec);
372 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
373 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
374 velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
375 velec = _mm256_mul_ps(qq00,velec);
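                /* Scalar sketch of the shifted Ewald real-space terms computed above,
                 * with b = ewaldcoeff_q (illustration only; qq00 already includes epsfac):
                 *
                 *   velec = qq * ( erfc(b*r)/r - sh_ewald )
                 *   felec = qq * ( erfc(b*r)/r + 2*b*exp(-b*b*r*r)/sqrt(M_PI) ) / (r*r)
                 *
                 * gmx_mm256_pmecorrF_ps()/gmx_mm256_pmecorrV_ps() provide polynomial
                 * approximations of the erf-based parts, so only mul/add SIMD ops remain.
                 */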
377 /* Analytical LJ-PME */
378 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
379 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
380 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
381 exponent = gmx_simd_exp_r(ewcljrsq);
382 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
383 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
384 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
385 vvdw6 = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
386 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
387 vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
388 _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
389 /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
390 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
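                /* Scalar sketch of the shifted LJ-PME terms above (illustration only;
                 * bl = ewaldcoeff_lj, and c6/c12/c6grid are the packed pair parameters):
                 *
                 *   rinv6  = 1/r^6
                 *   poly   = exp(-bl^2*r^2) * (1 + bl^2*r^2 + bl^4*r^4/2)
                 *   vvdw6  = ( c6 - c6grid*(1 - poly) ) * rinv6
                 *   vvdw12 = c12 * rinv6 * rinv6
                 *   vvdw   = ( vvdw12 - c12*sh_vdw_invrcut6^2 )/12
                 *          - ( vvdw6 - c6*sh_vdw_invrcut6 - c6grid*sh_lj_ewald )/6
                 */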
392 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
394 /* Update potential sum for this i atom from the interaction with this j atom. */
395 velec = _mm256_and_ps(velec,cutoff_mask);
396 velecsum = _mm256_add_ps(velecsum,velec);
397 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
398 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
400 fscal = _mm256_add_ps(felec,fvdw);
402 fscal = _mm256_and_ps(fscal,cutoff_mask);
404 /* Calculate temporary vectorial force */
405 tx = _mm256_mul_ps(fscal,dx00);
406 ty = _mm256_mul_ps(fscal,dy00);
407 tz = _mm256_mul_ps(fscal,dz00);
409 /* Update vectorial force */
410 fix0 = _mm256_add_ps(fix0,tx);
411 fiy0 = _mm256_add_ps(fiy0,ty);
412 fiz0 = _mm256_add_ps(fiz0,tz);
414 fjx0 = _mm256_add_ps(fjx0,tx);
415 fjy0 = _mm256_add_ps(fjy0,ty);
416 fjz0 = _mm256_add_ps(fjz0,tz);
420 /**************************
421 * CALCULATE INTERACTIONS *
422 **************************/
424 if (gmx_mm256_any_lt(rsq01,rcutoff2))
427 r01 = _mm256_mul_ps(rsq01,rinv01);
429 /* EWALD ELECTROSTATICS */
431 /* Analytical PME correction */
432 zeta2 = _mm256_mul_ps(beta2,rsq01);
433 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
434 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
435 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
436 felec = _mm256_mul_ps(qq01,felec);
437 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
438 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
439 velec = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
440 velec = _mm256_mul_ps(qq01,velec);
442 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
444 /* Update potential sum for this i atom from the interaction with this j atom. */
445 velec = _mm256_and_ps(velec,cutoff_mask);
446 velecsum = _mm256_add_ps(velecsum,velec);
450 fscal = _mm256_and_ps(fscal,cutoff_mask);
452 /* Calculate temporary vectorial force */
453 tx = _mm256_mul_ps(fscal,dx01);
454 ty = _mm256_mul_ps(fscal,dy01);
455 tz = _mm256_mul_ps(fscal,dz01);
457 /* Update vectorial force */
458 fix0 = _mm256_add_ps(fix0,tx);
459 fiy0 = _mm256_add_ps(fiy0,ty);
460 fiz0 = _mm256_add_ps(fiz0,tz);
462 fjx1 = _mm256_add_ps(fjx1,tx);
463 fjy1 = _mm256_add_ps(fjy1,ty);
464 fjz1 = _mm256_add_ps(fjz1,tz);
468 /**************************
469 * CALCULATE INTERACTIONS *
470 **************************/
472 if (gmx_mm256_any_lt(rsq02,rcutoff2))
475 r02 = _mm256_mul_ps(rsq02,rinv02);
477 /* EWALD ELECTROSTATICS */
479 /* Analytical PME correction */
480 zeta2 = _mm256_mul_ps(beta2,rsq02);
481 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
482 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
483 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
484 felec = _mm256_mul_ps(qq02,felec);
485 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
486 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
487 velec = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
488 velec = _mm256_mul_ps(qq02,velec);
490 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
492 /* Update potential sum for this i atom from the interaction with this j atom. */
493 velec = _mm256_and_ps(velec,cutoff_mask);
494 velecsum = _mm256_add_ps(velecsum,velec);
498 fscal = _mm256_and_ps(fscal,cutoff_mask);
500 /* Calculate temporary vectorial force */
501 tx = _mm256_mul_ps(fscal,dx02);
502 ty = _mm256_mul_ps(fscal,dy02);
503 tz = _mm256_mul_ps(fscal,dz02);
505 /* Update vectorial force */
506 fix0 = _mm256_add_ps(fix0,tx);
507 fiy0 = _mm256_add_ps(fiy0,ty);
508 fiz0 = _mm256_add_ps(fiz0,tz);
510 fjx2 = _mm256_add_ps(fjx2,tx);
511 fjy2 = _mm256_add_ps(fjy2,ty);
512 fjz2 = _mm256_add_ps(fjz2,tz);
516 /**************************
517 * CALCULATE INTERACTIONS *
518 **************************/
520 if (gmx_mm256_any_lt(rsq10,rcutoff2))
523 r10 = _mm256_mul_ps(rsq10,rinv10);
525 /* EWALD ELECTROSTATICS */
527 /* Analytical PME correction */
528 zeta2 = _mm256_mul_ps(beta2,rsq10);
529 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
530 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
531 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
532 felec = _mm256_mul_ps(qq10,felec);
533 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
534 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
535 velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
536 velec = _mm256_mul_ps(qq10,velec);
538 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
540 /* Update potential sum for this i atom from the interaction with this j atom. */
541 velec = _mm256_and_ps(velec,cutoff_mask);
542 velecsum = _mm256_add_ps(velecsum,velec);
546 fscal = _mm256_and_ps(fscal,cutoff_mask);
548 /* Calculate temporary vectorial force */
549 tx = _mm256_mul_ps(fscal,dx10);
550 ty = _mm256_mul_ps(fscal,dy10);
551 tz = _mm256_mul_ps(fscal,dz10);
553 /* Update vectorial force */
554 fix1 = _mm256_add_ps(fix1,tx);
555 fiy1 = _mm256_add_ps(fiy1,ty);
556 fiz1 = _mm256_add_ps(fiz1,tz);
558 fjx0 = _mm256_add_ps(fjx0,tx);
559 fjy0 = _mm256_add_ps(fjy0,ty);
560 fjz0 = _mm256_add_ps(fjz0,tz);
564 /**************************
565 * CALCULATE INTERACTIONS *
566 **************************/
568 if (gmx_mm256_any_lt(rsq11,rcutoff2))
571 r11 = _mm256_mul_ps(rsq11,rinv11);
573 /* EWALD ELECTROSTATICS */
575 /* Analytical PME correction */
576 zeta2 = _mm256_mul_ps(beta2,rsq11);
577 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
578 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
579 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
580 felec = _mm256_mul_ps(qq11,felec);
581 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
582 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
583 velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
584 velec = _mm256_mul_ps(qq11,velec);
586 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
588 /* Update potential sum for this i atom from the interaction with this j atom. */
589 velec = _mm256_and_ps(velec,cutoff_mask);
590 velecsum = _mm256_add_ps(velecsum,velec);
594 fscal = _mm256_and_ps(fscal,cutoff_mask);
596 /* Calculate temporary vectorial force */
597 tx = _mm256_mul_ps(fscal,dx11);
598 ty = _mm256_mul_ps(fscal,dy11);
599 tz = _mm256_mul_ps(fscal,dz11);
601 /* Update vectorial force */
602 fix1 = _mm256_add_ps(fix1,tx);
603 fiy1 = _mm256_add_ps(fiy1,ty);
604 fiz1 = _mm256_add_ps(fiz1,tz);
606 fjx1 = _mm256_add_ps(fjx1,tx);
607 fjy1 = _mm256_add_ps(fjy1,ty);
608 fjz1 = _mm256_add_ps(fjz1,tz);
612 /**************************
613 * CALCULATE INTERACTIONS *
614 **************************/
616 if (gmx_mm256_any_lt(rsq12,rcutoff2))
619 r12 = _mm256_mul_ps(rsq12,rinv12);
621 /* EWALD ELECTROSTATICS */
623 /* Analytical PME correction */
624 zeta2 = _mm256_mul_ps(beta2,rsq12);
625 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
626 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
627 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
628 felec = _mm256_mul_ps(qq12,felec);
629 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
630 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
631 velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
632 velec = _mm256_mul_ps(qq12,velec);
634 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
636 /* Update potential sum for this i atom from the interaction with this j atom. */
637 velec = _mm256_and_ps(velec,cutoff_mask);
638 velecsum = _mm256_add_ps(velecsum,velec);
642 fscal = _mm256_and_ps(fscal,cutoff_mask);
644 /* Calculate temporary vectorial force */
645 tx = _mm256_mul_ps(fscal,dx12);
646 ty = _mm256_mul_ps(fscal,dy12);
647 tz = _mm256_mul_ps(fscal,dz12);
649 /* Update vectorial force */
650 fix1 = _mm256_add_ps(fix1,tx);
651 fiy1 = _mm256_add_ps(fiy1,ty);
652 fiz1 = _mm256_add_ps(fiz1,tz);
654 fjx2 = _mm256_add_ps(fjx2,tx);
655 fjy2 = _mm256_add_ps(fjy2,ty);
656 fjz2 = _mm256_add_ps(fjz2,tz);
660 /**************************
661 * CALCULATE INTERACTIONS *
662 **************************/
664 if (gmx_mm256_any_lt(rsq20,rcutoff2))
667 r20 = _mm256_mul_ps(rsq20,rinv20);
669 /* EWALD ELECTROSTATICS */
671 /* Analytical PME correction */
672 zeta2 = _mm256_mul_ps(beta2,rsq20);
673 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
674 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
675 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
676 felec = _mm256_mul_ps(qq20,felec);
677 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
678 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
679 velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
680 velec = _mm256_mul_ps(qq20,velec);
682 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
684 /* Update potential sum for this i atom from the interaction with this j atom. */
685 velec = _mm256_and_ps(velec,cutoff_mask);
686 velecsum = _mm256_add_ps(velecsum,velec);
690 fscal = _mm256_and_ps(fscal,cutoff_mask);
692 /* Calculate temporary vectorial force */
693 tx = _mm256_mul_ps(fscal,dx20);
694 ty = _mm256_mul_ps(fscal,dy20);
695 tz = _mm256_mul_ps(fscal,dz20);
697 /* Update vectorial force */
698 fix2 = _mm256_add_ps(fix2,tx);
699 fiy2 = _mm256_add_ps(fiy2,ty);
700 fiz2 = _mm256_add_ps(fiz2,tz);
702 fjx0 = _mm256_add_ps(fjx0,tx);
703 fjy0 = _mm256_add_ps(fjy0,ty);
704 fjz0 = _mm256_add_ps(fjz0,tz);
708 /**************************
709 * CALCULATE INTERACTIONS *
710 **************************/
712 if (gmx_mm256_any_lt(rsq21,rcutoff2))
715 r21 = _mm256_mul_ps(rsq21,rinv21);
717 /* EWALD ELECTROSTATICS */
719 /* Analytical PME correction */
720 zeta2 = _mm256_mul_ps(beta2,rsq21);
721 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
722 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
723 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
724 felec = _mm256_mul_ps(qq21,felec);
725 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
726 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
727 velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
728 velec = _mm256_mul_ps(qq21,velec);
730 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
732 /* Update potential sum for this i atom from the interaction with this j atom. */
733 velec = _mm256_and_ps(velec,cutoff_mask);
734 velecsum = _mm256_add_ps(velecsum,velec);
738 fscal = _mm256_and_ps(fscal,cutoff_mask);
740 /* Calculate temporary vectorial force */
741 tx = _mm256_mul_ps(fscal,dx21);
742 ty = _mm256_mul_ps(fscal,dy21);
743 tz = _mm256_mul_ps(fscal,dz21);
745 /* Update vectorial force */
746 fix2 = _mm256_add_ps(fix2,tx);
747 fiy2 = _mm256_add_ps(fiy2,ty);
748 fiz2 = _mm256_add_ps(fiz2,tz);
750 fjx1 = _mm256_add_ps(fjx1,tx);
751 fjy1 = _mm256_add_ps(fjy1,ty);
752 fjz1 = _mm256_add_ps(fjz1,tz);
756 /**************************
757 * CALCULATE INTERACTIONS *
758 **************************/
760 if (gmx_mm256_any_lt(rsq22,rcutoff2))
763 r22 = _mm256_mul_ps(rsq22,rinv22);
765 /* EWALD ELECTROSTATICS */
767 /* Analytical PME correction */
768 zeta2 = _mm256_mul_ps(beta2,rsq22);
769 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
770 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
771 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
772 felec = _mm256_mul_ps(qq22,felec);
773 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
774 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
775 velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
776 velec = _mm256_mul_ps(qq22,velec);
778 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
780 /* Update potential sum for this i atom from the interaction with this j atom. */
781 velec = _mm256_and_ps(velec,cutoff_mask);
782 velecsum = _mm256_add_ps(velecsum,velec);
786 fscal = _mm256_and_ps(fscal,cutoff_mask);
788 /* Calculate temporary vectorial force */
789 tx = _mm256_mul_ps(fscal,dx22);
790 ty = _mm256_mul_ps(fscal,dy22);
791 tz = _mm256_mul_ps(fscal,dz22);
793 /* Update vectorial force */
794 fix2 = _mm256_add_ps(fix2,tx);
795 fiy2 = _mm256_add_ps(fiy2,ty);
796 fiz2 = _mm256_add_ps(fiz2,tz);
798 fjx2 = _mm256_add_ps(fjx2,tx);
799 fjy2 = _mm256_add_ps(fjy2,ty);
800 fjz2 = _mm256_add_ps(fjz2,tz);
804 fjptrA = f+j_coord_offsetA;
805 fjptrB = f+j_coord_offsetB;
806 fjptrC = f+j_coord_offsetC;
807 fjptrD = f+j_coord_offsetD;
808 fjptrE = f+j_coord_offsetE;
809 fjptrF = f+j_coord_offsetF;
810 fjptrG = f+j_coord_offsetG;
811 fjptrH = f+j_coord_offsetH;
813 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
814 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
816 /* Inner loop uses 1017 flops */
822 /* Get j neighbor index, and coordinate index */
823 jnrlistA = jjnr[jidx];
824 jnrlistB = jjnr[jidx+1];
825 jnrlistC = jjnr[jidx+2];
826 jnrlistD = jjnr[jidx+3];
827 jnrlistE = jjnr[jidx+4];
828 jnrlistF = jjnr[jidx+5];
829 jnrlistG = jjnr[jidx+6];
830 jnrlistH = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
835 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
836 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
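            /* Illustration (values assumed): for a padded list tail such as
             *   jjnr+jidx = { 12, 57, 308, -1, -1, -1, -1, -1 }
             * the two 128-bit compares yield
             *   dummy_mask = { 0x0, 0x0, 0x0, ~0, ~0, ~0, ~0, ~0 }
             * and the _mm256_andnot_ps(dummy_mask,...) operations below zero the
             * r, energy and force contributions of the five padded lanes.
             */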
838 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
839 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
840 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
841 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
842 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
843 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
844 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
845 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
846 j_coord_offsetA = DIM*jnrA;
847 j_coord_offsetB = DIM*jnrB;
848 j_coord_offsetC = DIM*jnrC;
849 j_coord_offsetD = DIM*jnrD;
850 j_coord_offsetE = DIM*jnrE;
851 j_coord_offsetF = DIM*jnrF;
852 j_coord_offsetG = DIM*jnrG;
853 j_coord_offsetH = DIM*jnrH;
855 /* load j atom coordinates */
856 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
857 x+j_coord_offsetC,x+j_coord_offsetD,
858 x+j_coord_offsetE,x+j_coord_offsetF,
859 x+j_coord_offsetG,x+j_coord_offsetH,
860 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
862 /* Calculate displacement vector */
863 dx00 = _mm256_sub_ps(ix0,jx0);
864 dy00 = _mm256_sub_ps(iy0,jy0);
865 dz00 = _mm256_sub_ps(iz0,jz0);
866 dx01 = _mm256_sub_ps(ix0,jx1);
867 dy01 = _mm256_sub_ps(iy0,jy1);
868 dz01 = _mm256_sub_ps(iz0,jz1);
869 dx02 = _mm256_sub_ps(ix0,jx2);
870 dy02 = _mm256_sub_ps(iy0,jy2);
871 dz02 = _mm256_sub_ps(iz0,jz2);
872 dx10 = _mm256_sub_ps(ix1,jx0);
873 dy10 = _mm256_sub_ps(iy1,jy0);
874 dz10 = _mm256_sub_ps(iz1,jz0);
875 dx11 = _mm256_sub_ps(ix1,jx1);
876 dy11 = _mm256_sub_ps(iy1,jy1);
877 dz11 = _mm256_sub_ps(iz1,jz1);
878 dx12 = _mm256_sub_ps(ix1,jx2);
879 dy12 = _mm256_sub_ps(iy1,jy2);
880 dz12 = _mm256_sub_ps(iz1,jz2);
881 dx20 = _mm256_sub_ps(ix2,jx0);
882 dy20 = _mm256_sub_ps(iy2,jy0);
883 dz20 = _mm256_sub_ps(iz2,jz0);
884 dx21 = _mm256_sub_ps(ix2,jx1);
885 dy21 = _mm256_sub_ps(iy2,jy1);
886 dz21 = _mm256_sub_ps(iz2,jz1);
887 dx22 = _mm256_sub_ps(ix2,jx2);
888 dy22 = _mm256_sub_ps(iy2,jy2);
889 dz22 = _mm256_sub_ps(iz2,jz2);
891 /* Calculate squared distance and things based on it */
892 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
893 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
894 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
895 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
896 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
897 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
898 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
899 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
900 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
902 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
903 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
904 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
905 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
906 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
907 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
908 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
909 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
910 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
912 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
913 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
914 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
915 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
916 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
917 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
918 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
919 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
920 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
922 fjx0 = _mm256_setzero_ps();
923 fjy0 = _mm256_setzero_ps();
924 fjz0 = _mm256_setzero_ps();
925 fjx1 = _mm256_setzero_ps();
926 fjy1 = _mm256_setzero_ps();
927 fjz1 = _mm256_setzero_ps();
928 fjx2 = _mm256_setzero_ps();
929 fjy2 = _mm256_setzero_ps();
930 fjz2 = _mm256_setzero_ps();
932 /**************************
933 * CALCULATE INTERACTIONS *
934 **************************/
936 if (gmx_mm256_any_lt(rsq00,rcutoff2))
939 r00 = _mm256_mul_ps(rsq00,rinv00);
940 r00 = _mm256_andnot_ps(dummy_mask,r00);
942 /* EWALD ELECTROSTATICS */
944 /* Analytical PME correction */
945 zeta2 = _mm256_mul_ps(beta2,rsq00);
946 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
947 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
948 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
949 felec = _mm256_mul_ps(qq00,felec);
950 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
951 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
952 velec = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
953 velec = _mm256_mul_ps(qq00,velec);
955 /* Analytical LJ-PME */
956 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
957 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
958 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
959 exponent = gmx_simd_exp_r(ewcljrsq);
960 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
961 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
962 /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
963 vvdw6 = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
964 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
965 vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
966 _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
967 /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
968 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
970 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
972 /* Update potential sum for this i atom from the interaction with this j atom. */
973 velec = _mm256_and_ps(velec,cutoff_mask);
974 velec = _mm256_andnot_ps(dummy_mask,velec);
975 velecsum = _mm256_add_ps(velecsum,velec);
976 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
977 vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
978 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
980 fscal = _mm256_add_ps(felec,fvdw);
982 fscal = _mm256_and_ps(fscal,cutoff_mask);
984 fscal = _mm256_andnot_ps(dummy_mask,fscal);
986 /* Calculate temporary vectorial force */
987 tx = _mm256_mul_ps(fscal,dx00);
988 ty = _mm256_mul_ps(fscal,dy00);
989 tz = _mm256_mul_ps(fscal,dz00);
991 /* Update vectorial force */
992 fix0 = _mm256_add_ps(fix0,tx);
993 fiy0 = _mm256_add_ps(fiy0,ty);
994 fiz0 = _mm256_add_ps(fiz0,tz);
996 fjx0 = _mm256_add_ps(fjx0,tx);
997 fjy0 = _mm256_add_ps(fjy0,ty);
998 fjz0 = _mm256_add_ps(fjz0,tz);
1002 /**************************
1003 * CALCULATE INTERACTIONS *
1004 **************************/
1006 if (gmx_mm256_any_lt(rsq01,rcutoff2))
1009 r01 = _mm256_mul_ps(rsq01,rinv01);
1010 r01 = _mm256_andnot_ps(dummy_mask,r01);
1012 /* EWALD ELECTROSTATICS */
1014 /* Analytical PME correction */
1015 zeta2 = _mm256_mul_ps(beta2,rsq01);
1016 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
1017 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1018 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1019 felec = _mm256_mul_ps(qq01,felec);
1020 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1021 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1022 velec = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
1023 velec = _mm256_mul_ps(qq01,velec);
1025 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
1027 /* Update potential sum for this i atom from the interaction with this j atom. */
1028 velec = _mm256_and_ps(velec,cutoff_mask);
1029 velec = _mm256_andnot_ps(dummy_mask,velec);
1030 velecsum = _mm256_add_ps(velecsum,velec);
1034 fscal = _mm256_and_ps(fscal,cutoff_mask);
1036 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1038 /* Calculate temporary vectorial force */
1039 tx = _mm256_mul_ps(fscal,dx01);
1040 ty = _mm256_mul_ps(fscal,dy01);
1041 tz = _mm256_mul_ps(fscal,dz01);
1043 /* Update vectorial force */
1044 fix0 = _mm256_add_ps(fix0,tx);
1045 fiy0 = _mm256_add_ps(fiy0,ty);
1046 fiz0 = _mm256_add_ps(fiz0,tz);
1048 fjx1 = _mm256_add_ps(fjx1,tx);
1049 fjy1 = _mm256_add_ps(fjy1,ty);
1050 fjz1 = _mm256_add_ps(fjz1,tz);
1054 /**************************
1055 * CALCULATE INTERACTIONS *
1056 **************************/
1058 if (gmx_mm256_any_lt(rsq02,rcutoff2))
1061 r02 = _mm256_mul_ps(rsq02,rinv02);
1062 r02 = _mm256_andnot_ps(dummy_mask,r02);
1064 /* EWALD ELECTROSTATICS */
1066 /* Analytical PME correction */
1067 zeta2 = _mm256_mul_ps(beta2,rsq02);
1068 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
1069 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1070 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1071 felec = _mm256_mul_ps(qq02,felec);
1072 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1073 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1074 velec = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
1075 velec = _mm256_mul_ps(qq02,velec);
1077 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
1079 /* Update potential sum for this i atom from the interaction with this j atom. */
1080 velec = _mm256_and_ps(velec,cutoff_mask);
1081 velec = _mm256_andnot_ps(dummy_mask,velec);
1082 velecsum = _mm256_add_ps(velecsum,velec);
1086 fscal = _mm256_and_ps(fscal,cutoff_mask);
1088 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1090 /* Calculate temporary vectorial force */
1091 tx = _mm256_mul_ps(fscal,dx02);
1092 ty = _mm256_mul_ps(fscal,dy02);
1093 tz = _mm256_mul_ps(fscal,dz02);
1095 /* Update vectorial force */
1096 fix0 = _mm256_add_ps(fix0,tx);
1097 fiy0 = _mm256_add_ps(fiy0,ty);
1098 fiz0 = _mm256_add_ps(fiz0,tz);
1100 fjx2 = _mm256_add_ps(fjx2,tx);
1101 fjy2 = _mm256_add_ps(fjy2,ty);
1102 fjz2 = _mm256_add_ps(fjz2,tz);
1106 /**************************
1107 * CALCULATE INTERACTIONS *
1108 **************************/
1110 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1113 r10 = _mm256_mul_ps(rsq10,rinv10);
1114 r10 = _mm256_andnot_ps(dummy_mask,r10);
1116 /* EWALD ELECTROSTATICS */
1118 /* Analytical PME correction */
1119 zeta2 = _mm256_mul_ps(beta2,rsq10);
1120 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1121 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1122 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1123 felec = _mm256_mul_ps(qq10,felec);
1124 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1125 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1126 velec = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
1127 velec = _mm256_mul_ps(qq10,velec);
1129 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1131 /* Update potential sum for this i atom from the interaction with this j atom. */
1132 velec = _mm256_and_ps(velec,cutoff_mask);
1133 velec = _mm256_andnot_ps(dummy_mask,velec);
1134 velecsum = _mm256_add_ps(velecsum,velec);
1138 fscal = _mm256_and_ps(fscal,cutoff_mask);
1140 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1142 /* Calculate temporary vectorial force */
1143 tx = _mm256_mul_ps(fscal,dx10);
1144 ty = _mm256_mul_ps(fscal,dy10);
1145 tz = _mm256_mul_ps(fscal,dz10);
1147 /* Update vectorial force */
1148 fix1 = _mm256_add_ps(fix1,tx);
1149 fiy1 = _mm256_add_ps(fiy1,ty);
1150 fiz1 = _mm256_add_ps(fiz1,tz);
1152 fjx0 = _mm256_add_ps(fjx0,tx);
1153 fjy0 = _mm256_add_ps(fjy0,ty);
1154 fjz0 = _mm256_add_ps(fjz0,tz);
1158 /**************************
1159 * CALCULATE INTERACTIONS *
1160 **************************/
1162 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1165 r11 = _mm256_mul_ps(rsq11,rinv11);
1166 r11 = _mm256_andnot_ps(dummy_mask,r11);
1168 /* EWALD ELECTROSTATICS */
1170 /* Analytical PME correction */
1171 zeta2 = _mm256_mul_ps(beta2,rsq11);
1172 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1173 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1174 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1175 felec = _mm256_mul_ps(qq11,felec);
1176 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1177 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1178 velec = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
1179 velec = _mm256_mul_ps(qq11,velec);
1181 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1183 /* Update potential sum for this i atom from the interaction with this j atom. */
1184 velec = _mm256_and_ps(velec,cutoff_mask);
1185 velec = _mm256_andnot_ps(dummy_mask,velec);
1186 velecsum = _mm256_add_ps(velecsum,velec);
1190 fscal = _mm256_and_ps(fscal,cutoff_mask);
1192 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1194 /* Calculate temporary vectorial force */
1195 tx = _mm256_mul_ps(fscal,dx11);
1196 ty = _mm256_mul_ps(fscal,dy11);
1197 tz = _mm256_mul_ps(fscal,dz11);
1199 /* Update vectorial force */
1200 fix1 = _mm256_add_ps(fix1,tx);
1201 fiy1 = _mm256_add_ps(fiy1,ty);
1202 fiz1 = _mm256_add_ps(fiz1,tz);
1204 fjx1 = _mm256_add_ps(fjx1,tx);
1205 fjy1 = _mm256_add_ps(fjy1,ty);
1206 fjz1 = _mm256_add_ps(fjz1,tz);
1210 /**************************
1211 * CALCULATE INTERACTIONS *
1212 **************************/
1214 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1217 r12 = _mm256_mul_ps(rsq12,rinv12);
1218 r12 = _mm256_andnot_ps(dummy_mask,r12);
1220 /* EWALD ELECTROSTATICS */
1222 /* Analytical PME correction */
1223 zeta2 = _mm256_mul_ps(beta2,rsq12);
1224 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1225 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1226 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1227 felec = _mm256_mul_ps(qq12,felec);
1228 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1229 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1230 velec = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
1231 velec = _mm256_mul_ps(qq12,velec);
1233 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1235 /* Update potential sum for this i atom from the interaction with this j atom. */
1236 velec = _mm256_and_ps(velec,cutoff_mask);
1237 velec = _mm256_andnot_ps(dummy_mask,velec);
1238 velecsum = _mm256_add_ps(velecsum,velec);
1242 fscal = _mm256_and_ps(fscal,cutoff_mask);
1244 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1246 /* Calculate temporary vectorial force */
1247 tx = _mm256_mul_ps(fscal,dx12);
1248 ty = _mm256_mul_ps(fscal,dy12);
1249 tz = _mm256_mul_ps(fscal,dz12);
1251 /* Update vectorial force */
1252 fix1 = _mm256_add_ps(fix1,tx);
1253 fiy1 = _mm256_add_ps(fiy1,ty);
1254 fiz1 = _mm256_add_ps(fiz1,tz);
1256 fjx2 = _mm256_add_ps(fjx2,tx);
1257 fjy2 = _mm256_add_ps(fjy2,ty);
1258 fjz2 = _mm256_add_ps(fjz2,tz);
1262 /**************************
1263 * CALCULATE INTERACTIONS *
1264 **************************/
1266 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1269 r20 = _mm256_mul_ps(rsq20,rinv20);
1270 r20 = _mm256_andnot_ps(dummy_mask,r20);
1272 /* EWALD ELECTROSTATICS */
1274 /* Analytical PME correction */
1275 zeta2 = _mm256_mul_ps(beta2,rsq20);
1276 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1277 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1278 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1279 felec = _mm256_mul_ps(qq20,felec);
1280 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1281 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1282 velec = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
1283 velec = _mm256_mul_ps(qq20,velec);
1285 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1287 /* Update potential sum for this i atom from the interaction with this j atom. */
1288 velec = _mm256_and_ps(velec,cutoff_mask);
1289 velec = _mm256_andnot_ps(dummy_mask,velec);
1290 velecsum = _mm256_add_ps(velecsum,velec);
1294 fscal = _mm256_and_ps(fscal,cutoff_mask);
1296 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1298 /* Calculate temporary vectorial force */
1299 tx = _mm256_mul_ps(fscal,dx20);
1300 ty = _mm256_mul_ps(fscal,dy20);
1301 tz = _mm256_mul_ps(fscal,dz20);
1303 /* Update vectorial force */
1304 fix2 = _mm256_add_ps(fix2,tx);
1305 fiy2 = _mm256_add_ps(fiy2,ty);
1306 fiz2 = _mm256_add_ps(fiz2,tz);
1308 fjx0 = _mm256_add_ps(fjx0,tx);
1309 fjy0 = _mm256_add_ps(fjy0,ty);
1310 fjz0 = _mm256_add_ps(fjz0,tz);
1314 /**************************
1315 * CALCULATE INTERACTIONS *
1316 **************************/
1318 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1321 r21 = _mm256_mul_ps(rsq21,rinv21);
1322 r21 = _mm256_andnot_ps(dummy_mask,r21);
1324 /* EWALD ELECTROSTATICS */
1326 /* Analytical PME correction */
1327 zeta2 = _mm256_mul_ps(beta2,rsq21);
1328 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
1329 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1330 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1331 felec = _mm256_mul_ps(qq21,felec);
1332 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1333 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1334 velec = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
1335 velec = _mm256_mul_ps(qq21,velec);
1337 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1339 /* Update potential sum for this i atom from the interaction with this j atom. */
1340 velec = _mm256_and_ps(velec,cutoff_mask);
1341 velec = _mm256_andnot_ps(dummy_mask,velec);
1342 velecsum = _mm256_add_ps(velecsum,velec);
1346 fscal = _mm256_and_ps(fscal,cutoff_mask);
1348 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1350 /* Calculate temporary vectorial force */
1351 tx = _mm256_mul_ps(fscal,dx21);
1352 ty = _mm256_mul_ps(fscal,dy21);
1353 tz = _mm256_mul_ps(fscal,dz21);
1355 /* Update vectorial force */
1356 fix2 = _mm256_add_ps(fix2,tx);
1357 fiy2 = _mm256_add_ps(fiy2,ty);
1358 fiz2 = _mm256_add_ps(fiz2,tz);
1360 fjx1 = _mm256_add_ps(fjx1,tx);
1361 fjy1 = _mm256_add_ps(fjy1,ty);
1362 fjz1 = _mm256_add_ps(fjz1,tz);
1366 /**************************
1367 * CALCULATE INTERACTIONS *
1368 **************************/
1370 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1373 r22 = _mm256_mul_ps(rsq22,rinv22);
1374 r22 = _mm256_andnot_ps(dummy_mask,r22);
1376 /* EWALD ELECTROSTATICS */
1378 /* Analytical PME correction */
1379 zeta2 = _mm256_mul_ps(beta2,rsq22);
1380 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
1381 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1382 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1383 felec = _mm256_mul_ps(qq22,felec);
1384 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1385 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1386 velec = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
1387 velec = _mm256_mul_ps(qq22,velec);
1389 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1391 /* Update potential sum for this i atom from the interaction with this j atom. */
1392 velec = _mm256_and_ps(velec,cutoff_mask);
1393 velec = _mm256_andnot_ps(dummy_mask,velec);
1394 velecsum = _mm256_add_ps(velecsum,velec);
1398 fscal = _mm256_and_ps(fscal,cutoff_mask);
1400 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1402 /* Calculate temporary vectorial force */
1403 tx = _mm256_mul_ps(fscal,dx22);
1404 ty = _mm256_mul_ps(fscal,dy22);
1405 tz = _mm256_mul_ps(fscal,dz22);
1407 /* Update vectorial force */
1408 fix2 = _mm256_add_ps(fix2,tx);
1409 fiy2 = _mm256_add_ps(fiy2,ty);
1410 fiz2 = _mm256_add_ps(fiz2,tz);
1412 fjx2 = _mm256_add_ps(fjx2,tx);
1413 fjy2 = _mm256_add_ps(fjy2,ty);
1414 fjz2 = _mm256_add_ps(fjz2,tz);
1418 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1419 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1420 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1421 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1422 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1423 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1424 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1425 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1427 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1428 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
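            /* For padded lanes the force pointer was redirected to the local scratch
             * buffer above, so the swizzled decrement can store all eight lanes
             * unconditionally; the masked (zeroed) dummy contributions simply land
             * in scratch and are discarded.
             */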
1430 /* Inner loop uses 1026 flops */
1433 /* End of innermost loop */
1435 gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1436 f+i_coord_offset,fshift+i_shift_offset);
        ggid                        = gid[iidx];
        /* Update potential energies */
1440 gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1441 gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
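        /* gmx_mm256_update_1pot_ps() reduces the eight lanes of each accumulator
         * horizontally and adds the sum to the energy-group bin selected by ggid,
         * so per-group electrostatic and VdW energies come out of the same pass.
         */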
1443 /* Increment number of inner iterations */
1444 inneriter += j_index_end - j_index_start;
1446 /* Outer loop uses 20 flops */
1449 /* Increment number of outer iterations */
1452 /* Update outer/inner flops */
1454 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*1026);
}
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_F_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LJEwald
 * Geometry:                   Water3-Water3
 * Calculate force/pot:        Force
 */
void
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_F_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
1478 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1479 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1480 int jnrA,jnrB,jnrC,jnrD;
1481 int jnrE,jnrF,jnrG,jnrH;
1482 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1483 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1484 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1485 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1486 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1487 real rcutoff_scalar;
1488 real *shiftvec,*fshift,*x,*f;
1489 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1490 real scratch[4*DIM];
1491 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1492 real * vdwioffsetptr0;
1493 real * vdwgridioffsetptr0;
1494 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1495 real * vdwioffsetptr1;
1496 real * vdwgridioffsetptr1;
1497 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1498 real * vdwioffsetptr2;
1499 real * vdwgridioffsetptr2;
1500 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1501 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1502 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1503 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1504 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1505 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1506 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1507 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1508 __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1509 __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1510 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1511 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1512 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1513 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1514 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1515 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1516 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
1519 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1522 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
1523 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
1534 __m256 ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
1535 __m256 one_half = _mm256_set1_ps(0.5);
1536 __m256 minus_one = _mm256_set1_ps(-1.0);
1538 __m128i ewitab_lo,ewitab_hi;
1539 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1540 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1542 __m256 dummy_mask,cutoff_mask;
1543 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1544 __m256 one = _mm256_set1_ps(1.0);
1545 __m256 two = _mm256_set1_ps(2.0);
1551 jindex = nlist->jindex;
1553 shiftidx = nlist->shift;
1555 shiftvec = fr->shift_vec[0];
1556 fshift = fr->fshift[0];
1557 facel = _mm256_set1_ps(fr->epsfac);
1558 charge = mdatoms->chargeA;
1559 nvdwtype = fr->ntype;
1560 vdwparam = fr->nbfp;
1561 vdwtype = mdatoms->typeA;
1562 vdwgridparam = fr->ljpme_c6grid;
1563 sh_lj_ewald = _mm256_set1_ps(fr->ic->sh_lj_ewald);
1564 ewclj = _mm256_set1_ps(fr->ewaldcoeff_lj);
1565 ewclj2 = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
1567 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
1568 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1569 beta2 = _mm256_mul_ps(beta,beta);
1570 beta3 = _mm256_mul_ps(beta,beta2);
1572 ewtab = fr->ic->tabq_coul_F;
1573 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
1574 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1576 /* Setup water-specific parameters */
1577 inr = nlist->iinr[0];
1578 iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1579 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1580 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1581 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1582 vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
1584 jq0 = _mm256_set1_ps(charge[inr+0]);
1585 jq1 = _mm256_set1_ps(charge[inr+1]);
1586 jq2 = _mm256_set1_ps(charge[inr+2]);
1587 vdwjidx0A = 2*vdwtype[inr+0];
1588 qq00 = _mm256_mul_ps(iq0,jq0);
1589 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1590 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1591 c6grid_00 = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
1592 qq01 = _mm256_mul_ps(iq0,jq1);
1593 qq02 = _mm256_mul_ps(iq0,jq2);
1594 qq10 = _mm256_mul_ps(iq1,jq0);
1595 qq11 = _mm256_mul_ps(iq1,jq1);
1596 qq12 = _mm256_mul_ps(iq1,jq2);
1597 qq20 = _mm256_mul_ps(iq2,jq0);
1598 qq21 = _mm256_mul_ps(iq2,jq1);
1599 qq22 = _mm256_mul_ps(iq2,jq2);
    /* With explicit cutoffs the electrostatics and VdW cutoff values must be identical, so use the electrostatics one as an arbitrary choice */
1602 rcutoff_scalar = fr->rcoulomb;
1603 rcutoff = _mm256_set1_ps(rcutoff_scalar);
1604 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
1606 sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
1607 rvdw = _mm256_set1_ps(fr->rvdw);
1609 /* Avoid stupid compiler warnings */
1610 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1611 j_coord_offsetA = 0;
1612 j_coord_offsetB = 0;
1613 j_coord_offsetC = 0;
1614 j_coord_offsetD = 0;
1615 j_coord_offsetE = 0;
1616 j_coord_offsetF = 0;
1617 j_coord_offsetG = 0;
1618 j_coord_offsetH = 0;
1623 for(iidx=0;iidx<4*DIM;iidx++)
1625 scratch[iidx] = 0.0;
1628 /* Start outer loop over neighborlists */
1629 for(iidx=0; iidx<nri; iidx++)
1631 /* Load shift vector for this list */
1632 i_shift_offset = DIM*shiftidx[iidx];
1634 /* Load limits for loop over neighbors */
1635 j_index_start = jindex[iidx];
1636 j_index_end = jindex[iidx+1];
        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;
1642 /* Load i particle coords and add shift vector */
1643 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1644 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1646 fix0 = _mm256_setzero_ps();
1647 fiy0 = _mm256_setzero_ps();
1648 fiz0 = _mm256_setzero_ps();
1649 fix1 = _mm256_setzero_ps();
1650 fiy1 = _mm256_setzero_ps();
1651 fiz1 = _mm256_setzero_ps();
1652 fix2 = _mm256_setzero_ps();
1653 fiy2 = _mm256_setzero_ps();
1654 fiz2 = _mm256_setzero_ps();
1656 /* Start inner kernel loop */
1657 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
1663 jnrC = jjnr[jidx+2];
1664 jnrD = jjnr[jidx+3];
1665 jnrE = jjnr[jidx+4];
1666 jnrF = jjnr[jidx+5];
1667 jnrG = jjnr[jidx+6];
1668 jnrH = jjnr[jidx+7];
1669 j_coord_offsetA = DIM*jnrA;
1670 j_coord_offsetB = DIM*jnrB;
1671 j_coord_offsetC = DIM*jnrC;
1672 j_coord_offsetD = DIM*jnrD;
1673 j_coord_offsetE = DIM*jnrE;
1674 j_coord_offsetF = DIM*jnrF;
1675 j_coord_offsetG = DIM*jnrG;
1676 j_coord_offsetH = DIM*jnrH;
1678 /* load j atom coordinates */
1679 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1680 x+j_coord_offsetC,x+j_coord_offsetD,
1681 x+j_coord_offsetE,x+j_coord_offsetF,
1682 x+j_coord_offsetG,x+j_coord_offsetH,
1683 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1685 /* Calculate displacement vector */
1686 dx00 = _mm256_sub_ps(ix0,jx0);
1687 dy00 = _mm256_sub_ps(iy0,jy0);
1688 dz00 = _mm256_sub_ps(iz0,jz0);
1689 dx01 = _mm256_sub_ps(ix0,jx1);
1690 dy01 = _mm256_sub_ps(iy0,jy1);
1691 dz01 = _mm256_sub_ps(iz0,jz1);
1692 dx02 = _mm256_sub_ps(ix0,jx2);
1693 dy02 = _mm256_sub_ps(iy0,jy2);
1694 dz02 = _mm256_sub_ps(iz0,jz2);
1695 dx10 = _mm256_sub_ps(ix1,jx0);
1696 dy10 = _mm256_sub_ps(iy1,jy0);
1697 dz10 = _mm256_sub_ps(iz1,jz0);
1698 dx11 = _mm256_sub_ps(ix1,jx1);
1699 dy11 = _mm256_sub_ps(iy1,jy1);
1700 dz11 = _mm256_sub_ps(iz1,jz1);
1701 dx12 = _mm256_sub_ps(ix1,jx2);
1702 dy12 = _mm256_sub_ps(iy1,jy2);
1703 dz12 = _mm256_sub_ps(iz1,jz2);
1704 dx20 = _mm256_sub_ps(ix2,jx0);
1705 dy20 = _mm256_sub_ps(iy2,jy0);
1706 dz20 = _mm256_sub_ps(iz2,jz0);
1707 dx21 = _mm256_sub_ps(ix2,jx1);
1708 dy21 = _mm256_sub_ps(iy2,jy1);
1709 dz21 = _mm256_sub_ps(iz2,jz1);
1710 dx22 = _mm256_sub_ps(ix2,jx2);
1711 dy22 = _mm256_sub_ps(iy2,jy2);
1712 dz22 = _mm256_sub_ps(iz2,jz2);
1714 /* Calculate squared distance and things based on it */
1715 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1716 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1717 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1718 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1719 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1720 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1721 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1722 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1723 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1725 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
1726 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
1727 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
1728 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
1729 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
1730 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
1731 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
1732 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
1733 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
1735 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1736 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
1737 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
1738 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
1739 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
1740 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
1741 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
1742 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
1743 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
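/* For each of the 3x3 atom pairs: rsq = dx*dx + dy*dy + dz*dz and
 * rinv = 1/sqrt(rsq). In single precision the invsqrt helper is typically an
 * _mm256_rsqrt_ps() estimate refined with one Newton-Raphson step, roughly
 * y' = y*(1.5 - 0.5*rsq*y*y) (a sketch of the usual refinement, not taken
 * from this file).
 */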
1745 fjx0 = _mm256_setzero_ps();
1746 fjy0 = _mm256_setzero_ps();
1747 fjz0 = _mm256_setzero_ps();
1748 fjx1 = _mm256_setzero_ps();
1749 fjy1 = _mm256_setzero_ps();
1750 fjz1 = _mm256_setzero_ps();
1751 fjx2 = _mm256_setzero_ps();
1752 fjy2 = _mm256_setzero_ps();
1753 fjz2 = _mm256_setzero_ps();
1755 /**************************
1756 * CALCULATE INTERACTIONS *
1757 **************************/
1759 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1762 r00 = _mm256_mul_ps(rsq00,rinv00);
1764 /* EWALD ELECTROSTATICS */
1766 /* Analytical PME correction */
1767 zeta2 = _mm256_mul_ps(beta2,rsq00);
1768 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
1769 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1770 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1771 felec = _mm256_mul_ps(qq00,felec);
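/* Scalar sketch of what this assembles: the real-space Ewald force divided
 * by r, approximately
 *   felec ~= qq00*( erfc(beta*r)/r^3 + (2*beta/sqrt(pi))*exp(-(beta*r)^2)/r^2 ),
 * written here as qq00*( 1/r^3 + beta^3*pmecorrF(zeta2) ), so that only the
 * smooth correction term needs the polynomial approximation pmecorrF.
 */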
1773 /* Analytical LJ-PME */
1774 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1775 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
1776 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1777 exponent = gmx_simd_exp_r(ewcljrsq);
1778 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1779 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1780 /* f6A = 6 * C6grid * (1 - poly) */
1781 f6A = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1782 /* f6B = C6grid * exponent * beta^6 */
1783 f6B = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1784 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1785 fvdw = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
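/* LJ-PME (LJEwald), roughly speaking: the r^-6 dispersion is split between
 * this real-space kernel and the reciprocal-space grid. The f6A and f6B terms,
 * built from the combination-rule grid parameter c6grid_00, are intended to
 * remove the part of the dispersion force already provided by the grid, so
 * only the short-range remainder enters fvdw here.
 */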
1787 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1789 fscal = _mm256_add_ps(felec,fvdw);
1791 fscal = _mm256_and_ps(fscal,cutoff_mask);
1793 /* Calculate temporary vectorial force */
1794 tx = _mm256_mul_ps(fscal,dx00);
1795 ty = _mm256_mul_ps(fscal,dy00);
1796 tz = _mm256_mul_ps(fscal,dz00);
1798 /* Update vectorial force */
1799 fix0 = _mm256_add_ps(fix0,tx);
1800 fiy0 = _mm256_add_ps(fiy0,ty);
1801 fiz0 = _mm256_add_ps(fiz0,tz);
1803 fjx0 = _mm256_add_ps(fjx0,tx);
1804 fjy0 = _mm256_add_ps(fjy0,ty);
1805 fjz0 = _mm256_add_ps(fjz0,tz);
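/* Newton's third law: the same tx/ty/tz added to the i accumulators are
 * collected in the fj* registers and subtracted from the j atoms in one go by
 * the decrement call after all nine pair interactions.
 */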
1809 /**************************
1810 * CALCULATE INTERACTIONS *
1811 **************************/
1813 if (gmx_mm256_any_lt(rsq01,rcutoff2))
1816 r01 = _mm256_mul_ps(rsq01,rinv01);
1818 /* EWALD ELECTROSTATICS */
1820 /* Analytical PME correction */
1821 zeta2 = _mm256_mul_ps(beta2,rsq01);
1822 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
1823 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1824 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1825 felec = _mm256_mul_ps(qq01,felec);
1827 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
1829 fscal = felec;
1831 fscal = _mm256_and_ps(fscal,cutoff_mask);
1833 /* Calculate temporary vectorial force */
1834 tx = _mm256_mul_ps(fscal,dx01);
1835 ty = _mm256_mul_ps(fscal,dy01);
1836 tz = _mm256_mul_ps(fscal,dz01);
1838 /* Update vectorial force */
1839 fix0 = _mm256_add_ps(fix0,tx);
1840 fiy0 = _mm256_add_ps(fiy0,ty);
1841 fiz0 = _mm256_add_ps(fiz0,tz);
1843 fjx1 = _mm256_add_ps(fjx1,tx);
1844 fjy1 = _mm256_add_ps(fjy1,ty);
1845 fjz1 = _mm256_add_ps(fjz1,tz);
1849 /**************************
1850 * CALCULATE INTERACTIONS *
1851 **************************/
1853 if (gmx_mm256_any_lt(rsq02,rcutoff2))
1856 r02 = _mm256_mul_ps(rsq02,rinv02);
1858 /* EWALD ELECTROSTATICS */
1860 /* Analytical PME correction */
1861 zeta2 = _mm256_mul_ps(beta2,rsq02);
1862 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
1863 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1864 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1865 felec = _mm256_mul_ps(qq02,felec);
1867 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
1869 fscal = felec;
1871 fscal = _mm256_and_ps(fscal,cutoff_mask);
1873 /* Calculate temporary vectorial force */
1874 tx = _mm256_mul_ps(fscal,dx02);
1875 ty = _mm256_mul_ps(fscal,dy02);
1876 tz = _mm256_mul_ps(fscal,dz02);
1878 /* Update vectorial force */
1879 fix0 = _mm256_add_ps(fix0,tx);
1880 fiy0 = _mm256_add_ps(fiy0,ty);
1881 fiz0 = _mm256_add_ps(fiz0,tz);
1883 fjx2 = _mm256_add_ps(fjx2,tx);
1884 fjy2 = _mm256_add_ps(fjy2,ty);
1885 fjz2 = _mm256_add_ps(fjz2,tz);
1889 /**************************
1890 * CALCULATE INTERACTIONS *
1891 **************************/
1893 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1896 r10 = _mm256_mul_ps(rsq10,rinv10);
1898 /* EWALD ELECTROSTATICS */
1900 /* Analytical PME correction */
1901 zeta2 = _mm256_mul_ps(beta2,rsq10);
1902 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1903 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1904 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1905 felec = _mm256_mul_ps(qq10,felec);
1907 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1909 fscal = felec;
1911 fscal = _mm256_and_ps(fscal,cutoff_mask);
1913 /* Calculate temporary vectorial force */
1914 tx = _mm256_mul_ps(fscal,dx10);
1915 ty = _mm256_mul_ps(fscal,dy10);
1916 tz = _mm256_mul_ps(fscal,dz10);
1918 /* Update vectorial force */
1919 fix1 = _mm256_add_ps(fix1,tx);
1920 fiy1 = _mm256_add_ps(fiy1,ty);
1921 fiz1 = _mm256_add_ps(fiz1,tz);
1923 fjx0 = _mm256_add_ps(fjx0,tx);
1924 fjy0 = _mm256_add_ps(fjy0,ty);
1925 fjz0 = _mm256_add_ps(fjz0,tz);
1929 /**************************
1930 * CALCULATE INTERACTIONS *
1931 **************************/
1933 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1936 r11 = _mm256_mul_ps(rsq11,rinv11);
1938 /* EWALD ELECTROSTATICS */
1940 /* Analytical PME correction */
1941 zeta2 = _mm256_mul_ps(beta2,rsq11);
1942 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
1943 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1944 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1945 felec = _mm256_mul_ps(qq11,felec);
1947 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1949 fscal = felec;
1951 fscal = _mm256_and_ps(fscal,cutoff_mask);
1953 /* Calculate temporary vectorial force */
1954 tx = _mm256_mul_ps(fscal,dx11);
1955 ty = _mm256_mul_ps(fscal,dy11);
1956 tz = _mm256_mul_ps(fscal,dz11);
1958 /* Update vectorial force */
1959 fix1 = _mm256_add_ps(fix1,tx);
1960 fiy1 = _mm256_add_ps(fiy1,ty);
1961 fiz1 = _mm256_add_ps(fiz1,tz);
1963 fjx1 = _mm256_add_ps(fjx1,tx);
1964 fjy1 = _mm256_add_ps(fjy1,ty);
1965 fjz1 = _mm256_add_ps(fjz1,tz);
1969 /**************************
1970 * CALCULATE INTERACTIONS *
1971 **************************/
1973 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1976 r12 = _mm256_mul_ps(rsq12,rinv12);
1978 /* EWALD ELECTROSTATICS */
1980 /* Analytical PME correction */
1981 zeta2 = _mm256_mul_ps(beta2,rsq12);
1982 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
1983 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1984 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1985 felec = _mm256_mul_ps(qq12,felec);
1987 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1989 fscal = felec;
1991 fscal = _mm256_and_ps(fscal,cutoff_mask);
1993 /* Calculate temporary vectorial force */
1994 tx = _mm256_mul_ps(fscal,dx12);
1995 ty = _mm256_mul_ps(fscal,dy12);
1996 tz = _mm256_mul_ps(fscal,dz12);
1998 /* Update vectorial force */
1999 fix1 = _mm256_add_ps(fix1,tx);
2000 fiy1 = _mm256_add_ps(fiy1,ty);
2001 fiz1 = _mm256_add_ps(fiz1,tz);
2003 fjx2 = _mm256_add_ps(fjx2,tx);
2004 fjy2 = _mm256_add_ps(fjy2,ty);
2005 fjz2 = _mm256_add_ps(fjz2,tz);
2009 /**************************
2010 * CALCULATE INTERACTIONS *
2011 **************************/
2013 if (gmx_mm256_any_lt(rsq20,rcutoff2))
2016 r20 = _mm256_mul_ps(rsq20,rinv20);
2018 /* EWALD ELECTROSTATICS */
2020 /* Analytical PME correction */
2021 zeta2 = _mm256_mul_ps(beta2,rsq20);
2022 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
2023 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2024 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2025 felec = _mm256_mul_ps(qq20,felec);
2027 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
2029 fscal = felec;
2031 fscal = _mm256_and_ps(fscal,cutoff_mask);
2033 /* Calculate temporary vectorial force */
2034 tx = _mm256_mul_ps(fscal,dx20);
2035 ty = _mm256_mul_ps(fscal,dy20);
2036 tz = _mm256_mul_ps(fscal,dz20);
2038 /* Update vectorial force */
2039 fix2 = _mm256_add_ps(fix2,tx);
2040 fiy2 = _mm256_add_ps(fiy2,ty);
2041 fiz2 = _mm256_add_ps(fiz2,tz);
2043 fjx0 = _mm256_add_ps(fjx0,tx);
2044 fjy0 = _mm256_add_ps(fjy0,ty);
2045 fjz0 = _mm256_add_ps(fjz0,tz);
2049 /**************************
2050 * CALCULATE INTERACTIONS *
2051 **************************/
2053 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2056 r21 = _mm256_mul_ps(rsq21,rinv21);
2058 /* EWALD ELECTROSTATICS */
2060 /* Analytical PME correction */
2061 zeta2 = _mm256_mul_ps(beta2,rsq21);
2062 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
2063 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2064 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2065 felec = _mm256_mul_ps(qq21,felec);
2067 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2069 fscal = felec;
2071 fscal = _mm256_and_ps(fscal,cutoff_mask);
2073 /* Calculate temporary vectorial force */
2074 tx = _mm256_mul_ps(fscal,dx21);
2075 ty = _mm256_mul_ps(fscal,dy21);
2076 tz = _mm256_mul_ps(fscal,dz21);
2078 /* Update vectorial force */
2079 fix2 = _mm256_add_ps(fix2,tx);
2080 fiy2 = _mm256_add_ps(fiy2,ty);
2081 fiz2 = _mm256_add_ps(fiz2,tz);
2083 fjx1 = _mm256_add_ps(fjx1,tx);
2084 fjy1 = _mm256_add_ps(fjy1,ty);
2085 fjz1 = _mm256_add_ps(fjz1,tz);
2089 /**************************
2090 * CALCULATE INTERACTIONS *
2091 **************************/
2093 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2096 r22 = _mm256_mul_ps(rsq22,rinv22);
2098 /* EWALD ELECTROSTATICS */
2100 /* Analytical PME correction */
2101 zeta2 = _mm256_mul_ps(beta2,rsq22);
2102 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
2103 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2104 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2105 felec = _mm256_mul_ps(qq22,felec);
2107 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2109 fscal = felec;
2111 fscal = _mm256_and_ps(fscal,cutoff_mask);
2113 /* Calculate temporary vectorial force */
2114 tx = _mm256_mul_ps(fscal,dx22);
2115 ty = _mm256_mul_ps(fscal,dy22);
2116 tz = _mm256_mul_ps(fscal,dz22);
2118 /* Update vectorial force */
2119 fix2 = _mm256_add_ps(fix2,tx);
2120 fiy2 = _mm256_add_ps(fiy2,ty);
2121 fiz2 = _mm256_add_ps(fiz2,tz);
2123 fjx2 = _mm256_add_ps(fjx2,tx);
2124 fjy2 = _mm256_add_ps(fjy2,ty);
2125 fjz2 = _mm256_add_ps(fjz2,tz);
2129 fjptrA = f+j_coord_offsetA;
2130 fjptrB = f+j_coord_offsetB;
2131 fjptrC = f+j_coord_offsetC;
2132 fjptrD = f+j_coord_offsetD;
2133 fjptrE = f+j_coord_offsetE;
2134 fjptrF = f+j_coord_offsetF;
2135 fjptrG = f+j_coord_offsetG;
2136 fjptrH = f+j_coord_offsetH;
2138 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2139 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
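/* Transpose the nine fj* registers back to rvec order and subtract them from
 * the force array entries of the eight j waters; doing a single swizzled
 * scatter per chunk keeps the j-force memory update out of the per-interaction
 * inner code.
 */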
2141 /* Inner loop uses 554 flops */
2144 if(jidx<j_index_end)
2147 /* Get j neighbor index, and coordinate index */
2148 jnrlistA = jjnr[jidx];
2149 jnrlistB = jjnr[jidx+1];
2150 jnrlistC = jjnr[jidx+2];
2151 jnrlistD = jjnr[jidx+3];
2152 jnrlistE = jjnr[jidx+4];
2153 jnrlistF = jjnr[jidx+5];
2154 jnrlistG = jjnr[jidx+6];
2155 jnrlistH = jjnr[jidx+7];
2156 /* Sign of each element will be negative for non-real atoms.
2157 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2158 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
2159 */
2160 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
2161 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
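/* The neighbor list is padded to a multiple of 8 with negative j indices, so
 * comparing the raw jjnr entries against zero yields an all-ones lane exactly
 * for the padded entries. For example, if only five real neighbors remain in
 * this chunk, lanes F, G and H of dummy_mask end up as 0xFFFFFFFF and are
 * later cleared from r and fscal (illustrative example; the padding value is
 * simply any negative index).
 */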
2163 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
2164 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
2165 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
2166 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
2167 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
2168 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
2169 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
2170 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
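/* Padded (negative) indices are clamped to 0 so the coordinate gathers below
 * stay within the allocated x array; the values loaded for those lanes are
 * meaningless but harmless, since fscal is cleared with dummy_mask and their
 * forces are routed to the scratch buffer instead of f (see the fjptr*
 * selection further down).
 */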
2171 j_coord_offsetA = DIM*jnrA;
2172 j_coord_offsetB = DIM*jnrB;
2173 j_coord_offsetC = DIM*jnrC;
2174 j_coord_offsetD = DIM*jnrD;
2175 j_coord_offsetE = DIM*jnrE;
2176 j_coord_offsetF = DIM*jnrF;
2177 j_coord_offsetG = DIM*jnrG;
2178 j_coord_offsetH = DIM*jnrH;
2180 /* load j atom coordinates */
2181 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2182 x+j_coord_offsetC,x+j_coord_offsetD,
2183 x+j_coord_offsetE,x+j_coord_offsetF,
2184 x+j_coord_offsetG,x+j_coord_offsetH,
2185 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
2187 /* Calculate displacement vector */
2188 dx00 = _mm256_sub_ps(ix0,jx0);
2189 dy00 = _mm256_sub_ps(iy0,jy0);
2190 dz00 = _mm256_sub_ps(iz0,jz0);
2191 dx01 = _mm256_sub_ps(ix0,jx1);
2192 dy01 = _mm256_sub_ps(iy0,jy1);
2193 dz01 = _mm256_sub_ps(iz0,jz1);
2194 dx02 = _mm256_sub_ps(ix0,jx2);
2195 dy02 = _mm256_sub_ps(iy0,jy2);
2196 dz02 = _mm256_sub_ps(iz0,jz2);
2197 dx10 = _mm256_sub_ps(ix1,jx0);
2198 dy10 = _mm256_sub_ps(iy1,jy0);
2199 dz10 = _mm256_sub_ps(iz1,jz0);
2200 dx11 = _mm256_sub_ps(ix1,jx1);
2201 dy11 = _mm256_sub_ps(iy1,jy1);
2202 dz11 = _mm256_sub_ps(iz1,jz1);
2203 dx12 = _mm256_sub_ps(ix1,jx2);
2204 dy12 = _mm256_sub_ps(iy1,jy2);
2205 dz12 = _mm256_sub_ps(iz1,jz2);
2206 dx20 = _mm256_sub_ps(ix2,jx0);
2207 dy20 = _mm256_sub_ps(iy2,jy0);
2208 dz20 = _mm256_sub_ps(iz2,jz0);
2209 dx21 = _mm256_sub_ps(ix2,jx1);
2210 dy21 = _mm256_sub_ps(iy2,jy1);
2211 dz21 = _mm256_sub_ps(iz2,jz1);
2212 dx22 = _mm256_sub_ps(ix2,jx2);
2213 dy22 = _mm256_sub_ps(iy2,jy2);
2214 dz22 = _mm256_sub_ps(iz2,jz2);
2216 /* Calculate squared distance and things based on it */
2217 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2218 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
2219 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
2220 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
2221 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2222 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2223 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
2224 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2225 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2227 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
2228 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
2229 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
2230 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
2231 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
2232 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
2233 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
2234 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
2235 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
2237 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
2238 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
2239 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
2240 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
2241 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
2242 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
2243 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
2244 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
2245 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
2247 fjx0 = _mm256_setzero_ps();
2248 fjy0 = _mm256_setzero_ps();
2249 fjz0 = _mm256_setzero_ps();
2250 fjx1 = _mm256_setzero_ps();
2251 fjy1 = _mm256_setzero_ps();
2252 fjz1 = _mm256_setzero_ps();
2253 fjx2 = _mm256_setzero_ps();
2254 fjy2 = _mm256_setzero_ps();
2255 fjz2 = _mm256_setzero_ps();
2257 /**************************
2258 * CALCULATE INTERACTIONS *
2259 **************************/
2261 if (gmx_mm256_any_lt(rsq00,rcutoff2))
2264 r00 = _mm256_mul_ps(rsq00,rinv00);
2265 r00 = _mm256_andnot_ps(dummy_mask,r00);
2267 /* EWALD ELECTROSTATICS */
2269 /* Analytical PME correction */
2270 zeta2 = _mm256_mul_ps(beta2,rsq00);
2271 rinv3 = _mm256_mul_ps(rinvsq00,rinv00);
2272 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2273 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2274 felec = _mm256_mul_ps(qq00,felec);
2276 /* Analytical LJ-PME */
2277 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2278 ewcljrsq = _mm256_mul_ps(ewclj2,rsq00);
2279 ewclj6 = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
2280 exponent = gmx_simd_exp_r(ewcljrsq);
2281 /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
2282 poly = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
2283 /* f6A = 6 * C6grid * (1 - poly) */
2284 f6A = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
2285 /* f6B = C6grid * exponent * beta^6 */
2286 f6B = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
2287 /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
2288 fvdw = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
2290 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2292 fscal = _mm256_add_ps(felec,fvdw);
2294 fscal = _mm256_and_ps(fscal,cutoff_mask);
2296 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2298 /* Calculate temporary vectorial force */
2299 tx = _mm256_mul_ps(fscal,dx00);
2300 ty = _mm256_mul_ps(fscal,dy00);
2301 tz = _mm256_mul_ps(fscal,dz00);
2303 /* Update vectorial force */
2304 fix0 = _mm256_add_ps(fix0,tx);
2305 fiy0 = _mm256_add_ps(fiy0,ty);
2306 fiz0 = _mm256_add_ps(fiz0,tz);
2308 fjx0 = _mm256_add_ps(fjx0,tx);
2309 fjy0 = _mm256_add_ps(fjy0,ty);
2310 fjz0 = _mm256_add_ps(fjz0,tz);
2314 /**************************
2315 * CALCULATE INTERACTIONS *
2316 **************************/
2318 if (gmx_mm256_any_lt(rsq01,rcutoff2))
2321 r01 = _mm256_mul_ps(rsq01,rinv01);
2322 r01 = _mm256_andnot_ps(dummy_mask,r01);
2324 /* EWALD ELECTROSTATICS */
2326 /* Analytical PME correction */
2327 zeta2 = _mm256_mul_ps(beta2,rsq01);
2328 rinv3 = _mm256_mul_ps(rinvsq01,rinv01);
2329 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2330 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2331 felec = _mm256_mul_ps(qq01,felec);
2333 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
2335 fscal = felec;
2337 fscal = _mm256_and_ps(fscal,cutoff_mask);
2339 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2341 /* Calculate temporary vectorial force */
2342 tx = _mm256_mul_ps(fscal,dx01);
2343 ty = _mm256_mul_ps(fscal,dy01);
2344 tz = _mm256_mul_ps(fscal,dz01);
2346 /* Update vectorial force */
2347 fix0 = _mm256_add_ps(fix0,tx);
2348 fiy0 = _mm256_add_ps(fiy0,ty);
2349 fiz0 = _mm256_add_ps(fiz0,tz);
2351 fjx1 = _mm256_add_ps(fjx1,tx);
2352 fjy1 = _mm256_add_ps(fjy1,ty);
2353 fjz1 = _mm256_add_ps(fjz1,tz);
2357 /**************************
2358 * CALCULATE INTERACTIONS *
2359 **************************/
2361 if (gmx_mm256_any_lt(rsq02,rcutoff2))
2364 r02 = _mm256_mul_ps(rsq02,rinv02);
2365 r02 = _mm256_andnot_ps(dummy_mask,r02);
2367 /* EWALD ELECTROSTATICS */
2369 /* Analytical PME correction */
2370 zeta2 = _mm256_mul_ps(beta2,rsq02);
2371 rinv3 = _mm256_mul_ps(rinvsq02,rinv02);
2372 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2373 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2374 felec = _mm256_mul_ps(qq02,felec);
2376 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
2378 fscal = felec;
2380 fscal = _mm256_and_ps(fscal,cutoff_mask);
2382 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2384 /* Calculate temporary vectorial force */
2385 tx = _mm256_mul_ps(fscal,dx02);
2386 ty = _mm256_mul_ps(fscal,dy02);
2387 tz = _mm256_mul_ps(fscal,dz02);
2389 /* Update vectorial force */
2390 fix0 = _mm256_add_ps(fix0,tx);
2391 fiy0 = _mm256_add_ps(fiy0,ty);
2392 fiz0 = _mm256_add_ps(fiz0,tz);
2394 fjx2 = _mm256_add_ps(fjx2,tx);
2395 fjy2 = _mm256_add_ps(fjy2,ty);
2396 fjz2 = _mm256_add_ps(fjz2,tz);
2400 /**************************
2401 * CALCULATE INTERACTIONS *
2402 **************************/
2404 if (gmx_mm256_any_lt(rsq10,rcutoff2))
2407 r10 = _mm256_mul_ps(rsq10,rinv10);
2408 r10 = _mm256_andnot_ps(dummy_mask,r10);
2410 /* EWALD ELECTROSTATICS */
2412 /* Analytical PME correction */
2413 zeta2 = _mm256_mul_ps(beta2,rsq10);
2414 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
2415 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2416 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2417 felec = _mm256_mul_ps(qq10,felec);
2419 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
2421 fscal = felec;
2423 fscal = _mm256_and_ps(fscal,cutoff_mask);
2425 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2427 /* Calculate temporary vectorial force */
2428 tx = _mm256_mul_ps(fscal,dx10);
2429 ty = _mm256_mul_ps(fscal,dy10);
2430 tz = _mm256_mul_ps(fscal,dz10);
2432 /* Update vectorial force */
2433 fix1 = _mm256_add_ps(fix1,tx);
2434 fiy1 = _mm256_add_ps(fiy1,ty);
2435 fiz1 = _mm256_add_ps(fiz1,tz);
2437 fjx0 = _mm256_add_ps(fjx0,tx);
2438 fjy0 = _mm256_add_ps(fjy0,ty);
2439 fjz0 = _mm256_add_ps(fjz0,tz);
2443 /**************************
2444 * CALCULATE INTERACTIONS *
2445 **************************/
2447 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2450 r11 = _mm256_mul_ps(rsq11,rinv11);
2451 r11 = _mm256_andnot_ps(dummy_mask,r11);
2453 /* EWALD ELECTROSTATICS */
2455 /* Analytical PME correction */
2456 zeta2 = _mm256_mul_ps(beta2,rsq11);
2457 rinv3 = _mm256_mul_ps(rinvsq11,rinv11);
2458 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2459 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2460 felec = _mm256_mul_ps(qq11,felec);
2462 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2464 fscal = felec;
2466 fscal = _mm256_and_ps(fscal,cutoff_mask);
2468 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2470 /* Calculate temporary vectorial force */
2471 tx = _mm256_mul_ps(fscal,dx11);
2472 ty = _mm256_mul_ps(fscal,dy11);
2473 tz = _mm256_mul_ps(fscal,dz11);
2475 /* Update vectorial force */
2476 fix1 = _mm256_add_ps(fix1,tx);
2477 fiy1 = _mm256_add_ps(fiy1,ty);
2478 fiz1 = _mm256_add_ps(fiz1,tz);
2480 fjx1 = _mm256_add_ps(fjx1,tx);
2481 fjy1 = _mm256_add_ps(fjy1,ty);
2482 fjz1 = _mm256_add_ps(fjz1,tz);
2486 /**************************
2487 * CALCULATE INTERACTIONS *
2488 **************************/
2490 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2493 r12 = _mm256_mul_ps(rsq12,rinv12);
2494 r12 = _mm256_andnot_ps(dummy_mask,r12);
2496 /* EWALD ELECTROSTATICS */
2498 /* Analytical PME correction */
2499 zeta2 = _mm256_mul_ps(beta2,rsq12);
2500 rinv3 = _mm256_mul_ps(rinvsq12,rinv12);
2501 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2502 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2503 felec = _mm256_mul_ps(qq12,felec);
2505 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2507 fscal = felec;
2509 fscal = _mm256_and_ps(fscal,cutoff_mask);
2511 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2513 /* Calculate temporary vectorial force */
2514 tx = _mm256_mul_ps(fscal,dx12);
2515 ty = _mm256_mul_ps(fscal,dy12);
2516 tz = _mm256_mul_ps(fscal,dz12);
2518 /* Update vectorial force */
2519 fix1 = _mm256_add_ps(fix1,tx);
2520 fiy1 = _mm256_add_ps(fiy1,ty);
2521 fiz1 = _mm256_add_ps(fiz1,tz);
2523 fjx2 = _mm256_add_ps(fjx2,tx);
2524 fjy2 = _mm256_add_ps(fjy2,ty);
2525 fjz2 = _mm256_add_ps(fjz2,tz);
2529 /**************************
2530 * CALCULATE INTERACTIONS *
2531 **************************/
2533 if (gmx_mm256_any_lt(rsq20,rcutoff2))
2536 r20 = _mm256_mul_ps(rsq20,rinv20);
2537 r20 = _mm256_andnot_ps(dummy_mask,r20);
2539 /* EWALD ELECTROSTATICS */
2541 /* Analytical PME correction */
2542 zeta2 = _mm256_mul_ps(beta2,rsq20);
2543 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
2544 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2545 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2546 felec = _mm256_mul_ps(qq20,felec);
2548 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
2550 fscal = felec;
2552 fscal = _mm256_and_ps(fscal,cutoff_mask);
2554 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2556 /* Calculate temporary vectorial force */
2557 tx = _mm256_mul_ps(fscal,dx20);
2558 ty = _mm256_mul_ps(fscal,dy20);
2559 tz = _mm256_mul_ps(fscal,dz20);
2561 /* Update vectorial force */
2562 fix2 = _mm256_add_ps(fix2,tx);
2563 fiy2 = _mm256_add_ps(fiy2,ty);
2564 fiz2 = _mm256_add_ps(fiz2,tz);
2566 fjx0 = _mm256_add_ps(fjx0,tx);
2567 fjy0 = _mm256_add_ps(fjy0,ty);
2568 fjz0 = _mm256_add_ps(fjz0,tz);
2572 /**************************
2573 * CALCULATE INTERACTIONS *
2574 **************************/
2576 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2579 r21 = _mm256_mul_ps(rsq21,rinv21);
2580 r21 = _mm256_andnot_ps(dummy_mask,r21);
2582 /* EWALD ELECTROSTATICS */
2584 /* Analytical PME correction */
2585 zeta2 = _mm256_mul_ps(beta2,rsq21);
2586 rinv3 = _mm256_mul_ps(rinvsq21,rinv21);
2587 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2588 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2589 felec = _mm256_mul_ps(qq21,felec);
2591 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2593 fscal = felec;
2595 fscal = _mm256_and_ps(fscal,cutoff_mask);
2597 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2599 /* Calculate temporary vectorial force */
2600 tx = _mm256_mul_ps(fscal,dx21);
2601 ty = _mm256_mul_ps(fscal,dy21);
2602 tz = _mm256_mul_ps(fscal,dz21);
2604 /* Update vectorial force */
2605 fix2 = _mm256_add_ps(fix2,tx);
2606 fiy2 = _mm256_add_ps(fiy2,ty);
2607 fiz2 = _mm256_add_ps(fiz2,tz);
2609 fjx1 = _mm256_add_ps(fjx1,tx);
2610 fjy1 = _mm256_add_ps(fjy1,ty);
2611 fjz1 = _mm256_add_ps(fjz1,tz);
2615 /**************************
2616 * CALCULATE INTERACTIONS *
2617 **************************/
2619 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2622 r22 = _mm256_mul_ps(rsq22,rinv22);
2623 r22 = _mm256_andnot_ps(dummy_mask,r22);
2625 /* EWALD ELECTROSTATICS */
2627 /* Analytical PME correction */
2628 zeta2 = _mm256_mul_ps(beta2,rsq22);
2629 rinv3 = _mm256_mul_ps(rinvsq22,rinv22);
2630 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
2631 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2632 felec = _mm256_mul_ps(qq22,felec);
2634 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2636 fscal = felec;
2638 fscal = _mm256_and_ps(fscal,cutoff_mask);
2640 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2642 /* Calculate temporary vectorial force */
2643 tx = _mm256_mul_ps(fscal,dx22);
2644 ty = _mm256_mul_ps(fscal,dy22);
2645 tz = _mm256_mul_ps(fscal,dz22);
2647 /* Update vectorial force */
2648 fix2 = _mm256_add_ps(fix2,tx);
2649 fiy2 = _mm256_add_ps(fiy2,ty);
2650 fiz2 = _mm256_add_ps(fiz2,tz);
2652 fjx2 = _mm256_add_ps(fjx2,tx);
2653 fjy2 = _mm256_add_ps(fjy2,ty);
2654 fjz2 = _mm256_add_ps(fjz2,tz);
2658 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2659 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2660 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2661 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2662 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2663 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2664 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2665 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
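/* For dummy lanes the force pointers are redirected to a scratch buffer, so
 * the swizzled decrement below can store unconditionally for all eight lanes
 * without branching on which entries are real.
 */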
2667 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2668 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2670 /* Inner loop uses 563 flops */
2673 /* End of innermost loop */
2675 gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2676 f+i_coord_offset,fshift+i_shift_offset);
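/* Reduces the 8 lanes of each of the nine i-force accumulators, adds the sums
 * to f[] for the three i atoms, and accumulates the net force into fshift for
 * this shift vector (which GROMACS uses for the virial).
 */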
2678 /* Increment number of inner iterations */
2679 inneriter += j_index_end - j_index_start;
2681 /* Outer loop uses 18 flops */
2684 /* Increment number of outer iterations */
2685 outeriter += nri;
2687 /* Update outer/inner flops */
2689 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*563);
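/* Bookkeeping only: inc_nrnb() records the operation counts (18 flops per
 * outer iteration, 554/563 per inner iteration as noted above) for the flop
 * summary printed by mdrun; it does not affect the computed forces.
 */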