2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
42 #include "../nb_kernel.h"
43 #include "types/simple.h"
44 #include "gromacs/math/vec.h"
47 #include "gromacs/simd/math_x86_avx_256_double.h"
48 #include "kernelutil_x86_avx_256_double.h"
51 * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_double
52 * Electrostatics interaction: ReactionField
53 * VdW interaction: CubicSplineTable
54 * Geometry: Water3-Water3
55 * Calculate force/pot: PotentialAndForce
58 nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_double
59 (t_nblist * gmx_restrict nlist,
60 rvec * gmx_restrict xx,
61 rvec * gmx_restrict ff,
62 t_forcerec * gmx_restrict fr,
63 t_mdatoms * gmx_restrict mdatoms,
64 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65 t_nrnb * gmx_restrict nrnb)
67 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
68 * just 0 for non-waters.
69 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
70 * jnr indices corresponding to data put in the four positions in the SIMD register.
72 int i_shift_offset,i_coord_offset,outeriter,inneriter;
73 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74 int jnrA,jnrB,jnrC,jnrD;
75 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
80 real *shiftvec,*fshift,*x,*f;
81 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
83 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
84 real * vdwioffsetptr0;
85 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
86 real * vdwioffsetptr1;
87 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
88 real * vdwioffsetptr2;
89 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
90 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
91 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
92 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
93 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
94 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
95 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
96 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
97 __m256d dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
98 __m256d dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
99 __m256d dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
100 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
101 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
102 __m256d dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
103 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
104 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
105 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
108 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
111 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
112 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
114 __m128i ifour = _mm_set1_epi32(4);
115 __m256d rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
117 __m256d dummy_mask,cutoff_mask;
118 __m128 tmpmask0,tmpmask1;
119 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
120 __m256d one = _mm256_set1_pd(1.0);
121 __m256d two = _mm256_set1_pd(2.0);
127 jindex = nlist->jindex;
129 shiftidx = nlist->shift;
131 shiftvec = fr->shift_vec[0];
132 fshift = fr->fshift[0];
133 facel = _mm256_set1_pd(fr->epsfac);
134 charge = mdatoms->chargeA;
135 krf = _mm256_set1_pd(fr->ic->k_rf);
136 krf2 = _mm256_set1_pd(fr->ic->k_rf*2.0);
137 crf = _mm256_set1_pd(fr->ic->c_rf);
138 nvdwtype = fr->ntype;
140 vdwtype = mdatoms->typeA;
142 vftab = kernel_data->table_vdw->data;
143 vftabscale = _mm256_set1_pd(kernel_data->table_vdw->scale);
145 /* Setup water-specific parameters */
146 inr = nlist->iinr[0];
147 iq0 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
148 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
149 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
150 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
152 jq0 = _mm256_set1_pd(charge[inr+0]);
153 jq1 = _mm256_set1_pd(charge[inr+1]);
154 jq2 = _mm256_set1_pd(charge[inr+2]);
155 vdwjidx0A = 2*vdwtype[inr+0];
156 qq00 = _mm256_mul_pd(iq0,jq0);
157 c6_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
158 c12_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
159 qq01 = _mm256_mul_pd(iq0,jq1);
160 qq02 = _mm256_mul_pd(iq0,jq2);
161 qq10 = _mm256_mul_pd(iq1,jq0);
162 qq11 = _mm256_mul_pd(iq1,jq1);
163 qq12 = _mm256_mul_pd(iq1,jq2);
164 qq20 = _mm256_mul_pd(iq2,jq0);
165 qq21 = _mm256_mul_pd(iq2,jq1);
166 qq22 = _mm256_mul_pd(iq2,jq2);
168 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
169 rcutoff_scalar = fr->rcoulomb;
170 rcutoff = _mm256_set1_pd(rcutoff_scalar);
171 rcutoff2 = _mm256_mul_pd(rcutoff,rcutoff);
173 /* Avoid stupid compiler warnings */
174 jnrA = jnrB = jnrC = jnrD = 0;
183 for(iidx=0;iidx<4*DIM;iidx++)
188 /* Start outer loop over neighborlists */
189 for(iidx=0; iidx<nri; iidx++)
191 /* Load shift vector for this list */
192 i_shift_offset = DIM*shiftidx[iidx];
194 /* Load limits for loop over neighbors */
195 j_index_start = jindex[iidx];
196 j_index_end = jindex[iidx+1];
198 /* Get outer coordinate index */
200 i_coord_offset = DIM*inr;
202 /* Load i particle coords and add shift vector */
203 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
204 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
206 fix0 = _mm256_setzero_pd();
207 fiy0 = _mm256_setzero_pd();
208 fiz0 = _mm256_setzero_pd();
209 fix1 = _mm256_setzero_pd();
210 fiy1 = _mm256_setzero_pd();
211 fiz1 = _mm256_setzero_pd();
212 fix2 = _mm256_setzero_pd();
213 fiy2 = _mm256_setzero_pd();
214 fiz2 = _mm256_setzero_pd();
216 /* Reset potential sums */
217 velecsum = _mm256_setzero_pd();
218 vvdwsum = _mm256_setzero_pd();
220 /* Start inner kernel loop */
221 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
224 /* Get j neighbor index, and coordinate index */
229 j_coord_offsetA = DIM*jnrA;
230 j_coord_offsetB = DIM*jnrB;
231 j_coord_offsetC = DIM*jnrC;
232 j_coord_offsetD = DIM*jnrD;
234 /* load j atom coordinates */
235 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
236 x+j_coord_offsetC,x+j_coord_offsetD,
237 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
239 /* Calculate displacement vector */
240 dx00 = _mm256_sub_pd(ix0,jx0);
241 dy00 = _mm256_sub_pd(iy0,jy0);
242 dz00 = _mm256_sub_pd(iz0,jz0);
243 dx01 = _mm256_sub_pd(ix0,jx1);
244 dy01 = _mm256_sub_pd(iy0,jy1);
245 dz01 = _mm256_sub_pd(iz0,jz1);
246 dx02 = _mm256_sub_pd(ix0,jx2);
247 dy02 = _mm256_sub_pd(iy0,jy2);
248 dz02 = _mm256_sub_pd(iz0,jz2);
249 dx10 = _mm256_sub_pd(ix1,jx0);
250 dy10 = _mm256_sub_pd(iy1,jy0);
251 dz10 = _mm256_sub_pd(iz1,jz0);
252 dx11 = _mm256_sub_pd(ix1,jx1);
253 dy11 = _mm256_sub_pd(iy1,jy1);
254 dz11 = _mm256_sub_pd(iz1,jz1);
255 dx12 = _mm256_sub_pd(ix1,jx2);
256 dy12 = _mm256_sub_pd(iy1,jy2);
257 dz12 = _mm256_sub_pd(iz1,jz2);
258 dx20 = _mm256_sub_pd(ix2,jx0);
259 dy20 = _mm256_sub_pd(iy2,jy0);
260 dz20 = _mm256_sub_pd(iz2,jz0);
261 dx21 = _mm256_sub_pd(ix2,jx1);
262 dy21 = _mm256_sub_pd(iy2,jy1);
263 dz21 = _mm256_sub_pd(iz2,jz1);
264 dx22 = _mm256_sub_pd(ix2,jx2);
265 dy22 = _mm256_sub_pd(iy2,jy2);
266 dz22 = _mm256_sub_pd(iz2,jz2);
268 /* Calculate squared distance and things based on it */
269 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
270 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
271 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
272 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
273 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
274 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
275 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
276 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
277 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
279 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
280 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
281 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
282 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
283 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
284 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
285 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
286 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
287 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
289 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
290 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
291 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
292 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
293 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
294 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
295 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
296 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
297 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
299 fjx0 = _mm256_setzero_pd();
300 fjy0 = _mm256_setzero_pd();
301 fjz0 = _mm256_setzero_pd();
302 fjx1 = _mm256_setzero_pd();
303 fjy1 = _mm256_setzero_pd();
304 fjz1 = _mm256_setzero_pd();
305 fjx2 = _mm256_setzero_pd();
306 fjy2 = _mm256_setzero_pd();
307 fjz2 = _mm256_setzero_pd();
309 /**************************
310 * CALCULATE INTERACTIONS *
311 **************************/
313 if (gmx_mm256_any_lt(rsq00,rcutoff2))
316 r00 = _mm256_mul_pd(rsq00,rinv00);
318 /* Calculate table index by multiplying r with table scale and truncate to integer */
319 rt = _mm256_mul_pd(r00,vftabscale);
320 vfitab = _mm256_cvttpd_epi32(rt);
321 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
322 vfitab = _mm_slli_epi32(vfitab,3);
324 /* REACTION-FIELD ELECTROSTATICS */
325 velec = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
326 felec = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
328 /* CUBIC SPLINE TABLE DISPERSION */
329 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
330 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
331 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
332 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
333 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
334 Heps = _mm256_mul_pd(vfeps,H);
335 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
336 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
337 vvdw6 = _mm256_mul_pd(c6_00,VV);
338 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
339 fvdw6 = _mm256_mul_pd(c6_00,FF);
341 /* CUBIC SPLINE TABLE REPULSION */
342 vfitab = _mm_add_epi32(vfitab,ifour);
343 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
344 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
345 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
346 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
347 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
348 Heps = _mm256_mul_pd(vfeps,H);
349 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
350 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
351 vvdw12 = _mm256_mul_pd(c12_00,VV);
352 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
353 fvdw12 = _mm256_mul_pd(c12_00,FF);
354 vvdw = _mm256_add_pd(vvdw12,vvdw6);
355 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
357 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
359 /* Update potential sum for this i atom from the interaction with this j atom. */
360 velec = _mm256_and_pd(velec,cutoff_mask);
361 velecsum = _mm256_add_pd(velecsum,velec);
362 vvdw = _mm256_and_pd(vvdw,cutoff_mask);
363 vvdwsum = _mm256_add_pd(vvdwsum,vvdw);
365 fscal = _mm256_add_pd(felec,fvdw);
367 fscal = _mm256_and_pd(fscal,cutoff_mask);
369 /* Calculate temporary vectorial force */
370 tx = _mm256_mul_pd(fscal,dx00);
371 ty = _mm256_mul_pd(fscal,dy00);
372 tz = _mm256_mul_pd(fscal,dz00);
374 /* Update vectorial force */
375 fix0 = _mm256_add_pd(fix0,tx);
376 fiy0 = _mm256_add_pd(fiy0,ty);
377 fiz0 = _mm256_add_pd(fiz0,tz);
379 fjx0 = _mm256_add_pd(fjx0,tx);
380 fjy0 = _mm256_add_pd(fjy0,ty);
381 fjz0 = _mm256_add_pd(fjz0,tz);
385 /**************************
386 * CALCULATE INTERACTIONS *
387 **************************/
389 if (gmx_mm256_any_lt(rsq01,rcutoff2))
392 /* REACTION-FIELD ELECTROSTATICS */
393 velec = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
394 felec = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
396 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
398 /* Update potential sum for this i atom from the interaction with this j atom. */
399 velec = _mm256_and_pd(velec,cutoff_mask);
400 velecsum = _mm256_add_pd(velecsum,velec);
404 fscal = _mm256_and_pd(fscal,cutoff_mask);
406 /* Calculate temporary vectorial force */
407 tx = _mm256_mul_pd(fscal,dx01);
408 ty = _mm256_mul_pd(fscal,dy01);
409 tz = _mm256_mul_pd(fscal,dz01);
411 /* Update vectorial force */
412 fix0 = _mm256_add_pd(fix0,tx);
413 fiy0 = _mm256_add_pd(fiy0,ty);
414 fiz0 = _mm256_add_pd(fiz0,tz);
416 fjx1 = _mm256_add_pd(fjx1,tx);
417 fjy1 = _mm256_add_pd(fjy1,ty);
418 fjz1 = _mm256_add_pd(fjz1,tz);
422 /**************************
423 * CALCULATE INTERACTIONS *
424 **************************/
426 if (gmx_mm256_any_lt(rsq02,rcutoff2))
429 /* REACTION-FIELD ELECTROSTATICS */
430 velec = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
431 felec = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
433 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
435 /* Update potential sum for this i atom from the interaction with this j atom. */
436 velec = _mm256_and_pd(velec,cutoff_mask);
437 velecsum = _mm256_add_pd(velecsum,velec);
441 fscal = _mm256_and_pd(fscal,cutoff_mask);
443 /* Calculate temporary vectorial force */
444 tx = _mm256_mul_pd(fscal,dx02);
445 ty = _mm256_mul_pd(fscal,dy02);
446 tz = _mm256_mul_pd(fscal,dz02);
448 /* Update vectorial force */
449 fix0 = _mm256_add_pd(fix0,tx);
450 fiy0 = _mm256_add_pd(fiy0,ty);
451 fiz0 = _mm256_add_pd(fiz0,tz);
453 fjx2 = _mm256_add_pd(fjx2,tx);
454 fjy2 = _mm256_add_pd(fjy2,ty);
455 fjz2 = _mm256_add_pd(fjz2,tz);
459 /**************************
460 * CALCULATE INTERACTIONS *
461 **************************/
463 if (gmx_mm256_any_lt(rsq10,rcutoff2))
466 /* REACTION-FIELD ELECTROSTATICS */
467 velec = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
468 felec = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
470 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
472 /* Update potential sum for this i atom from the interaction with this j atom. */
473 velec = _mm256_and_pd(velec,cutoff_mask);
474 velecsum = _mm256_add_pd(velecsum,velec);
478 fscal = _mm256_and_pd(fscal,cutoff_mask);
480 /* Calculate temporary vectorial force */
481 tx = _mm256_mul_pd(fscal,dx10);
482 ty = _mm256_mul_pd(fscal,dy10);
483 tz = _mm256_mul_pd(fscal,dz10);
485 /* Update vectorial force */
486 fix1 = _mm256_add_pd(fix1,tx);
487 fiy1 = _mm256_add_pd(fiy1,ty);
488 fiz1 = _mm256_add_pd(fiz1,tz);
490 fjx0 = _mm256_add_pd(fjx0,tx);
491 fjy0 = _mm256_add_pd(fjy0,ty);
492 fjz0 = _mm256_add_pd(fjz0,tz);
496 /**************************
497 * CALCULATE INTERACTIONS *
498 **************************/
500 if (gmx_mm256_any_lt(rsq11,rcutoff2))
503 /* REACTION-FIELD ELECTROSTATICS */
504 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
505 felec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
507 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
509 /* Update potential sum for this i atom from the interaction with this j atom. */
510 velec = _mm256_and_pd(velec,cutoff_mask);
511 velecsum = _mm256_add_pd(velecsum,velec);
515 fscal = _mm256_and_pd(fscal,cutoff_mask);
517 /* Calculate temporary vectorial force */
518 tx = _mm256_mul_pd(fscal,dx11);
519 ty = _mm256_mul_pd(fscal,dy11);
520 tz = _mm256_mul_pd(fscal,dz11);
522 /* Update vectorial force */
523 fix1 = _mm256_add_pd(fix1,tx);
524 fiy1 = _mm256_add_pd(fiy1,ty);
525 fiz1 = _mm256_add_pd(fiz1,tz);
527 fjx1 = _mm256_add_pd(fjx1,tx);
528 fjy1 = _mm256_add_pd(fjy1,ty);
529 fjz1 = _mm256_add_pd(fjz1,tz);
533 /**************************
534 * CALCULATE INTERACTIONS *
535 **************************/
537 if (gmx_mm256_any_lt(rsq12,rcutoff2))
540 /* REACTION-FIELD ELECTROSTATICS */
541 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
542 felec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
544 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
546 /* Update potential sum for this i atom from the interaction with this j atom. */
547 velec = _mm256_and_pd(velec,cutoff_mask);
548 velecsum = _mm256_add_pd(velecsum,velec);
552 fscal = _mm256_and_pd(fscal,cutoff_mask);
554 /* Calculate temporary vectorial force */
555 tx = _mm256_mul_pd(fscal,dx12);
556 ty = _mm256_mul_pd(fscal,dy12);
557 tz = _mm256_mul_pd(fscal,dz12);
559 /* Update vectorial force */
560 fix1 = _mm256_add_pd(fix1,tx);
561 fiy1 = _mm256_add_pd(fiy1,ty);
562 fiz1 = _mm256_add_pd(fiz1,tz);
564 fjx2 = _mm256_add_pd(fjx2,tx);
565 fjy2 = _mm256_add_pd(fjy2,ty);
566 fjz2 = _mm256_add_pd(fjz2,tz);
570 /**************************
571 * CALCULATE INTERACTIONS *
572 **************************/
574 if (gmx_mm256_any_lt(rsq20,rcutoff2))
577 /* REACTION-FIELD ELECTROSTATICS */
578 velec = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
579 felec = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
581 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
583 /* Update potential sum for this i atom from the interaction with this j atom. */
584 velec = _mm256_and_pd(velec,cutoff_mask);
585 velecsum = _mm256_add_pd(velecsum,velec);
589 fscal = _mm256_and_pd(fscal,cutoff_mask);
591 /* Calculate temporary vectorial force */
592 tx = _mm256_mul_pd(fscal,dx20);
593 ty = _mm256_mul_pd(fscal,dy20);
594 tz = _mm256_mul_pd(fscal,dz20);
596 /* Update vectorial force */
597 fix2 = _mm256_add_pd(fix2,tx);
598 fiy2 = _mm256_add_pd(fiy2,ty);
599 fiz2 = _mm256_add_pd(fiz2,tz);
601 fjx0 = _mm256_add_pd(fjx0,tx);
602 fjy0 = _mm256_add_pd(fjy0,ty);
603 fjz0 = _mm256_add_pd(fjz0,tz);
607 /**************************
608 * CALCULATE INTERACTIONS *
609 **************************/
611 if (gmx_mm256_any_lt(rsq21,rcutoff2))
614 /* REACTION-FIELD ELECTROSTATICS */
615 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
616 felec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
618 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
620 /* Update potential sum for this i atom from the interaction with this j atom. */
621 velec = _mm256_and_pd(velec,cutoff_mask);
622 velecsum = _mm256_add_pd(velecsum,velec);
626 fscal = _mm256_and_pd(fscal,cutoff_mask);
628 /* Calculate temporary vectorial force */
629 tx = _mm256_mul_pd(fscal,dx21);
630 ty = _mm256_mul_pd(fscal,dy21);
631 tz = _mm256_mul_pd(fscal,dz21);
633 /* Update vectorial force */
634 fix2 = _mm256_add_pd(fix2,tx);
635 fiy2 = _mm256_add_pd(fiy2,ty);
636 fiz2 = _mm256_add_pd(fiz2,tz);
638 fjx1 = _mm256_add_pd(fjx1,tx);
639 fjy1 = _mm256_add_pd(fjy1,ty);
640 fjz1 = _mm256_add_pd(fjz1,tz);
644 /**************************
645 * CALCULATE INTERACTIONS *
646 **************************/
648 if (gmx_mm256_any_lt(rsq22,rcutoff2))
651 /* REACTION-FIELD ELECTROSTATICS */
652 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
653 felec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
655 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
657 /* Update potential sum for this i atom from the interaction with this j atom. */
658 velec = _mm256_and_pd(velec,cutoff_mask);
659 velecsum = _mm256_add_pd(velecsum,velec);
663 fscal = _mm256_and_pd(fscal,cutoff_mask);
665 /* Calculate temporary vectorial force */
666 tx = _mm256_mul_pd(fscal,dx22);
667 ty = _mm256_mul_pd(fscal,dy22);
668 tz = _mm256_mul_pd(fscal,dz22);
670 /* Update vectorial force */
671 fix2 = _mm256_add_pd(fix2,tx);
672 fiy2 = _mm256_add_pd(fiy2,ty);
673 fiz2 = _mm256_add_pd(fiz2,tz);
675 fjx2 = _mm256_add_pd(fjx2,tx);
676 fjy2 = _mm256_add_pd(fjy2,ty);
677 fjz2 = _mm256_add_pd(fjz2,tz);
681 fjptrA = f+j_coord_offsetA;
682 fjptrB = f+j_coord_offsetB;
683 fjptrC = f+j_coord_offsetC;
684 fjptrD = f+j_coord_offsetD;
686 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
687 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
689 /* Inner loop uses 360 flops */
695 /* Get j neighbor index, and coordinate index */
696 jnrlistA = jjnr[jidx];
697 jnrlistB = jjnr[jidx+1];
698 jnrlistC = jjnr[jidx+2];
699 jnrlistD = jjnr[jidx+3];
700 /* Sign of each element will be negative for non-real atoms.
701 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
702 * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
704 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
706 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
707 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
708 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
710 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
711 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
712 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
713 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
714 j_coord_offsetA = DIM*jnrA;
715 j_coord_offsetB = DIM*jnrB;
716 j_coord_offsetC = DIM*jnrC;
717 j_coord_offsetD = DIM*jnrD;
719 /* load j atom coordinates */
720 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
721 x+j_coord_offsetC,x+j_coord_offsetD,
722 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
724 /* Calculate displacement vector */
725 dx00 = _mm256_sub_pd(ix0,jx0);
726 dy00 = _mm256_sub_pd(iy0,jy0);
727 dz00 = _mm256_sub_pd(iz0,jz0);
728 dx01 = _mm256_sub_pd(ix0,jx1);
729 dy01 = _mm256_sub_pd(iy0,jy1);
730 dz01 = _mm256_sub_pd(iz0,jz1);
731 dx02 = _mm256_sub_pd(ix0,jx2);
732 dy02 = _mm256_sub_pd(iy0,jy2);
733 dz02 = _mm256_sub_pd(iz0,jz2);
734 dx10 = _mm256_sub_pd(ix1,jx0);
735 dy10 = _mm256_sub_pd(iy1,jy0);
736 dz10 = _mm256_sub_pd(iz1,jz0);
737 dx11 = _mm256_sub_pd(ix1,jx1);
738 dy11 = _mm256_sub_pd(iy1,jy1);
739 dz11 = _mm256_sub_pd(iz1,jz1);
740 dx12 = _mm256_sub_pd(ix1,jx2);
741 dy12 = _mm256_sub_pd(iy1,jy2);
742 dz12 = _mm256_sub_pd(iz1,jz2);
743 dx20 = _mm256_sub_pd(ix2,jx0);
744 dy20 = _mm256_sub_pd(iy2,jy0);
745 dz20 = _mm256_sub_pd(iz2,jz0);
746 dx21 = _mm256_sub_pd(ix2,jx1);
747 dy21 = _mm256_sub_pd(iy2,jy1);
748 dz21 = _mm256_sub_pd(iz2,jz1);
749 dx22 = _mm256_sub_pd(ix2,jx2);
750 dy22 = _mm256_sub_pd(iy2,jy2);
751 dz22 = _mm256_sub_pd(iz2,jz2);
753 /* Calculate squared distance and things based on it */
754 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
755 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
756 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
757 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
758 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
759 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
760 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
761 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
762 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
764 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
765 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
766 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
767 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
768 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
769 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
770 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
771 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
772 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
774 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
775 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
776 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
777 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
778 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
779 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
780 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
781 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
782 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
784 fjx0 = _mm256_setzero_pd();
785 fjy0 = _mm256_setzero_pd();
786 fjz0 = _mm256_setzero_pd();
787 fjx1 = _mm256_setzero_pd();
788 fjy1 = _mm256_setzero_pd();
789 fjz1 = _mm256_setzero_pd();
790 fjx2 = _mm256_setzero_pd();
791 fjy2 = _mm256_setzero_pd();
792 fjz2 = _mm256_setzero_pd();
794 /**************************
795 * CALCULATE INTERACTIONS *
796 **************************/
798 if (gmx_mm256_any_lt(rsq00,rcutoff2))
801 r00 = _mm256_mul_pd(rsq00,rinv00);
802 r00 = _mm256_andnot_pd(dummy_mask,r00);
804 /* Calculate table index by multiplying r with table scale and truncate to integer */
805 rt = _mm256_mul_pd(r00,vftabscale);
806 vfitab = _mm256_cvttpd_epi32(rt);
807 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
808 vfitab = _mm_slli_epi32(vfitab,3);
810 /* REACTION-FIELD ELECTROSTATICS */
811 velec = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
812 felec = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
814 /* CUBIC SPLINE TABLE DISPERSION */
815 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
816 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
817 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
818 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
819 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
820 Heps = _mm256_mul_pd(vfeps,H);
821 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
822 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
823 vvdw6 = _mm256_mul_pd(c6_00,VV);
824 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
825 fvdw6 = _mm256_mul_pd(c6_00,FF);
827 /* CUBIC SPLINE TABLE REPULSION */
828 vfitab = _mm_add_epi32(vfitab,ifour);
829 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
830 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
831 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
832 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
833 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
834 Heps = _mm256_mul_pd(vfeps,H);
835 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
836 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
837 vvdw12 = _mm256_mul_pd(c12_00,VV);
838 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
839 fvdw12 = _mm256_mul_pd(c12_00,FF);
840 vvdw = _mm256_add_pd(vvdw12,vvdw6);
841 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
843 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
845 /* Update potential sum for this i atom from the interaction with this j atom. */
846 velec = _mm256_and_pd(velec,cutoff_mask);
847 velec = _mm256_andnot_pd(dummy_mask,velec);
848 velecsum = _mm256_add_pd(velecsum,velec);
849 vvdw = _mm256_and_pd(vvdw,cutoff_mask);
850 vvdw = _mm256_andnot_pd(dummy_mask,vvdw);
851 vvdwsum = _mm256_add_pd(vvdwsum,vvdw);
853 fscal = _mm256_add_pd(felec,fvdw);
855 fscal = _mm256_and_pd(fscal,cutoff_mask);
857 fscal = _mm256_andnot_pd(dummy_mask,fscal);
859 /* Calculate temporary vectorial force */
860 tx = _mm256_mul_pd(fscal,dx00);
861 ty = _mm256_mul_pd(fscal,dy00);
862 tz = _mm256_mul_pd(fscal,dz00);
864 /* Update vectorial force */
865 fix0 = _mm256_add_pd(fix0,tx);
866 fiy0 = _mm256_add_pd(fiy0,ty);
867 fiz0 = _mm256_add_pd(fiz0,tz);
869 fjx0 = _mm256_add_pd(fjx0,tx);
870 fjy0 = _mm256_add_pd(fjy0,ty);
871 fjz0 = _mm256_add_pd(fjz0,tz);
875 /**************************
876 * CALCULATE INTERACTIONS *
877 **************************/
879 if (gmx_mm256_any_lt(rsq01,rcutoff2))
882 /* REACTION-FIELD ELECTROSTATICS */
883 velec = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
884 felec = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
886 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
888 /* Update potential sum for this i atom from the interaction with this j atom. */
889 velec = _mm256_and_pd(velec,cutoff_mask);
890 velec = _mm256_andnot_pd(dummy_mask,velec);
891 velecsum = _mm256_add_pd(velecsum,velec);
895 fscal = _mm256_and_pd(fscal,cutoff_mask);
897 fscal = _mm256_andnot_pd(dummy_mask,fscal);
899 /* Calculate temporary vectorial force */
900 tx = _mm256_mul_pd(fscal,dx01);
901 ty = _mm256_mul_pd(fscal,dy01);
902 tz = _mm256_mul_pd(fscal,dz01);
904 /* Update vectorial force */
905 fix0 = _mm256_add_pd(fix0,tx);
906 fiy0 = _mm256_add_pd(fiy0,ty);
907 fiz0 = _mm256_add_pd(fiz0,tz);
909 fjx1 = _mm256_add_pd(fjx1,tx);
910 fjy1 = _mm256_add_pd(fjy1,ty);
911 fjz1 = _mm256_add_pd(fjz1,tz);
915 /**************************
916 * CALCULATE INTERACTIONS *
917 **************************/
919 if (gmx_mm256_any_lt(rsq02,rcutoff2))
922 /* REACTION-FIELD ELECTROSTATICS */
923 velec = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
924 felec = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
926 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
928 /* Update potential sum for this i atom from the interaction with this j atom. */
929 velec = _mm256_and_pd(velec,cutoff_mask);
930 velec = _mm256_andnot_pd(dummy_mask,velec);
931 velecsum = _mm256_add_pd(velecsum,velec);
935 fscal = _mm256_and_pd(fscal,cutoff_mask);
937 fscal = _mm256_andnot_pd(dummy_mask,fscal);
939 /* Calculate temporary vectorial force */
940 tx = _mm256_mul_pd(fscal,dx02);
941 ty = _mm256_mul_pd(fscal,dy02);
942 tz = _mm256_mul_pd(fscal,dz02);
944 /* Update vectorial force */
945 fix0 = _mm256_add_pd(fix0,tx);
946 fiy0 = _mm256_add_pd(fiy0,ty);
947 fiz0 = _mm256_add_pd(fiz0,tz);
949 fjx2 = _mm256_add_pd(fjx2,tx);
950 fjy2 = _mm256_add_pd(fjy2,ty);
951 fjz2 = _mm256_add_pd(fjz2,tz);
955 /**************************
956 * CALCULATE INTERACTIONS *
957 **************************/
959 if (gmx_mm256_any_lt(rsq10,rcutoff2))
962 /* REACTION-FIELD ELECTROSTATICS */
963 velec = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
964 felec = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
966 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
968 /* Update potential sum for this i atom from the interaction with this j atom. */
969 velec = _mm256_and_pd(velec,cutoff_mask);
970 velec = _mm256_andnot_pd(dummy_mask,velec);
971 velecsum = _mm256_add_pd(velecsum,velec);
975 fscal = _mm256_and_pd(fscal,cutoff_mask);
977 fscal = _mm256_andnot_pd(dummy_mask,fscal);
979 /* Calculate temporary vectorial force */
980 tx = _mm256_mul_pd(fscal,dx10);
981 ty = _mm256_mul_pd(fscal,dy10);
982 tz = _mm256_mul_pd(fscal,dz10);
984 /* Update vectorial force */
985 fix1 = _mm256_add_pd(fix1,tx);
986 fiy1 = _mm256_add_pd(fiy1,ty);
987 fiz1 = _mm256_add_pd(fiz1,tz);
989 fjx0 = _mm256_add_pd(fjx0,tx);
990 fjy0 = _mm256_add_pd(fjy0,ty);
991 fjz0 = _mm256_add_pd(fjz0,tz);
995 /**************************
996 * CALCULATE INTERACTIONS *
997 **************************/
999 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1002 /* REACTION-FIELD ELECTROSTATICS */
1003 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
1004 felec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
1006 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1008 /* Update potential sum for this i atom from the interaction with this j atom. */
1009 velec = _mm256_and_pd(velec,cutoff_mask);
1010 velec = _mm256_andnot_pd(dummy_mask,velec);
1011 velecsum = _mm256_add_pd(velecsum,velec);
1015 fscal = _mm256_and_pd(fscal,cutoff_mask);
1017 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1019 /* Calculate temporary vectorial force */
1020 tx = _mm256_mul_pd(fscal,dx11);
1021 ty = _mm256_mul_pd(fscal,dy11);
1022 tz = _mm256_mul_pd(fscal,dz11);
1024 /* Update vectorial force */
1025 fix1 = _mm256_add_pd(fix1,tx);
1026 fiy1 = _mm256_add_pd(fiy1,ty);
1027 fiz1 = _mm256_add_pd(fiz1,tz);
1029 fjx1 = _mm256_add_pd(fjx1,tx);
1030 fjy1 = _mm256_add_pd(fjy1,ty);
1031 fjz1 = _mm256_add_pd(fjz1,tz);
1035 /**************************
1036 * CALCULATE INTERACTIONS *
1037 **************************/
1039 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1042 /* REACTION-FIELD ELECTROSTATICS */
1043 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
1044 felec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
1046 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1048 /* Update potential sum for this i atom from the interaction with this j atom. */
1049 velec = _mm256_and_pd(velec,cutoff_mask);
1050 velec = _mm256_andnot_pd(dummy_mask,velec);
1051 velecsum = _mm256_add_pd(velecsum,velec);
1055 fscal = _mm256_and_pd(fscal,cutoff_mask);
1057 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1059 /* Calculate temporary vectorial force */
1060 tx = _mm256_mul_pd(fscal,dx12);
1061 ty = _mm256_mul_pd(fscal,dy12);
1062 tz = _mm256_mul_pd(fscal,dz12);
1064 /* Update vectorial force */
1065 fix1 = _mm256_add_pd(fix1,tx);
1066 fiy1 = _mm256_add_pd(fiy1,ty);
1067 fiz1 = _mm256_add_pd(fiz1,tz);
1069 fjx2 = _mm256_add_pd(fjx2,tx);
1070 fjy2 = _mm256_add_pd(fjy2,ty);
1071 fjz2 = _mm256_add_pd(fjz2,tz);
1075 /**************************
1076 * CALCULATE INTERACTIONS *
1077 **************************/
1079 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1082 /* REACTION-FIELD ELECTROSTATICS */
1083 velec = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
1084 felec = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
1086 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1088 /* Update potential sum for this i atom from the interaction with this j atom. */
1089 velec = _mm256_and_pd(velec,cutoff_mask);
1090 velec = _mm256_andnot_pd(dummy_mask,velec);
1091 velecsum = _mm256_add_pd(velecsum,velec);
1095 fscal = _mm256_and_pd(fscal,cutoff_mask);
1097 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1099 /* Calculate temporary vectorial force */
1100 tx = _mm256_mul_pd(fscal,dx20);
1101 ty = _mm256_mul_pd(fscal,dy20);
1102 tz = _mm256_mul_pd(fscal,dz20);
1104 /* Update vectorial force */
1105 fix2 = _mm256_add_pd(fix2,tx);
1106 fiy2 = _mm256_add_pd(fiy2,ty);
1107 fiz2 = _mm256_add_pd(fiz2,tz);
1109 fjx0 = _mm256_add_pd(fjx0,tx);
1110 fjy0 = _mm256_add_pd(fjy0,ty);
1111 fjz0 = _mm256_add_pd(fjz0,tz);
1115 /**************************
1116 * CALCULATE INTERACTIONS *
1117 **************************/
1119 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1122 /* REACTION-FIELD ELECTROSTATICS */
1123 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
1124 felec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
1126 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1128 /* Update potential sum for this i atom from the interaction with this j atom. */
1129 velec = _mm256_and_pd(velec,cutoff_mask);
1130 velec = _mm256_andnot_pd(dummy_mask,velec);
1131 velecsum = _mm256_add_pd(velecsum,velec);
1135 fscal = _mm256_and_pd(fscal,cutoff_mask);
1137 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1139 /* Calculate temporary vectorial force */
1140 tx = _mm256_mul_pd(fscal,dx21);
1141 ty = _mm256_mul_pd(fscal,dy21);
1142 tz = _mm256_mul_pd(fscal,dz21);
1144 /* Update vectorial force */
1145 fix2 = _mm256_add_pd(fix2,tx);
1146 fiy2 = _mm256_add_pd(fiy2,ty);
1147 fiz2 = _mm256_add_pd(fiz2,tz);
1149 fjx1 = _mm256_add_pd(fjx1,tx);
1150 fjy1 = _mm256_add_pd(fjy1,ty);
1151 fjz1 = _mm256_add_pd(fjz1,tz);
1155 /**************************
1156 * CALCULATE INTERACTIONS *
1157 **************************/
1159 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1162 /* REACTION-FIELD ELECTROSTATICS */
1163 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
1164 felec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
1166 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1168 /* Update potential sum for this i atom from the interaction with this j atom. */
1169 velec = _mm256_and_pd(velec,cutoff_mask);
1170 velec = _mm256_andnot_pd(dummy_mask,velec);
1171 velecsum = _mm256_add_pd(velecsum,velec);
1175 fscal = _mm256_and_pd(fscal,cutoff_mask);
1177 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1179 /* Calculate temporary vectorial force */
1180 tx = _mm256_mul_pd(fscal,dx22);
1181 ty = _mm256_mul_pd(fscal,dy22);
1182 tz = _mm256_mul_pd(fscal,dz22);
1184 /* Update vectorial force */
1185 fix2 = _mm256_add_pd(fix2,tx);
1186 fiy2 = _mm256_add_pd(fiy2,ty);
1187 fiz2 = _mm256_add_pd(fiz2,tz);
1189 fjx2 = _mm256_add_pd(fjx2,tx);
1190 fjy2 = _mm256_add_pd(fjy2,ty);
1191 fjz2 = _mm256_add_pd(fjz2,tz);
1195 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1196 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1197 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1198 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1200 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1201 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1203 /* Inner loop uses 361 flops */
1206 /* End of innermost loop */
1208 gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1209 f+i_coord_offset,fshift+i_shift_offset);
1212 /* Update potential energies */
1213 gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1214 gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
1216 /* Increment number of inner iterations */
1217 inneriter += j_index_end - j_index_start;
1219 /* Outer loop uses 20 flops */
1222 /* Increment number of outer iterations */
1225 /* Update outer/inner flops */
1227 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*361);
1230 * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_double
1231 * Electrostatics interaction: ReactionField
1232 * VdW interaction: CubicSplineTable
1233 * Geometry: Water3-Water3
1234 * Calculate force/pot: Force
1237 nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_double
1238 (t_nblist * gmx_restrict nlist,
1239 rvec * gmx_restrict xx,
1240 rvec * gmx_restrict ff,
1241 t_forcerec * gmx_restrict fr,
1242 t_mdatoms * gmx_restrict mdatoms,
1243 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1244 t_nrnb * gmx_restrict nrnb)
1246 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1247 * just 0 for non-waters.
1248 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1249 * jnr indices corresponding to data put in the four positions in the SIMD register.
1251 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1252 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1253 int jnrA,jnrB,jnrC,jnrD;
1254 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1255 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1256 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1257 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1258 real rcutoff_scalar;
1259 real *shiftvec,*fshift,*x,*f;
1260 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1261 real scratch[4*DIM];
1262 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1263 real * vdwioffsetptr0;
1264 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1265 real * vdwioffsetptr1;
1266 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1267 real * vdwioffsetptr2;
1268 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1269 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1270 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1271 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1272 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1273 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1274 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1275 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1276 __m256d dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1277 __m256d dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1278 __m256d dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1279 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1280 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1281 __m256d dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1282 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1283 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1284 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
1287 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1290 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
1291 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
1293 __m128i ifour = _mm_set1_epi32(4);
1294 __m256d rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1296 __m256d dummy_mask,cutoff_mask;
1297 __m128 tmpmask0,tmpmask1;
1298 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1299 __m256d one = _mm256_set1_pd(1.0);
1300 __m256d two = _mm256_set1_pd(2.0);
1306 jindex = nlist->jindex;
1308 shiftidx = nlist->shift;
1310 shiftvec = fr->shift_vec[0];
1311 fshift = fr->fshift[0];
1312 facel = _mm256_set1_pd(fr->epsfac);
1313 charge = mdatoms->chargeA;
1314 krf = _mm256_set1_pd(fr->ic->k_rf);
1315 krf2 = _mm256_set1_pd(fr->ic->k_rf*2.0);
1316 crf = _mm256_set1_pd(fr->ic->c_rf);
1317 nvdwtype = fr->ntype;
1318 vdwparam = fr->nbfp;
1319 vdwtype = mdatoms->typeA;
1321 vftab = kernel_data->table_vdw->data;
1322 vftabscale = _mm256_set1_pd(kernel_data->table_vdw->scale);
1324 /* Setup water-specific parameters */
1325 inr = nlist->iinr[0];
1326 iq0 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1327 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1328 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1329 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1331 jq0 = _mm256_set1_pd(charge[inr+0]);
1332 jq1 = _mm256_set1_pd(charge[inr+1]);
1333 jq2 = _mm256_set1_pd(charge[inr+2]);
1334 vdwjidx0A = 2*vdwtype[inr+0];
1335 qq00 = _mm256_mul_pd(iq0,jq0);
1336 c6_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1337 c12_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1338 qq01 = _mm256_mul_pd(iq0,jq1);
1339 qq02 = _mm256_mul_pd(iq0,jq2);
1340 qq10 = _mm256_mul_pd(iq1,jq0);
1341 qq11 = _mm256_mul_pd(iq1,jq1);
1342 qq12 = _mm256_mul_pd(iq1,jq2);
1343 qq20 = _mm256_mul_pd(iq2,jq0);
1344 qq21 = _mm256_mul_pd(iq2,jq1);
1345 qq22 = _mm256_mul_pd(iq2,jq2);
1347 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1348 rcutoff_scalar = fr->rcoulomb;
1349 rcutoff = _mm256_set1_pd(rcutoff_scalar);
1350 rcutoff2 = _mm256_mul_pd(rcutoff,rcutoff);
1352 /* Avoid stupid compiler warnings */
1353 jnrA = jnrB = jnrC = jnrD = 0;
1354 j_coord_offsetA = 0;
1355 j_coord_offsetB = 0;
1356 j_coord_offsetC = 0;
1357 j_coord_offsetD = 0;
1362 for(iidx=0;iidx<4*DIM;iidx++)
1364 scratch[iidx] = 0.0;
1367 /* Start outer loop over neighborlists */
1368 for(iidx=0; iidx<nri; iidx++)
1370 /* Load shift vector for this list */
1371 i_shift_offset = DIM*shiftidx[iidx];
1373 /* Load limits for loop over neighbors */
1374 j_index_start = jindex[iidx];
1375 j_index_end = jindex[iidx+1];
1377 /* Get outer coordinate index */
1379 i_coord_offset = DIM*inr;
1381 /* Load i particle coords and add shift vector */
1382 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1383 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1385 fix0 = _mm256_setzero_pd();
1386 fiy0 = _mm256_setzero_pd();
1387 fiz0 = _mm256_setzero_pd();
1388 fix1 = _mm256_setzero_pd();
1389 fiy1 = _mm256_setzero_pd();
1390 fiz1 = _mm256_setzero_pd();
1391 fix2 = _mm256_setzero_pd();
1392 fiy2 = _mm256_setzero_pd();
1393 fiz2 = _mm256_setzero_pd();
1395 /* Start inner kernel loop */
1396 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1399 /* Get j neighbor index, and coordinate index */
1401 jnrB = jjnr[jidx+1];
1402 jnrC = jjnr[jidx+2];
1403 jnrD = jjnr[jidx+3];
1404 j_coord_offsetA = DIM*jnrA;
1405 j_coord_offsetB = DIM*jnrB;
1406 j_coord_offsetC = DIM*jnrC;
1407 j_coord_offsetD = DIM*jnrD;
1409 /* load j atom coordinates */
1410 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1411 x+j_coord_offsetC,x+j_coord_offsetD,
1412 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1414 /* Calculate displacement vector */
1415 dx00 = _mm256_sub_pd(ix0,jx0);
1416 dy00 = _mm256_sub_pd(iy0,jy0);
1417 dz00 = _mm256_sub_pd(iz0,jz0);
1418 dx01 = _mm256_sub_pd(ix0,jx1);
1419 dy01 = _mm256_sub_pd(iy0,jy1);
1420 dz01 = _mm256_sub_pd(iz0,jz1);
1421 dx02 = _mm256_sub_pd(ix0,jx2);
1422 dy02 = _mm256_sub_pd(iy0,jy2);
1423 dz02 = _mm256_sub_pd(iz0,jz2);
1424 dx10 = _mm256_sub_pd(ix1,jx0);
1425 dy10 = _mm256_sub_pd(iy1,jy0);
1426 dz10 = _mm256_sub_pd(iz1,jz0);
1427 dx11 = _mm256_sub_pd(ix1,jx1);
1428 dy11 = _mm256_sub_pd(iy1,jy1);
1429 dz11 = _mm256_sub_pd(iz1,jz1);
1430 dx12 = _mm256_sub_pd(ix1,jx2);
1431 dy12 = _mm256_sub_pd(iy1,jy2);
1432 dz12 = _mm256_sub_pd(iz1,jz2);
1433 dx20 = _mm256_sub_pd(ix2,jx0);
1434 dy20 = _mm256_sub_pd(iy2,jy0);
1435 dz20 = _mm256_sub_pd(iz2,jz0);
1436 dx21 = _mm256_sub_pd(ix2,jx1);
1437 dy21 = _mm256_sub_pd(iy2,jy1);
1438 dz21 = _mm256_sub_pd(iz2,jz1);
1439 dx22 = _mm256_sub_pd(ix2,jx2);
1440 dy22 = _mm256_sub_pd(iy2,jy2);
1441 dz22 = _mm256_sub_pd(iz2,jz2);
1443 /* Calculate squared distance and things based on it */
1444 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1445 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1446 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1447 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1448 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1449 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1450 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1451 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1452 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1454 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
1455 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
1456 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
1457 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
1458 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
1459 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
1460 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
1461 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
1462 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
1464 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
1465 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
1466 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
1467 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
1468 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1469 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1470 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
1471 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1472 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1474 fjx0 = _mm256_setzero_pd();
1475 fjy0 = _mm256_setzero_pd();
1476 fjz0 = _mm256_setzero_pd();
1477 fjx1 = _mm256_setzero_pd();
1478 fjy1 = _mm256_setzero_pd();
1479 fjz1 = _mm256_setzero_pd();
1480 fjx2 = _mm256_setzero_pd();
1481 fjy2 = _mm256_setzero_pd();
1482 fjz2 = _mm256_setzero_pd();
1484 /**************************
1485 * CALCULATE INTERACTIONS *
1486 **************************/
1488 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1491 r00 = _mm256_mul_pd(rsq00,rinv00);
1493 /* Calculate table index by multiplying r with table scale and truncate to integer */
1494 rt = _mm256_mul_pd(r00,vftabscale);
1495 vfitab = _mm256_cvttpd_epi32(rt);
1496 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1497 vfitab = _mm_slli_epi32(vfitab,3);
1499 /* REACTION-FIELD ELECTROSTATICS */
1500 felec = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
1502 /* CUBIC SPLINE TABLE DISPERSION */
1503 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1504 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1505 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1506 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1507 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1508 Heps = _mm256_mul_pd(vfeps,H);
1509 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1510 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1511 fvdw6 = _mm256_mul_pd(c6_00,FF);
1513 /* CUBIC SPLINE TABLE REPULSION */
1514 vfitab = _mm_add_epi32(vfitab,ifour);
1515 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1516 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1517 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1518 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1519 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1520 Heps = _mm256_mul_pd(vfeps,H);
1521 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1522 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1523 fvdw12 = _mm256_mul_pd(c12_00,FF);
1524 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1526 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1528 fscal = _mm256_add_pd(felec,fvdw);
1530 fscal = _mm256_and_pd(fscal,cutoff_mask);
1532 /* Calculate temporary vectorial force */
1533 tx = _mm256_mul_pd(fscal,dx00);
1534 ty = _mm256_mul_pd(fscal,dy00);
1535 tz = _mm256_mul_pd(fscal,dz00);
1537 /* Update vectorial force */
1538 fix0 = _mm256_add_pd(fix0,tx);
1539 fiy0 = _mm256_add_pd(fiy0,ty);
1540 fiz0 = _mm256_add_pd(fiz0,tz);
1542 fjx0 = _mm256_add_pd(fjx0,tx);
1543 fjy0 = _mm256_add_pd(fjy0,ty);
1544 fjz0 = _mm256_add_pd(fjz0,tz);
1548 /**************************
1549 * CALCULATE INTERACTIONS *
1550 **************************/
1552 if (gmx_mm256_any_lt(rsq01,rcutoff2))
1555 /* REACTION-FIELD ELECTROSTATICS */
1556 felec = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
1558 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1562 fscal = _mm256_and_pd(fscal,cutoff_mask);
1564 /* Calculate temporary vectorial force */
1565 tx = _mm256_mul_pd(fscal,dx01);
1566 ty = _mm256_mul_pd(fscal,dy01);
1567 tz = _mm256_mul_pd(fscal,dz01);
1569 /* Update vectorial force */
1570 fix0 = _mm256_add_pd(fix0,tx);
1571 fiy0 = _mm256_add_pd(fiy0,ty);
1572 fiz0 = _mm256_add_pd(fiz0,tz);
1574 fjx1 = _mm256_add_pd(fjx1,tx);
1575 fjy1 = _mm256_add_pd(fjy1,ty);
1576 fjz1 = _mm256_add_pd(fjz1,tz);
1580 /**************************
1581 * CALCULATE INTERACTIONS *
1582 **************************/
1584 if (gmx_mm256_any_lt(rsq02,rcutoff2))
1587 /* REACTION-FIELD ELECTROSTATICS */
1588 felec = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
1590 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
1594 fscal = _mm256_and_pd(fscal,cutoff_mask);
1596 /* Calculate temporary vectorial force */
1597 tx = _mm256_mul_pd(fscal,dx02);
1598 ty = _mm256_mul_pd(fscal,dy02);
1599 tz = _mm256_mul_pd(fscal,dz02);
1601 /* Update vectorial force */
1602 fix0 = _mm256_add_pd(fix0,tx);
1603 fiy0 = _mm256_add_pd(fiy0,ty);
1604 fiz0 = _mm256_add_pd(fiz0,tz);
1606 fjx2 = _mm256_add_pd(fjx2,tx);
1607 fjy2 = _mm256_add_pd(fjy2,ty);
1608 fjz2 = _mm256_add_pd(fjz2,tz);
1612 /**************************
1613 * CALCULATE INTERACTIONS *
1614 **************************/
1616 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1619 /* REACTION-FIELD ELECTROSTATICS */
1620 felec = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
1622 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1626 fscal = _mm256_and_pd(fscal,cutoff_mask);
1628 /* Calculate temporary vectorial force */
1629 tx = _mm256_mul_pd(fscal,dx10);
1630 ty = _mm256_mul_pd(fscal,dy10);
1631 tz = _mm256_mul_pd(fscal,dz10);
1633 /* Update vectorial force */
1634 fix1 = _mm256_add_pd(fix1,tx);
1635 fiy1 = _mm256_add_pd(fiy1,ty);
1636 fiz1 = _mm256_add_pd(fiz1,tz);
1638 fjx0 = _mm256_add_pd(fjx0,tx);
1639 fjy0 = _mm256_add_pd(fjy0,ty);
1640 fjz0 = _mm256_add_pd(fjz0,tz);
1644 /**************************
1645 * CALCULATE INTERACTIONS *
1646 **************************/
1648 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1651 /* REACTION-FIELD ELECTROSTATICS */
1652 felec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
1654 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1658 fscal = _mm256_and_pd(fscal,cutoff_mask);
1660 /* Calculate temporary vectorial force */
1661 tx = _mm256_mul_pd(fscal,dx11);
1662 ty = _mm256_mul_pd(fscal,dy11);
1663 tz = _mm256_mul_pd(fscal,dz11);
1665 /* Update vectorial force */
1666 fix1 = _mm256_add_pd(fix1,tx);
1667 fiy1 = _mm256_add_pd(fiy1,ty);
1668 fiz1 = _mm256_add_pd(fiz1,tz);
1670 fjx1 = _mm256_add_pd(fjx1,tx);
1671 fjy1 = _mm256_add_pd(fjy1,ty);
1672 fjz1 = _mm256_add_pd(fjz1,tz);
1676 /**************************
1677 * CALCULATE INTERACTIONS *
1678 **************************/
1680 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1683 /* REACTION-FIELD ELECTROSTATICS */
1684 felec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
1686 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1690 fscal = _mm256_and_pd(fscal,cutoff_mask);
1692 /* Calculate temporary vectorial force */
1693 tx = _mm256_mul_pd(fscal,dx12);
1694 ty = _mm256_mul_pd(fscal,dy12);
1695 tz = _mm256_mul_pd(fscal,dz12);
1697 /* Update vectorial force */
1698 fix1 = _mm256_add_pd(fix1,tx);
1699 fiy1 = _mm256_add_pd(fiy1,ty);
1700 fiz1 = _mm256_add_pd(fiz1,tz);
1702 fjx2 = _mm256_add_pd(fjx2,tx);
1703 fjy2 = _mm256_add_pd(fjy2,ty);
1704 fjz2 = _mm256_add_pd(fjz2,tz);
1708 /**************************
1709 * CALCULATE INTERACTIONS *
1710 **************************/
1712 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1715 /* REACTION-FIELD ELECTROSTATICS */
1716 felec = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
1718 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1722 fscal = _mm256_and_pd(fscal,cutoff_mask);
1724 /* Calculate temporary vectorial force */
1725 tx = _mm256_mul_pd(fscal,dx20);
1726 ty = _mm256_mul_pd(fscal,dy20);
1727 tz = _mm256_mul_pd(fscal,dz20);
1729 /* Update vectorial force */
1730 fix2 = _mm256_add_pd(fix2,tx);
1731 fiy2 = _mm256_add_pd(fiy2,ty);
1732 fiz2 = _mm256_add_pd(fiz2,tz);
1734 fjx0 = _mm256_add_pd(fjx0,tx);
1735 fjy0 = _mm256_add_pd(fjy0,ty);
1736 fjz0 = _mm256_add_pd(fjz0,tz);
1740 /**************************
1741 * CALCULATE INTERACTIONS *
1742 **************************/
1744 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1747 /* REACTION-FIELD ELECTROSTATICS */
1748 felec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
1750 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1754 fscal = _mm256_and_pd(fscal,cutoff_mask);
1756 /* Calculate temporary vectorial force */
1757 tx = _mm256_mul_pd(fscal,dx21);
1758 ty = _mm256_mul_pd(fscal,dy21);
1759 tz = _mm256_mul_pd(fscal,dz21);
1761 /* Update vectorial force */
1762 fix2 = _mm256_add_pd(fix2,tx);
1763 fiy2 = _mm256_add_pd(fiy2,ty);
1764 fiz2 = _mm256_add_pd(fiz2,tz);
1766 fjx1 = _mm256_add_pd(fjx1,tx);
1767 fjy1 = _mm256_add_pd(fjy1,ty);
1768 fjz1 = _mm256_add_pd(fjz1,tz);
1772 /**************************
1773 * CALCULATE INTERACTIONS *
1774 **************************/
1776 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1779 /* REACTION-FIELD ELECTROSTATICS */
1780 felec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
1782 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1786 fscal = _mm256_and_pd(fscal,cutoff_mask);
1788 /* Calculate temporary vectorial force */
1789 tx = _mm256_mul_pd(fscal,dx22);
1790 ty = _mm256_mul_pd(fscal,dy22);
1791 tz = _mm256_mul_pd(fscal,dz22);
1793 /* Update vectorial force */
1794 fix2 = _mm256_add_pd(fix2,tx);
1795 fiy2 = _mm256_add_pd(fiy2,ty);
1796 fiz2 = _mm256_add_pd(fiz2,tz);
1798 fjx2 = _mm256_add_pd(fjx2,tx);
1799 fjy2 = _mm256_add_pd(fjy2,ty);
1800 fjz2 = _mm256_add_pd(fjz2,tz);
1804 fjptrA = f+j_coord_offsetA;
1805 fjptrB = f+j_coord_offsetB;
1806 fjptrC = f+j_coord_offsetC;
1807 fjptrD = f+j_coord_offsetD;
1809 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1810 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1812 /* Inner loop uses 297 flops */
1815 if(jidx<j_index_end)
1818 /* Get j neighbor index, and coordinate index */
1819 jnrlistA = jjnr[jidx];
1820 jnrlistB = jjnr[jidx+1];
1821 jnrlistC = jjnr[jidx+2];
1822 jnrlistD = jjnr[jidx+3];
1823 /* Sign of each element will be negative for non-real atoms.
1824 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1825 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
1827 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1829 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1830 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1831 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
1833 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1834 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1835 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1836 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1837 j_coord_offsetA = DIM*jnrA;
1838 j_coord_offsetB = DIM*jnrB;
1839 j_coord_offsetC = DIM*jnrC;
1840 j_coord_offsetD = DIM*jnrD;
1842 /* load j atom coordinates */
1843 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1844 x+j_coord_offsetC,x+j_coord_offsetD,
1845 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1847 /* Calculate displacement vector */
1848 dx00 = _mm256_sub_pd(ix0,jx0);
1849 dy00 = _mm256_sub_pd(iy0,jy0);
1850 dz00 = _mm256_sub_pd(iz0,jz0);
1851 dx01 = _mm256_sub_pd(ix0,jx1);
1852 dy01 = _mm256_sub_pd(iy0,jy1);
1853 dz01 = _mm256_sub_pd(iz0,jz1);
1854 dx02 = _mm256_sub_pd(ix0,jx2);
1855 dy02 = _mm256_sub_pd(iy0,jy2);
1856 dz02 = _mm256_sub_pd(iz0,jz2);
1857 dx10 = _mm256_sub_pd(ix1,jx0);
1858 dy10 = _mm256_sub_pd(iy1,jy0);
1859 dz10 = _mm256_sub_pd(iz1,jz0);
1860 dx11 = _mm256_sub_pd(ix1,jx1);
1861 dy11 = _mm256_sub_pd(iy1,jy1);
1862 dz11 = _mm256_sub_pd(iz1,jz1);
1863 dx12 = _mm256_sub_pd(ix1,jx2);
1864 dy12 = _mm256_sub_pd(iy1,jy2);
1865 dz12 = _mm256_sub_pd(iz1,jz2);
1866 dx20 = _mm256_sub_pd(ix2,jx0);
1867 dy20 = _mm256_sub_pd(iy2,jy0);
1868 dz20 = _mm256_sub_pd(iz2,jz0);
1869 dx21 = _mm256_sub_pd(ix2,jx1);
1870 dy21 = _mm256_sub_pd(iy2,jy1);
1871 dz21 = _mm256_sub_pd(iz2,jz1);
1872 dx22 = _mm256_sub_pd(ix2,jx2);
1873 dy22 = _mm256_sub_pd(iy2,jy2);
1874 dz22 = _mm256_sub_pd(iz2,jz2);
1876 /* Calculate squared distance and things based on it */
1877 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1878 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1879 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1880 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1881 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1882 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1883 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1884 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1885 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1887 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
1888 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
1889 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
1890 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
1891 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
1892 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
1893 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
1894 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
1895 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
1897 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
1898 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
1899 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
1900 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
1901 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1902 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1903 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
1904 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1905 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1907 fjx0 = _mm256_setzero_pd();
1908 fjy0 = _mm256_setzero_pd();
1909 fjz0 = _mm256_setzero_pd();
1910 fjx1 = _mm256_setzero_pd();
1911 fjy1 = _mm256_setzero_pd();
1912 fjz1 = _mm256_setzero_pd();
1913 fjx2 = _mm256_setzero_pd();
1914 fjy2 = _mm256_setzero_pd();
1915 fjz2 = _mm256_setzero_pd();
1917 /**************************
1918 * CALCULATE INTERACTIONS *
1919 **************************/
1921 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1924 r00 = _mm256_mul_pd(rsq00,rinv00);
1925 r00 = _mm256_andnot_pd(dummy_mask,r00);
1927 /* Calculate table index by multiplying r with table scale and truncate to integer */
1928 rt = _mm256_mul_pd(r00,vftabscale);
/* NOTE(review): interior of an auto-generated GROMACS AVX-256 double
 * nonbonded kernel (3-site water vs 3-site water).  No potential
 * energies are accumulated in this span and the flop counter at the
 * bottom uses eNR_NBKERNEL_ELEC_VDW_W3W3_F, so this looks like the
 * force-only *_F_* kernel even though the file header names the VF
 * variant -- the generated file contains both; confirm upstream.
 *
 * NOTE(review): the embedded original line numbers have gaps exactly
 * where repeated lines would sit: the braces of each
 * `if (gmx_mm256_any_lt(...))` block, blank lines, and a
 * `fscal = felec;` assignment in each RF-only pair section (e.g.
 * original lines 1995-1997).  These appear to have been dropped by
 * the chunk extraction, not from the real generated source -- verify
 * against the full file before "fixing" anything here.
 */
/* --- O-O (0-0) pair: RF electrostatics + tabulated VdW --- */
/* Spline table setup: rt = r*tabscale.  Truncation gives the table
 * point index; the fractional part vfeps (in [0,1)) is the spline
 * evaluation offset. */
1929 vfitab = _mm256_cvttpd_epi32(rt);
1930 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
/* Index *= 8: each table point stores 8 doubles -- 4 dispersion
 * coefficients (Y,F,G,H) followed by 4 repulsion coefficients
 * (selected below via the +4 `ifour` offset). */
1931 vfitab = _mm_slli_epi32(vfitab,3);
1933 /* REACTION-FIELD ELECTROSTATICS */
/* RF scalar force factor f/r = qq*(1/r^3 - krf2); krf2 is presumably
 * 2*k_rf, set up earlier in the kernel -- TODO confirm. */
1934 felec = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
1936 /* CUBIC SPLINE TABLE DISPERSION */
/* Gather one 4-double coefficient row per j lane, then transpose so
 * Y,F,G,H each hold that coefficient for all four lanes.  Aligned
 * loads: assumes vftab is 32-byte aligned -- presumably guaranteed by
 * the table setup elsewhere. */
1937 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1938 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1939 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1940 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1941 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
/* FF = F + 2*eps*G + 3*eps^2*H, i.e. dV/deps of the cubic spline
 * V = Y + eps*(F + eps*(G + eps*H)). */
1942 Heps = _mm256_mul_pd(vfeps,H);
1943 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1944 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1945 fvdw6 = _mm256_mul_pd(c6_00,FF);
1947 /* CUBIC SPLINE TABLE REPULSION */
/* Advance the index to the repulsion half of the table point (+4). */
1948 vfitab = _mm_add_epi32(vfitab,ifour);
1949 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1950 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1951 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1952 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1953 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1954 Heps = _mm256_mul_pd(vfeps,H);
1955 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1956 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1957 fvdw12 = _mm256_mul_pd(c12_00,FF);
/* VdW f/r = -(c6*FF6 + c12*FF12)*tabscale/r: the XOR with signbit
 * performs the negation; rinv00 turns dV/dr into the f/r factor used
 * for the vectorial force below. */
1958 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
/* Zero lanes beyond the cutoff, and lanes that are padding (dummy)
 * j entries, before applying the force. */
1960 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1962 fscal = _mm256_add_pd(felec,fvdw);
1964 fscal = _mm256_and_pd(fscal,cutoff_mask);
1966 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1968 /* Calculate temporary vectorial force */
1969 tx = _mm256_mul_pd(fscal,dx00);
1970 ty = _mm256_mul_pd(fscal,dy00);
1971 tz = _mm256_mul_pd(fscal,dz00);
1973 /* Update vectorial force */
1974 fix0 = _mm256_add_pd(fix0,tx);
1975 fiy0 = _mm256_add_pd(fiy0,ty);
1976 fiz0 = _mm256_add_pd(fiz0,tz);
1978 fjx0 = _mm256_add_pd(fjx0,tx);
1979 fjy0 = _mm256_add_pd(fjy0,ty);
1980 fjz0 = _mm256_add_pd(fjz0,tz);
1984 /**************************
1985 * CALCULATE INTERACTIONS *
1986 **************************/
/* 0-1 pair: RF electrostatics only (the H-site pairs carry no VdW).
 * NOTE(review): a `fscal = felec;` line (original ~1996) appears to
 * have been dropped by the extraction here and in the seven analogous
 * sections below -- fscal would otherwise be stale from the previous
 * section; check the generated file. */
1988 if (gmx_mm256_any_lt(rsq01,rcutoff2))
1991 /* REACTION-FIELD ELECTROSTATICS */
1992 felec = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
1994 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1998 fscal = _mm256_and_pd(fscal,cutoff_mask);
2000 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2002 /* Calculate temporary vectorial force */
2003 tx = _mm256_mul_pd(fscal,dx01);
2004 ty = _mm256_mul_pd(fscal,dy01);
2005 tz = _mm256_mul_pd(fscal,dz01);
2007 /* Update vectorial force */
2008 fix0 = _mm256_add_pd(fix0,tx);
2009 fiy0 = _mm256_add_pd(fiy0,ty);
2010 fiz0 = _mm256_add_pd(fiz0,tz);
2012 fjx1 = _mm256_add_pd(fjx1,tx);
2013 fjy1 = _mm256_add_pd(fjy1,ty);
2014 fjz1 = _mm256_add_pd(fjz1,tz);
2018 /**************************
2019 * CALCULATE INTERACTIONS *
2020 **************************/
/* 0-2 pair: same RF-only pattern as the 0-1 section. */
2022 if (gmx_mm256_any_lt(rsq02,rcutoff2))
2025 /* REACTION-FIELD ELECTROSTATICS */
2026 felec = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
2028 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2032 fscal = _mm256_and_pd(fscal,cutoff_mask);
2034 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2036 /* Calculate temporary vectorial force */
2037 tx = _mm256_mul_pd(fscal,dx02);
2038 ty = _mm256_mul_pd(fscal,dy02);
2039 tz = _mm256_mul_pd(fscal,dz02);
2041 /* Update vectorial force */
2042 fix0 = _mm256_add_pd(fix0,tx);
2043 fiy0 = _mm256_add_pd(fiy0,ty);
2044 fiz0 = _mm256_add_pd(fiz0,tz);
2046 fjx2 = _mm256_add_pd(fjx2,tx);
2047 fjy2 = _mm256_add_pd(fjy2,ty);
2048 fjz2 = _mm256_add_pd(fjz2,tz);
2052 /**************************
2053 * CALCULATE INTERACTIONS *
2054 **************************/
/* 1-0 pair: RF-only. */
2056 if (gmx_mm256_any_lt(rsq10,rcutoff2))
2059 /* REACTION-FIELD ELECTROSTATICS */
2060 felec = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
2062 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2066 fscal = _mm256_and_pd(fscal,cutoff_mask);
2068 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2070 /* Calculate temporary vectorial force */
2071 tx = _mm256_mul_pd(fscal,dx10);
2072 ty = _mm256_mul_pd(fscal,dy10);
2073 tz = _mm256_mul_pd(fscal,dz10);
2075 /* Update vectorial force */
2076 fix1 = _mm256_add_pd(fix1,tx);
2077 fiy1 = _mm256_add_pd(fiy1,ty);
2078 fiz1 = _mm256_add_pd(fiz1,tz);
2080 fjx0 = _mm256_add_pd(fjx0,tx);
2081 fjy0 = _mm256_add_pd(fjy0,ty);
2082 fjz0 = _mm256_add_pd(fjz0,tz);
2086 /**************************
2087 * CALCULATE INTERACTIONS *
2088 **************************/
/* 1-1 pair: RF-only. */
2090 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2093 /* REACTION-FIELD ELECTROSTATICS */
2094 felec = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
2096 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2100 fscal = _mm256_and_pd(fscal,cutoff_mask);
2102 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2104 /* Calculate temporary vectorial force */
2105 tx = _mm256_mul_pd(fscal,dx11);
2106 ty = _mm256_mul_pd(fscal,dy11);
2107 tz = _mm256_mul_pd(fscal,dz11);
2109 /* Update vectorial force */
2110 fix1 = _mm256_add_pd(fix1,tx);
2111 fiy1 = _mm256_add_pd(fiy1,ty);
2112 fiz1 = _mm256_add_pd(fiz1,tz);
2114 fjx1 = _mm256_add_pd(fjx1,tx);
2115 fjy1 = _mm256_add_pd(fjy1,ty);
2116 fjz1 = _mm256_add_pd(fjz1,tz);
2120 /**************************
2121 * CALCULATE INTERACTIONS *
2122 **************************/
/* 1-2 pair: RF-only. */
2124 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2127 /* REACTION-FIELD ELECTROSTATICS */
2128 felec = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
2130 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2134 fscal = _mm256_and_pd(fscal,cutoff_mask);
2136 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2138 /* Calculate temporary vectorial force */
2139 tx = _mm256_mul_pd(fscal,dx12);
2140 ty = _mm256_mul_pd(fscal,dy12);
2141 tz = _mm256_mul_pd(fscal,dz12);
2143 /* Update vectorial force */
2144 fix1 = _mm256_add_pd(fix1,tx);
2145 fiy1 = _mm256_add_pd(fiy1,ty);
2146 fiz1 = _mm256_add_pd(fiz1,tz);
2148 fjx2 = _mm256_add_pd(fjx2,tx);
2149 fjy2 = _mm256_add_pd(fjy2,ty);
2150 fjz2 = _mm256_add_pd(fjz2,tz);
2154 /**************************
2155 * CALCULATE INTERACTIONS *
2156 **************************/
/* 2-0 pair: RF-only. */
2158 if (gmx_mm256_any_lt(rsq20,rcutoff2))
2161 /* REACTION-FIELD ELECTROSTATICS */
2162 felec = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
2164 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2168 fscal = _mm256_and_pd(fscal,cutoff_mask);
2170 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2172 /* Calculate temporary vectorial force */
2173 tx = _mm256_mul_pd(fscal,dx20);
2174 ty = _mm256_mul_pd(fscal,dy20);
2175 tz = _mm256_mul_pd(fscal,dz20);
2177 /* Update vectorial force */
2178 fix2 = _mm256_add_pd(fix2,tx);
2179 fiy2 = _mm256_add_pd(fiy2,ty);
2180 fiz2 = _mm256_add_pd(fiz2,tz);
2182 fjx0 = _mm256_add_pd(fjx0,tx);
2183 fjy0 = _mm256_add_pd(fjy0,ty);
2184 fjz0 = _mm256_add_pd(fjz0,tz);
2188 /**************************
2189 * CALCULATE INTERACTIONS *
2190 **************************/
/* 2-1 pair: RF-only. */
2192 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2195 /* REACTION-FIELD ELECTROSTATICS */
2196 felec = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
2198 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2202 fscal = _mm256_and_pd(fscal,cutoff_mask);
2204 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2206 /* Calculate temporary vectorial force */
2207 tx = _mm256_mul_pd(fscal,dx21);
2208 ty = _mm256_mul_pd(fscal,dy21);
2209 tz = _mm256_mul_pd(fscal,dz21);
2211 /* Update vectorial force */
2212 fix2 = _mm256_add_pd(fix2,tx);
2213 fiy2 = _mm256_add_pd(fiy2,ty);
2214 fiz2 = _mm256_add_pd(fiz2,tz);
2216 fjx1 = _mm256_add_pd(fjx1,tx);
2217 fjy1 = _mm256_add_pd(fjy1,ty);
2218 fjz1 = _mm256_add_pd(fjz1,tz);
2222 /**************************
2223 * CALCULATE INTERACTIONS *
2224 **************************/
/* 2-2 pair: RF-only. */
2226 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2229 /* REACTION-FIELD ELECTROSTATICS */
2230 felec = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
2232 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2236 fscal = _mm256_and_pd(fscal,cutoff_mask);
2238 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2240 /* Calculate temporary vectorial force */
2241 tx = _mm256_mul_pd(fscal,dx22);
2242 ty = _mm256_mul_pd(fscal,dy22);
2243 tz = _mm256_mul_pd(fscal,dz22);
2245 /* Update vectorial force */
2246 fix2 = _mm256_add_pd(fix2,tx);
2247 fiy2 = _mm256_add_pd(fiy2,ty);
2248 fiz2 = _mm256_add_pd(fiz2,tz);
2250 fjx2 = _mm256_add_pd(fjx2,tx);
2251 fjy2 = _mm256_add_pd(fjy2,ty);
2252 fjz2 = _mm256_add_pd(fjz2,tz);
/* Padding j entries (jnrlist < 0) get their forces routed to a
 * scratch buffer instead of the real force array. */
2256 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2257 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2258 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2259 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
/* Write back the accumulated 3-site j forces via the swizzled
 * decrement helper (Newton's third law: j gets -f). */
2261 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2262 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2264 /* Inner loop uses 298 flops */
2267 /* End of innermost loop */
/* Reduce the SIMD i-atom force accumulators and add them, plus the
 * corresponding shift forces, into the output arrays. */
2269 gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2270 f+i_coord_offset,fshift+i_shift_offset);
2272 /* Increment number of inner iterations */
2273 inneriter += j_index_end - j_index_start;
2275 /* Outer loop uses 18 flops */
2278 /* Increment number of outer iterations */
2281 /* Update outer/inner flops */
2283 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*298);