2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
42 #include "../nb_kernel.h"
43 #include "gromacs/legacyheaders/types/simple.h"
44 #include "gromacs/math/vec.h"
45 #include "gromacs/legacyheaders/nrnb.h"
47 #include "gromacs/simd/math_x86_avx_256_double.h"
48 #include "kernelutil_x86_avx_256_double.h"
51 * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomW4W4_VF_avx_256_double
52 * Electrostatics interaction: CubicSplineTable
53 * VdW interaction: None
54 * Geometry: Water4-Water4
55 * Calculate force/pot: PotentialAndForce
58 nb_kernel_ElecCSTab_VdwNone_GeomW4W4_VF_avx_256_double
59 (t_nblist * gmx_restrict nlist,
60 rvec * gmx_restrict xx,
61 rvec * gmx_restrict ff,
62 t_forcerec * gmx_restrict fr,
63 t_mdatoms * gmx_restrict mdatoms,
64 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65 t_nrnb * gmx_restrict nrnb)
67 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
68 * just 0 for non-waters.
69 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
70 * jnr indices corresponding to data put in the four positions in the SIMD register.
72 int i_shift_offset,i_coord_offset,outeriter,inneriter;
73 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74 int jnrA,jnrB,jnrC,jnrD;
75 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
80 real *shiftvec,*fshift,*x,*f;
81 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
83 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
84 real * vdwioffsetptr1;
85 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
86 real * vdwioffsetptr2;
87 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
88 real * vdwioffsetptr3;
89 __m256d ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
90 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
91 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
92 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
93 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
94 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
95 __m256d jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
96 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
97 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
98 __m256d dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
99 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
100 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
101 __m256d dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
102 __m256d dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
103 __m256d dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
104 __m256d dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
105 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
108 __m128i ifour = _mm_set1_epi32(4);
109 __m256d rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
111 __m256d dummy_mask,cutoff_mask;
112 __m128 tmpmask0,tmpmask1;
113 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
114 __m256d one = _mm256_set1_pd(1.0);
115 __m256d two = _mm256_set1_pd(2.0);
121 jindex = nlist->jindex;
123 shiftidx = nlist->shift;
125 shiftvec = fr->shift_vec[0];
126 fshift = fr->fshift[0];
127 facel = _mm256_set1_pd(fr->epsfac);
128 charge = mdatoms->chargeA;
130 vftab = kernel_data->table_elec->data;
131 vftabscale = _mm256_set1_pd(kernel_data->table_elec->scale);
133 /* Setup water-specific parameters */
134 inr = nlist->iinr[0];
135 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
136 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
137 iq3 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
139 jq1 = _mm256_set1_pd(charge[inr+1]);
140 jq2 = _mm256_set1_pd(charge[inr+2]);
141 jq3 = _mm256_set1_pd(charge[inr+3]);
142 qq11 = _mm256_mul_pd(iq1,jq1);
143 qq12 = _mm256_mul_pd(iq1,jq2);
144 qq13 = _mm256_mul_pd(iq1,jq3);
145 qq21 = _mm256_mul_pd(iq2,jq1);
146 qq22 = _mm256_mul_pd(iq2,jq2);
147 qq23 = _mm256_mul_pd(iq2,jq3);
148 qq31 = _mm256_mul_pd(iq3,jq1);
149 qq32 = _mm256_mul_pd(iq3,jq2);
150 qq33 = _mm256_mul_pd(iq3,jq3);
152 /* Avoid stupid compiler warnings */
153 jnrA = jnrB = jnrC = jnrD = 0;
162 for(iidx=0;iidx<4*DIM;iidx++)
167 /* Start outer loop over neighborlists */
168 for(iidx=0; iidx<nri; iidx++)
170 /* Load shift vector for this list */
171 i_shift_offset = DIM*shiftidx[iidx];
173 /* Load limits for loop over neighbors */
174 j_index_start = jindex[iidx];
175 j_index_end = jindex[iidx+1];
177 /* Get outer coordinate index */
179 i_coord_offset = DIM*inr;
181 /* Load i particle coords and add shift vector */
182 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
183 &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
185 fix1 = _mm256_setzero_pd();
186 fiy1 = _mm256_setzero_pd();
187 fiz1 = _mm256_setzero_pd();
188 fix2 = _mm256_setzero_pd();
189 fiy2 = _mm256_setzero_pd();
190 fiz2 = _mm256_setzero_pd();
191 fix3 = _mm256_setzero_pd();
192 fiy3 = _mm256_setzero_pd();
193 fiz3 = _mm256_setzero_pd();
195 /* Reset potential sums */
196 velecsum = _mm256_setzero_pd();
198 /* Start inner kernel loop */
199 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
202 /* Get j neighbor index, and coordinate index */
207 j_coord_offsetA = DIM*jnrA;
208 j_coord_offsetB = DIM*jnrB;
209 j_coord_offsetC = DIM*jnrC;
210 j_coord_offsetD = DIM*jnrD;
212 /* load j atom coordinates */
213 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
214 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
215 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
217 /* Calculate displacement vector */
218 dx11 = _mm256_sub_pd(ix1,jx1);
219 dy11 = _mm256_sub_pd(iy1,jy1);
220 dz11 = _mm256_sub_pd(iz1,jz1);
221 dx12 = _mm256_sub_pd(ix1,jx2);
222 dy12 = _mm256_sub_pd(iy1,jy2);
223 dz12 = _mm256_sub_pd(iz1,jz2);
224 dx13 = _mm256_sub_pd(ix1,jx3);
225 dy13 = _mm256_sub_pd(iy1,jy3);
226 dz13 = _mm256_sub_pd(iz1,jz3);
227 dx21 = _mm256_sub_pd(ix2,jx1);
228 dy21 = _mm256_sub_pd(iy2,jy1);
229 dz21 = _mm256_sub_pd(iz2,jz1);
230 dx22 = _mm256_sub_pd(ix2,jx2);
231 dy22 = _mm256_sub_pd(iy2,jy2);
232 dz22 = _mm256_sub_pd(iz2,jz2);
233 dx23 = _mm256_sub_pd(ix2,jx3);
234 dy23 = _mm256_sub_pd(iy2,jy3);
235 dz23 = _mm256_sub_pd(iz2,jz3);
236 dx31 = _mm256_sub_pd(ix3,jx1);
237 dy31 = _mm256_sub_pd(iy3,jy1);
238 dz31 = _mm256_sub_pd(iz3,jz1);
239 dx32 = _mm256_sub_pd(ix3,jx2);
240 dy32 = _mm256_sub_pd(iy3,jy2);
241 dz32 = _mm256_sub_pd(iz3,jz2);
242 dx33 = _mm256_sub_pd(ix3,jx3);
243 dy33 = _mm256_sub_pd(iy3,jy3);
244 dz33 = _mm256_sub_pd(iz3,jz3);
246 /* Calculate squared distance and things based on it */
247 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
248 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
249 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
250 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
251 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
252 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
253 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
254 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
255 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
257 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
258 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
259 rinv13 = gmx_mm256_invsqrt_pd(rsq13);
260 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
261 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
262 rinv23 = gmx_mm256_invsqrt_pd(rsq23);
263 rinv31 = gmx_mm256_invsqrt_pd(rsq31);
264 rinv32 = gmx_mm256_invsqrt_pd(rsq32);
265 rinv33 = gmx_mm256_invsqrt_pd(rsq33);
267 fjx1 = _mm256_setzero_pd();
268 fjy1 = _mm256_setzero_pd();
269 fjz1 = _mm256_setzero_pd();
270 fjx2 = _mm256_setzero_pd();
271 fjy2 = _mm256_setzero_pd();
272 fjz2 = _mm256_setzero_pd();
273 fjx3 = _mm256_setzero_pd();
274 fjy3 = _mm256_setzero_pd();
275 fjz3 = _mm256_setzero_pd();
277 /**************************
278 * CALCULATE INTERACTIONS *
279 **************************/
281 r11 = _mm256_mul_pd(rsq11,rinv11);
283 /* Calculate table index by multiplying r with table scale and truncate to integer */
284 rt = _mm256_mul_pd(r11,vftabscale);
285 vfitab = _mm256_cvttpd_epi32(rt);
286 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
287 vfitab = _mm_slli_epi32(vfitab,2);
289 /* CUBIC SPLINE TABLE ELECTROSTATICS */
290 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
291 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
292 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
293 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
294 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
295 Heps = _mm256_mul_pd(vfeps,H);
296 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
297 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
298 velec = _mm256_mul_pd(qq11,VV);
299 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
300 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
302 /* Update potential sum for this i atom from the interaction with this j atom. */
303 velecsum = _mm256_add_pd(velecsum,velec);
307 /* Calculate temporary vectorial force */
308 tx = _mm256_mul_pd(fscal,dx11);
309 ty = _mm256_mul_pd(fscal,dy11);
310 tz = _mm256_mul_pd(fscal,dz11);
312 /* Update vectorial force */
313 fix1 = _mm256_add_pd(fix1,tx);
314 fiy1 = _mm256_add_pd(fiy1,ty);
315 fiz1 = _mm256_add_pd(fiz1,tz);
317 fjx1 = _mm256_add_pd(fjx1,tx);
318 fjy1 = _mm256_add_pd(fjy1,ty);
319 fjz1 = _mm256_add_pd(fjz1,tz);
321 /**************************
322 * CALCULATE INTERACTIONS *
323 **************************/
325 r12 = _mm256_mul_pd(rsq12,rinv12);
327 /* Calculate table index by multiplying r with table scale and truncate to integer */
328 rt = _mm256_mul_pd(r12,vftabscale);
329 vfitab = _mm256_cvttpd_epi32(rt);
330 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
331 vfitab = _mm_slli_epi32(vfitab,2);
333 /* CUBIC SPLINE TABLE ELECTROSTATICS */
334 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
335 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
336 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
337 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
338 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
339 Heps = _mm256_mul_pd(vfeps,H);
340 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
341 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
342 velec = _mm256_mul_pd(qq12,VV);
343 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
344 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
346 /* Update potential sum for this i atom from the interaction with this j atom. */
347 velecsum = _mm256_add_pd(velecsum,velec);
351 /* Calculate temporary vectorial force */
352 tx = _mm256_mul_pd(fscal,dx12);
353 ty = _mm256_mul_pd(fscal,dy12);
354 tz = _mm256_mul_pd(fscal,dz12);
356 /* Update vectorial force */
357 fix1 = _mm256_add_pd(fix1,tx);
358 fiy1 = _mm256_add_pd(fiy1,ty);
359 fiz1 = _mm256_add_pd(fiz1,tz);
361 fjx2 = _mm256_add_pd(fjx2,tx);
362 fjy2 = _mm256_add_pd(fjy2,ty);
363 fjz2 = _mm256_add_pd(fjz2,tz);
365 /**************************
366 * CALCULATE INTERACTIONS *
367 **************************/
369 r13 = _mm256_mul_pd(rsq13,rinv13);
371 /* Calculate table index by multiplying r with table scale and truncate to integer */
372 rt = _mm256_mul_pd(r13,vftabscale);
373 vfitab = _mm256_cvttpd_epi32(rt);
374 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
375 vfitab = _mm_slli_epi32(vfitab,2);
377 /* CUBIC SPLINE TABLE ELECTROSTATICS */
378 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
379 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
380 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
381 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
382 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
383 Heps = _mm256_mul_pd(vfeps,H);
384 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
385 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
386 velec = _mm256_mul_pd(qq13,VV);
387 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
388 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
390 /* Update potential sum for this i atom from the interaction with this j atom. */
391 velecsum = _mm256_add_pd(velecsum,velec);
395 /* Calculate temporary vectorial force */
396 tx = _mm256_mul_pd(fscal,dx13);
397 ty = _mm256_mul_pd(fscal,dy13);
398 tz = _mm256_mul_pd(fscal,dz13);
400 /* Update vectorial force */
401 fix1 = _mm256_add_pd(fix1,tx);
402 fiy1 = _mm256_add_pd(fiy1,ty);
403 fiz1 = _mm256_add_pd(fiz1,tz);
405 fjx3 = _mm256_add_pd(fjx3,tx);
406 fjy3 = _mm256_add_pd(fjy3,ty);
407 fjz3 = _mm256_add_pd(fjz3,tz);
409 /**************************
410 * CALCULATE INTERACTIONS *
411 **************************/
413 r21 = _mm256_mul_pd(rsq21,rinv21);
415 /* Calculate table index by multiplying r with table scale and truncate to integer */
416 rt = _mm256_mul_pd(r21,vftabscale);
417 vfitab = _mm256_cvttpd_epi32(rt);
418 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
419 vfitab = _mm_slli_epi32(vfitab,2);
421 /* CUBIC SPLINE TABLE ELECTROSTATICS */
422 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
423 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
424 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
425 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
426 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
427 Heps = _mm256_mul_pd(vfeps,H);
428 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
429 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
430 velec = _mm256_mul_pd(qq21,VV);
431 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
432 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
434 /* Update potential sum for this i atom from the interaction with this j atom. */
435 velecsum = _mm256_add_pd(velecsum,velec);
439 /* Calculate temporary vectorial force */
440 tx = _mm256_mul_pd(fscal,dx21);
441 ty = _mm256_mul_pd(fscal,dy21);
442 tz = _mm256_mul_pd(fscal,dz21);
444 /* Update vectorial force */
445 fix2 = _mm256_add_pd(fix2,tx);
446 fiy2 = _mm256_add_pd(fiy2,ty);
447 fiz2 = _mm256_add_pd(fiz2,tz);
449 fjx1 = _mm256_add_pd(fjx1,tx);
450 fjy1 = _mm256_add_pd(fjy1,ty);
451 fjz1 = _mm256_add_pd(fjz1,tz);
453 /**************************
454 * CALCULATE INTERACTIONS *
455 **************************/
457 r22 = _mm256_mul_pd(rsq22,rinv22);
459 /* Calculate table index by multiplying r with table scale and truncate to integer */
460 rt = _mm256_mul_pd(r22,vftabscale);
461 vfitab = _mm256_cvttpd_epi32(rt);
462 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
463 vfitab = _mm_slli_epi32(vfitab,2);
465 /* CUBIC SPLINE TABLE ELECTROSTATICS */
466 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
467 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
468 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
469 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
470 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
471 Heps = _mm256_mul_pd(vfeps,H);
472 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
473 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
474 velec = _mm256_mul_pd(qq22,VV);
475 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
476 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
478 /* Update potential sum for this i atom from the interaction with this j atom. */
479 velecsum = _mm256_add_pd(velecsum,velec);
483 /* Calculate temporary vectorial force */
484 tx = _mm256_mul_pd(fscal,dx22);
485 ty = _mm256_mul_pd(fscal,dy22);
486 tz = _mm256_mul_pd(fscal,dz22);
488 /* Update vectorial force */
489 fix2 = _mm256_add_pd(fix2,tx);
490 fiy2 = _mm256_add_pd(fiy2,ty);
491 fiz2 = _mm256_add_pd(fiz2,tz);
493 fjx2 = _mm256_add_pd(fjx2,tx);
494 fjy2 = _mm256_add_pd(fjy2,ty);
495 fjz2 = _mm256_add_pd(fjz2,tz);
497 /**************************
498 * CALCULATE INTERACTIONS *
499 **************************/
501 r23 = _mm256_mul_pd(rsq23,rinv23);
503 /* Calculate table index by multiplying r with table scale and truncate to integer */
504 rt = _mm256_mul_pd(r23,vftabscale);
505 vfitab = _mm256_cvttpd_epi32(rt);
506 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
507 vfitab = _mm_slli_epi32(vfitab,2);
509 /* CUBIC SPLINE TABLE ELECTROSTATICS */
510 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
511 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
512 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
513 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
514 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
515 Heps = _mm256_mul_pd(vfeps,H);
516 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
517 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
518 velec = _mm256_mul_pd(qq23,VV);
519 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
520 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
522 /* Update potential sum for this i atom from the interaction with this j atom. */
523 velecsum = _mm256_add_pd(velecsum,velec);
527 /* Calculate temporary vectorial force */
528 tx = _mm256_mul_pd(fscal,dx23);
529 ty = _mm256_mul_pd(fscal,dy23);
530 tz = _mm256_mul_pd(fscal,dz23);
532 /* Update vectorial force */
533 fix2 = _mm256_add_pd(fix2,tx);
534 fiy2 = _mm256_add_pd(fiy2,ty);
535 fiz2 = _mm256_add_pd(fiz2,tz);
537 fjx3 = _mm256_add_pd(fjx3,tx);
538 fjy3 = _mm256_add_pd(fjy3,ty);
539 fjz3 = _mm256_add_pd(fjz3,tz);
541 /**************************
542 * CALCULATE INTERACTIONS *
543 **************************/
545 r31 = _mm256_mul_pd(rsq31,rinv31);
547 /* Calculate table index by multiplying r with table scale and truncate to integer */
548 rt = _mm256_mul_pd(r31,vftabscale);
549 vfitab = _mm256_cvttpd_epi32(rt);
550 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
551 vfitab = _mm_slli_epi32(vfitab,2);
553 /* CUBIC SPLINE TABLE ELECTROSTATICS */
554 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
555 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
556 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
557 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
558 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
559 Heps = _mm256_mul_pd(vfeps,H);
560 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
561 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
562 velec = _mm256_mul_pd(qq31,VV);
563 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
564 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
566 /* Update potential sum for this i atom from the interaction with this j atom. */
567 velecsum = _mm256_add_pd(velecsum,velec);
571 /* Calculate temporary vectorial force */
572 tx = _mm256_mul_pd(fscal,dx31);
573 ty = _mm256_mul_pd(fscal,dy31);
574 tz = _mm256_mul_pd(fscal,dz31);
576 /* Update vectorial force */
577 fix3 = _mm256_add_pd(fix3,tx);
578 fiy3 = _mm256_add_pd(fiy3,ty);
579 fiz3 = _mm256_add_pd(fiz3,tz);
581 fjx1 = _mm256_add_pd(fjx1,tx);
582 fjy1 = _mm256_add_pd(fjy1,ty);
583 fjz1 = _mm256_add_pd(fjz1,tz);
585 /**************************
586 * CALCULATE INTERACTIONS *
587 **************************/
589 r32 = _mm256_mul_pd(rsq32,rinv32);
591 /* Calculate table index by multiplying r with table scale and truncate to integer */
592 rt = _mm256_mul_pd(r32,vftabscale);
593 vfitab = _mm256_cvttpd_epi32(rt);
594 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
595 vfitab = _mm_slli_epi32(vfitab,2);
597 /* CUBIC SPLINE TABLE ELECTROSTATICS */
598 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
599 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
600 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
601 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
602 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
603 Heps = _mm256_mul_pd(vfeps,H);
604 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
605 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
606 velec = _mm256_mul_pd(qq32,VV);
607 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
608 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
610 /* Update potential sum for this i atom from the interaction with this j atom. */
611 velecsum = _mm256_add_pd(velecsum,velec);
615 /* Calculate temporary vectorial force */
616 tx = _mm256_mul_pd(fscal,dx32);
617 ty = _mm256_mul_pd(fscal,dy32);
618 tz = _mm256_mul_pd(fscal,dz32);
620 /* Update vectorial force */
621 fix3 = _mm256_add_pd(fix3,tx);
622 fiy3 = _mm256_add_pd(fiy3,ty);
623 fiz3 = _mm256_add_pd(fiz3,tz);
625 fjx2 = _mm256_add_pd(fjx2,tx);
626 fjy2 = _mm256_add_pd(fjy2,ty);
627 fjz2 = _mm256_add_pd(fjz2,tz);
629 /**************************
630 * CALCULATE INTERACTIONS *
631 **************************/
633 r33 = _mm256_mul_pd(rsq33,rinv33);
635 /* Calculate table index by multiplying r with table scale and truncate to integer */
636 rt = _mm256_mul_pd(r33,vftabscale);
637 vfitab = _mm256_cvttpd_epi32(rt);
638 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
639 vfitab = _mm_slli_epi32(vfitab,2);
641 /* CUBIC SPLINE TABLE ELECTROSTATICS */
642 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
643 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
644 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
645 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
646 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
647 Heps = _mm256_mul_pd(vfeps,H);
648 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
649 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
650 velec = _mm256_mul_pd(qq33,VV);
651 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
652 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
654 /* Update potential sum for this i atom from the interaction with this j atom. */
655 velecsum = _mm256_add_pd(velecsum,velec);
659 /* Calculate temporary vectorial force */
660 tx = _mm256_mul_pd(fscal,dx33);
661 ty = _mm256_mul_pd(fscal,dy33);
662 tz = _mm256_mul_pd(fscal,dz33);
664 /* Update vectorial force */
665 fix3 = _mm256_add_pd(fix3,tx);
666 fiy3 = _mm256_add_pd(fiy3,ty);
667 fiz3 = _mm256_add_pd(fiz3,tz);
669 fjx3 = _mm256_add_pd(fjx3,tx);
670 fjy3 = _mm256_add_pd(fjy3,ty);
671 fjz3 = _mm256_add_pd(fjz3,tz);
673 fjptrA = f+j_coord_offsetA;
674 fjptrB = f+j_coord_offsetB;
675 fjptrC = f+j_coord_offsetC;
676 fjptrD = f+j_coord_offsetD;
678 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
679 fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
681 /* Inner loop uses 387 flops */
687 /* Get j neighbor index, and coordinate index */
688 jnrlistA = jjnr[jidx];
689 jnrlistB = jjnr[jidx+1];
690 jnrlistC = jjnr[jidx+2];
691 jnrlistD = jjnr[jidx+3];
692 /* Sign of each element will be negative for non-real atoms.
693 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
694 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
696 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
698 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
699 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
700 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
702 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
703 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
704 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
705 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
706 j_coord_offsetA = DIM*jnrA;
707 j_coord_offsetB = DIM*jnrB;
708 j_coord_offsetC = DIM*jnrC;
709 j_coord_offsetD = DIM*jnrD;
711 /* load j atom coordinates */
712 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
713 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
714 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
716 /* Calculate displacement vector */
717 dx11 = _mm256_sub_pd(ix1,jx1);
718 dy11 = _mm256_sub_pd(iy1,jy1);
719 dz11 = _mm256_sub_pd(iz1,jz1);
720 dx12 = _mm256_sub_pd(ix1,jx2);
721 dy12 = _mm256_sub_pd(iy1,jy2);
722 dz12 = _mm256_sub_pd(iz1,jz2);
723 dx13 = _mm256_sub_pd(ix1,jx3);
724 dy13 = _mm256_sub_pd(iy1,jy3);
725 dz13 = _mm256_sub_pd(iz1,jz3);
726 dx21 = _mm256_sub_pd(ix2,jx1);
727 dy21 = _mm256_sub_pd(iy2,jy1);
728 dz21 = _mm256_sub_pd(iz2,jz1);
729 dx22 = _mm256_sub_pd(ix2,jx2);
730 dy22 = _mm256_sub_pd(iy2,jy2);
731 dz22 = _mm256_sub_pd(iz2,jz2);
732 dx23 = _mm256_sub_pd(ix2,jx3);
733 dy23 = _mm256_sub_pd(iy2,jy3);
734 dz23 = _mm256_sub_pd(iz2,jz3);
735 dx31 = _mm256_sub_pd(ix3,jx1);
736 dy31 = _mm256_sub_pd(iy3,jy1);
737 dz31 = _mm256_sub_pd(iz3,jz1);
738 dx32 = _mm256_sub_pd(ix3,jx2);
739 dy32 = _mm256_sub_pd(iy3,jy2);
740 dz32 = _mm256_sub_pd(iz3,jz2);
741 dx33 = _mm256_sub_pd(ix3,jx3);
742 dy33 = _mm256_sub_pd(iy3,jy3);
743 dz33 = _mm256_sub_pd(iz3,jz3);
745 /* Calculate squared distance and things based on it */
746 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
747 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
748 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
749 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
750 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
751 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
752 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
753 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
754 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
756 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
757 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
758 rinv13 = gmx_mm256_invsqrt_pd(rsq13);
759 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
760 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
761 rinv23 = gmx_mm256_invsqrt_pd(rsq23);
762 rinv31 = gmx_mm256_invsqrt_pd(rsq31);
763 rinv32 = gmx_mm256_invsqrt_pd(rsq32);
764 rinv33 = gmx_mm256_invsqrt_pd(rsq33);
766 fjx1 = _mm256_setzero_pd();
767 fjy1 = _mm256_setzero_pd();
768 fjz1 = _mm256_setzero_pd();
769 fjx2 = _mm256_setzero_pd();
770 fjy2 = _mm256_setzero_pd();
771 fjz2 = _mm256_setzero_pd();
772 fjx3 = _mm256_setzero_pd();
773 fjy3 = _mm256_setzero_pd();
774 fjz3 = _mm256_setzero_pd();
776 /**************************
777 * CALCULATE INTERACTIONS *
778 **************************/
780 r11 = _mm256_mul_pd(rsq11,rinv11);
781 r11 = _mm256_andnot_pd(dummy_mask,r11);
783 /* Calculate table index by multiplying r with table scale and truncate to integer */
784 rt = _mm256_mul_pd(r11,vftabscale);
785 vfitab = _mm256_cvttpd_epi32(rt);
786 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
787 vfitab = _mm_slli_epi32(vfitab,2);
789 /* CUBIC SPLINE TABLE ELECTROSTATICS */
790 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
791 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
792 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
793 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
794 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
795 Heps = _mm256_mul_pd(vfeps,H);
796 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
797 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
798 velec = _mm256_mul_pd(qq11,VV);
799 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
800 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
802 /* Update potential sum for this i atom from the interaction with this j atom. */
803 velec = _mm256_andnot_pd(dummy_mask,velec);
804 velecsum = _mm256_add_pd(velecsum,velec);
808 fscal = _mm256_andnot_pd(dummy_mask,fscal);
810 /* Calculate temporary vectorial force */
811 tx = _mm256_mul_pd(fscal,dx11);
812 ty = _mm256_mul_pd(fscal,dy11);
813 tz = _mm256_mul_pd(fscal,dz11);
815 /* Update vectorial force */
816 fix1 = _mm256_add_pd(fix1,tx);
817 fiy1 = _mm256_add_pd(fiy1,ty);
818 fiz1 = _mm256_add_pd(fiz1,tz);
820 fjx1 = _mm256_add_pd(fjx1,tx);
821 fjy1 = _mm256_add_pd(fjy1,ty);
822 fjz1 = _mm256_add_pd(fjz1,tz);
824 /**************************
825 * CALCULATE INTERACTIONS *
826 **************************/
828 r12 = _mm256_mul_pd(rsq12,rinv12);
829 r12 = _mm256_andnot_pd(dummy_mask,r12);
831 /* Calculate table index by multiplying r with table scale and truncate to integer */
832 rt = _mm256_mul_pd(r12,vftabscale);
833 vfitab = _mm256_cvttpd_epi32(rt);
834 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
835 vfitab = _mm_slli_epi32(vfitab,2);
837 /* CUBIC SPLINE TABLE ELECTROSTATICS */
838 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
839 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
840 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
841 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
842 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
843 Heps = _mm256_mul_pd(vfeps,H);
844 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
845 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
846 velec = _mm256_mul_pd(qq12,VV);
847 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
848 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
850 /* Update potential sum for this i atom from the interaction with this j atom. */
851 velec = _mm256_andnot_pd(dummy_mask,velec);
852 velecsum = _mm256_add_pd(velecsum,velec);
856 fscal = _mm256_andnot_pd(dummy_mask,fscal);
858 /* Calculate temporary vectorial force */
859 tx = _mm256_mul_pd(fscal,dx12);
860 ty = _mm256_mul_pd(fscal,dy12);
861 tz = _mm256_mul_pd(fscal,dz12);
863 /* Update vectorial force */
864 fix1 = _mm256_add_pd(fix1,tx);
865 fiy1 = _mm256_add_pd(fiy1,ty);
866 fiz1 = _mm256_add_pd(fiz1,tz);
868 fjx2 = _mm256_add_pd(fjx2,tx);
869 fjy2 = _mm256_add_pd(fjy2,ty);
870 fjz2 = _mm256_add_pd(fjz2,tz);
872 /**************************
873 * CALCULATE INTERACTIONS *
874 **************************/
876 r13 = _mm256_mul_pd(rsq13,rinv13);
877 r13 = _mm256_andnot_pd(dummy_mask,r13);
879 /* Calculate table index by multiplying r with table scale and truncate to integer */
880 rt = _mm256_mul_pd(r13,vftabscale);
881 vfitab = _mm256_cvttpd_epi32(rt);
882 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
883 vfitab = _mm_slli_epi32(vfitab,2);
885 /* CUBIC SPLINE TABLE ELECTROSTATICS */
886 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
887 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
888 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
889 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
890 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
891 Heps = _mm256_mul_pd(vfeps,H);
892 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
893 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
894 velec = _mm256_mul_pd(qq13,VV);
895 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
896 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
898 /* Update potential sum for this i atom from the interaction with this j atom. */
899 velec = _mm256_andnot_pd(dummy_mask,velec);
900 velecsum = _mm256_add_pd(velecsum,velec);
904 fscal = _mm256_andnot_pd(dummy_mask,fscal);
906 /* Calculate temporary vectorial force */
907 tx = _mm256_mul_pd(fscal,dx13);
908 ty = _mm256_mul_pd(fscal,dy13);
909 tz = _mm256_mul_pd(fscal,dz13);
911 /* Update vectorial force */
912 fix1 = _mm256_add_pd(fix1,tx);
913 fiy1 = _mm256_add_pd(fiy1,ty);
914 fiz1 = _mm256_add_pd(fiz1,tz);
916 fjx3 = _mm256_add_pd(fjx3,tx);
917 fjy3 = _mm256_add_pd(fjy3,ty);
918 fjz3 = _mm256_add_pd(fjz3,tz);
920 /**************************
921 * CALCULATE INTERACTIONS *
922 **************************/
924 r21 = _mm256_mul_pd(rsq21,rinv21);
925 r21 = _mm256_andnot_pd(dummy_mask,r21);
927 /* Calculate table index by multiplying r with table scale and truncate to integer */
928 rt = _mm256_mul_pd(r21,vftabscale);
929 vfitab = _mm256_cvttpd_epi32(rt);
930 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
931 vfitab = _mm_slli_epi32(vfitab,2);
933 /* CUBIC SPLINE TABLE ELECTROSTATICS */
934 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
935 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
936 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
937 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
938 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
939 Heps = _mm256_mul_pd(vfeps,H);
940 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
941 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
942 velec = _mm256_mul_pd(qq21,VV);
943 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
944 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
946 /* Update potential sum for this i atom from the interaction with this j atom. */
947 velec = _mm256_andnot_pd(dummy_mask,velec);
948 velecsum = _mm256_add_pd(velecsum,velec);
952 fscal = _mm256_andnot_pd(dummy_mask,fscal);
954 /* Calculate temporary vectorial force */
955 tx = _mm256_mul_pd(fscal,dx21);
956 ty = _mm256_mul_pd(fscal,dy21);
957 tz = _mm256_mul_pd(fscal,dz21);
959 /* Update vectorial force */
960 fix2 = _mm256_add_pd(fix2,tx);
961 fiy2 = _mm256_add_pd(fiy2,ty);
962 fiz2 = _mm256_add_pd(fiz2,tz);
964 fjx1 = _mm256_add_pd(fjx1,tx);
965 fjy1 = _mm256_add_pd(fjy1,ty);
966 fjz1 = _mm256_add_pd(fjz1,tz);
968 /**************************
969 * CALCULATE INTERACTIONS *
970 **************************/
972 r22 = _mm256_mul_pd(rsq22,rinv22);
973 r22 = _mm256_andnot_pd(dummy_mask,r22);
975 /* Calculate table index by multiplying r with table scale and truncate to integer */
976 rt = _mm256_mul_pd(r22,vftabscale);
977 vfitab = _mm256_cvttpd_epi32(rt);
978 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
979 vfitab = _mm_slli_epi32(vfitab,2);
981 /* CUBIC SPLINE TABLE ELECTROSTATICS */
982 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
983 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
984 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
985 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
986 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
987 Heps = _mm256_mul_pd(vfeps,H);
988 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
989 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
990 velec = _mm256_mul_pd(qq22,VV);
991 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
992 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
994 /* Update potential sum for this i atom from the interaction with this j atom. */
995 velec = _mm256_andnot_pd(dummy_mask,velec);
996 velecsum = _mm256_add_pd(velecsum,velec);
1000 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1002 /* Calculate temporary vectorial force */
1003 tx = _mm256_mul_pd(fscal,dx22);
1004 ty = _mm256_mul_pd(fscal,dy22);
1005 tz = _mm256_mul_pd(fscal,dz22);
1007 /* Update vectorial force */
1008 fix2 = _mm256_add_pd(fix2,tx);
1009 fiy2 = _mm256_add_pd(fiy2,ty);
1010 fiz2 = _mm256_add_pd(fiz2,tz);
1012 fjx2 = _mm256_add_pd(fjx2,tx);
1013 fjy2 = _mm256_add_pd(fjy2,ty);
1014 fjz2 = _mm256_add_pd(fjz2,tz);
1016 /**************************
1017 * CALCULATE INTERACTIONS *
1018 **************************/
1020 r23 = _mm256_mul_pd(rsq23,rinv23);
1021 r23 = _mm256_andnot_pd(dummy_mask,r23);
1023 /* Calculate table index by multiplying r with table scale and truncate to integer */
1024 rt = _mm256_mul_pd(r23,vftabscale);
1025 vfitab = _mm256_cvttpd_epi32(rt);
1026 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1027 vfitab = _mm_slli_epi32(vfitab,2);
1029 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1030 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1031 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1032 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1033 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1034 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1035 Heps = _mm256_mul_pd(vfeps,H);
1036 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1037 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
1038 velec = _mm256_mul_pd(qq23,VV);
1039 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1040 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
1042 /* Update potential sum for this i atom from the interaction with this j atom. */
1043 velec = _mm256_andnot_pd(dummy_mask,velec);
1044 velecsum = _mm256_add_pd(velecsum,velec);
1048 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1050 /* Calculate temporary vectorial force */
1051 tx = _mm256_mul_pd(fscal,dx23);
1052 ty = _mm256_mul_pd(fscal,dy23);
1053 tz = _mm256_mul_pd(fscal,dz23);
1055 /* Update vectorial force */
1056 fix2 = _mm256_add_pd(fix2,tx);
1057 fiy2 = _mm256_add_pd(fiy2,ty);
1058 fiz2 = _mm256_add_pd(fiz2,tz);
1060 fjx3 = _mm256_add_pd(fjx3,tx);
1061 fjy3 = _mm256_add_pd(fjy3,ty);
1062 fjz3 = _mm256_add_pd(fjz3,tz);
1064 /**************************
1065 * CALCULATE INTERACTIONS *
1066 **************************/
1068 r31 = _mm256_mul_pd(rsq31,rinv31);
1069 r31 = _mm256_andnot_pd(dummy_mask,r31);
1071 /* Calculate table index by multiplying r with table scale and truncate to integer */
1072 rt = _mm256_mul_pd(r31,vftabscale);
1073 vfitab = _mm256_cvttpd_epi32(rt);
1074 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1075 vfitab = _mm_slli_epi32(vfitab,2);
1077 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1078 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1079 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1080 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1081 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1082 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1083 Heps = _mm256_mul_pd(vfeps,H);
1084 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1085 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
1086 velec = _mm256_mul_pd(qq31,VV);
1087 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1088 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
1090 /* Update potential sum for this i atom from the interaction with this j atom. */
1091 velec = _mm256_andnot_pd(dummy_mask,velec);
1092 velecsum = _mm256_add_pd(velecsum,velec);
1096 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1098 /* Calculate temporary vectorial force */
1099 tx = _mm256_mul_pd(fscal,dx31);
1100 ty = _mm256_mul_pd(fscal,dy31);
1101 tz = _mm256_mul_pd(fscal,dz31);
1103 /* Update vectorial force */
1104 fix3 = _mm256_add_pd(fix3,tx);
1105 fiy3 = _mm256_add_pd(fiy3,ty);
1106 fiz3 = _mm256_add_pd(fiz3,tz);
1108 fjx1 = _mm256_add_pd(fjx1,tx);
1109 fjy1 = _mm256_add_pd(fjy1,ty);
1110 fjz1 = _mm256_add_pd(fjz1,tz);
1112 /**************************
1113 * CALCULATE INTERACTIONS *
1114 **************************/
1116 r32 = _mm256_mul_pd(rsq32,rinv32);
1117 r32 = _mm256_andnot_pd(dummy_mask,r32);
1119 /* Calculate table index by multiplying r with table scale and truncate to integer */
1120 rt = _mm256_mul_pd(r32,vftabscale);
1121 vfitab = _mm256_cvttpd_epi32(rt);
1122 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1123 vfitab = _mm_slli_epi32(vfitab,2);
1125 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1126 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1127 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1128 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1129 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1130 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1131 Heps = _mm256_mul_pd(vfeps,H);
1132 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1133 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
1134 velec = _mm256_mul_pd(qq32,VV);
1135 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1136 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
1138 /* Update potential sum for this i atom from the interaction with this j atom. */
1139 velec = _mm256_andnot_pd(dummy_mask,velec);
1140 velecsum = _mm256_add_pd(velecsum,velec);
1144 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1146 /* Calculate temporary vectorial force */
1147 tx = _mm256_mul_pd(fscal,dx32);
1148 ty = _mm256_mul_pd(fscal,dy32);
1149 tz = _mm256_mul_pd(fscal,dz32);
1151 /* Update vectorial force */
1152 fix3 = _mm256_add_pd(fix3,tx);
1153 fiy3 = _mm256_add_pd(fiy3,ty);
1154 fiz3 = _mm256_add_pd(fiz3,tz);
1156 fjx2 = _mm256_add_pd(fjx2,tx);
1157 fjy2 = _mm256_add_pd(fjy2,ty);
1158 fjz2 = _mm256_add_pd(fjz2,tz);
1160 /**************************
1161 * CALCULATE INTERACTIONS *
1162 **************************/
1164 r33 = _mm256_mul_pd(rsq33,rinv33);
1165 r33 = _mm256_andnot_pd(dummy_mask,r33);
1167 /* Calculate table index by multiplying r with table scale and truncate to integer */
1168 rt = _mm256_mul_pd(r33,vftabscale);
1169 vfitab = _mm256_cvttpd_epi32(rt);
1170 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1171 vfitab = _mm_slli_epi32(vfitab,2);
1173 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1174 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1175 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1176 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1177 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1178 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1179 Heps = _mm256_mul_pd(vfeps,H);
1180 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1181 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
1182 velec = _mm256_mul_pd(qq33,VV);
1183 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1184 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
1186 /* Update potential sum for this i atom from the interaction with this j atom. */
1187 velec = _mm256_andnot_pd(dummy_mask,velec);
1188 velecsum = _mm256_add_pd(velecsum,velec);
1192 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1194 /* Calculate temporary vectorial force */
1195 tx = _mm256_mul_pd(fscal,dx33);
1196 ty = _mm256_mul_pd(fscal,dy33);
1197 tz = _mm256_mul_pd(fscal,dz33);
1199 /* Update vectorial force */
1200 fix3 = _mm256_add_pd(fix3,tx);
1201 fiy3 = _mm256_add_pd(fiy3,ty);
1202 fiz3 = _mm256_add_pd(fiz3,tz);
1204 fjx3 = _mm256_add_pd(fjx3,tx);
1205 fjy3 = _mm256_add_pd(fjy3,ty);
1206 fjz3 = _mm256_add_pd(fjz3,tz);
1208 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1209 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1210 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1211 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1213 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
1214 fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1216 /* Inner loop uses 396 flops */
1219 /* End of innermost loop */
1221 gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1222 f+i_coord_offset+DIM,fshift+i_shift_offset);
1225 /* Update potential energies */
1226 gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1228 /* Increment number of inner iterations */
1229 inneriter += j_index_end - j_index_start;
1231 /* Outer loop uses 19 flops */
1234 /* Increment number of outer iterations */
1237 /* Update outer/inner flops */
1239 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*396);
1242 * Gromacs nonbonded kernel: nb_kernel_ElecCSTab_VdwNone_GeomW4W4_F_avx_256_double
1243 * Electrostatics interaction: CubicSplineTable
1244 * VdW interaction: None
1245 * Geometry: Water4-Water4
1246 * Calculate force/pot: Force
1249 nb_kernel_ElecCSTab_VdwNone_GeomW4W4_F_avx_256_double
1250 (t_nblist * gmx_restrict nlist,
1251 rvec * gmx_restrict xx,
1252 rvec * gmx_restrict ff,
1253 t_forcerec * gmx_restrict fr,
1254 t_mdatoms * gmx_restrict mdatoms,
1255 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1256 t_nrnb * gmx_restrict nrnb)
1258 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1259 * just 0 for non-waters.
1260 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1261 * jnr indices corresponding to data put in the four positions in the SIMD register.
1263 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1264 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1265 int jnrA,jnrB,jnrC,jnrD;
1266 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1267 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1268 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1269 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1270 real rcutoff_scalar;
1271 real *shiftvec,*fshift,*x,*f;
1272 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1273 real scratch[4*DIM];
1274 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1275 real * vdwioffsetptr1;
1276 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1277 real * vdwioffsetptr2;
1278 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1279 real * vdwioffsetptr3;
1280 __m256d ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1281 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1282 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1283 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1284 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1285 int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
1286 __m256d jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1287 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1288 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1289 __m256d dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1290 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1291 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1292 __m256d dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1293 __m256d dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1294 __m256d dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1295 __m256d dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1296 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
1299 __m128i ifour = _mm_set1_epi32(4);
1300 __m256d rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1302 __m256d dummy_mask,cutoff_mask;
1303 __m128 tmpmask0,tmpmask1;
1304 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1305 __m256d one = _mm256_set1_pd(1.0);
1306 __m256d two = _mm256_set1_pd(2.0);
1312 jindex = nlist->jindex;
1314 shiftidx = nlist->shift;
1316 shiftvec = fr->shift_vec[0];
1317 fshift = fr->fshift[0];
1318 facel = _mm256_set1_pd(fr->epsfac);
1319 charge = mdatoms->chargeA;
1321 vftab = kernel_data->table_elec->data;
1322 vftabscale = _mm256_set1_pd(kernel_data->table_elec->scale);
1324 /* Setup water-specific parameters */
1325 inr = nlist->iinr[0];
1326 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1327 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1328 iq3 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
1330 jq1 = _mm256_set1_pd(charge[inr+1]);
1331 jq2 = _mm256_set1_pd(charge[inr+2]);
1332 jq3 = _mm256_set1_pd(charge[inr+3]);
1333 qq11 = _mm256_mul_pd(iq1,jq1);
1334 qq12 = _mm256_mul_pd(iq1,jq2);
1335 qq13 = _mm256_mul_pd(iq1,jq3);
1336 qq21 = _mm256_mul_pd(iq2,jq1);
1337 qq22 = _mm256_mul_pd(iq2,jq2);
1338 qq23 = _mm256_mul_pd(iq2,jq3);
1339 qq31 = _mm256_mul_pd(iq3,jq1);
1340 qq32 = _mm256_mul_pd(iq3,jq2);
1341 qq33 = _mm256_mul_pd(iq3,jq3);
1343 /* Avoid stupid compiler warnings */
1344 jnrA = jnrB = jnrC = jnrD = 0;
1345 j_coord_offsetA = 0;
1346 j_coord_offsetB = 0;
1347 j_coord_offsetC = 0;
1348 j_coord_offsetD = 0;
1353 for(iidx=0;iidx<4*DIM;iidx++)
1355 scratch[iidx] = 0.0;
1358 /* Start outer loop over neighborlists */
1359 for(iidx=0; iidx<nri; iidx++)
1361 /* Load shift vector for this list */
1362 i_shift_offset = DIM*shiftidx[iidx];
1364 /* Load limits for loop over neighbors */
1365 j_index_start = jindex[iidx];
1366 j_index_end = jindex[iidx+1];
1368 /* Get outer coordinate index */
1370 i_coord_offset = DIM*inr;
1372 /* Load i particle coords and add shift vector */
1373 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
1374 &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1376 fix1 = _mm256_setzero_pd();
1377 fiy1 = _mm256_setzero_pd();
1378 fiz1 = _mm256_setzero_pd();
1379 fix2 = _mm256_setzero_pd();
1380 fiy2 = _mm256_setzero_pd();
1381 fiz2 = _mm256_setzero_pd();
1382 fix3 = _mm256_setzero_pd();
1383 fiy3 = _mm256_setzero_pd();
1384 fiz3 = _mm256_setzero_pd();
1386 /* Start inner kernel loop */
1387 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1390 /* Get j neighbor index, and coordinate index */
1392 jnrB = jjnr[jidx+1];
1393 jnrC = jjnr[jidx+2];
1394 jnrD = jjnr[jidx+3];
1395 j_coord_offsetA = DIM*jnrA;
1396 j_coord_offsetB = DIM*jnrB;
1397 j_coord_offsetC = DIM*jnrC;
1398 j_coord_offsetD = DIM*jnrD;
1400 /* load j atom coordinates */
1401 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
1402 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
1403 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
1405 /* Calculate displacement vector */
1406 dx11 = _mm256_sub_pd(ix1,jx1);
1407 dy11 = _mm256_sub_pd(iy1,jy1);
1408 dz11 = _mm256_sub_pd(iz1,jz1);
1409 dx12 = _mm256_sub_pd(ix1,jx2);
1410 dy12 = _mm256_sub_pd(iy1,jy2);
1411 dz12 = _mm256_sub_pd(iz1,jz2);
1412 dx13 = _mm256_sub_pd(ix1,jx3);
1413 dy13 = _mm256_sub_pd(iy1,jy3);
1414 dz13 = _mm256_sub_pd(iz1,jz3);
1415 dx21 = _mm256_sub_pd(ix2,jx1);
1416 dy21 = _mm256_sub_pd(iy2,jy1);
1417 dz21 = _mm256_sub_pd(iz2,jz1);
1418 dx22 = _mm256_sub_pd(ix2,jx2);
1419 dy22 = _mm256_sub_pd(iy2,jy2);
1420 dz22 = _mm256_sub_pd(iz2,jz2);
1421 dx23 = _mm256_sub_pd(ix2,jx3);
1422 dy23 = _mm256_sub_pd(iy2,jy3);
1423 dz23 = _mm256_sub_pd(iz2,jz3);
1424 dx31 = _mm256_sub_pd(ix3,jx1);
1425 dy31 = _mm256_sub_pd(iy3,jy1);
1426 dz31 = _mm256_sub_pd(iz3,jz1);
1427 dx32 = _mm256_sub_pd(ix3,jx2);
1428 dy32 = _mm256_sub_pd(iy3,jy2);
1429 dz32 = _mm256_sub_pd(iz3,jz2);
1430 dx33 = _mm256_sub_pd(ix3,jx3);
1431 dy33 = _mm256_sub_pd(iy3,jy3);
1432 dz33 = _mm256_sub_pd(iz3,jz3);
1434 /* Calculate squared distance and things based on it */
1435 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1436 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1437 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1438 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1439 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1440 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1441 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1442 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1443 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1445 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
1446 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
1447 rinv13 = gmx_mm256_invsqrt_pd(rsq13);
1448 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
1449 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
1450 rinv23 = gmx_mm256_invsqrt_pd(rsq23);
1451 rinv31 = gmx_mm256_invsqrt_pd(rsq31);
1452 rinv32 = gmx_mm256_invsqrt_pd(rsq32);
1453 rinv33 = gmx_mm256_invsqrt_pd(rsq33);
1455 fjx1 = _mm256_setzero_pd();
1456 fjy1 = _mm256_setzero_pd();
1457 fjz1 = _mm256_setzero_pd();
1458 fjx2 = _mm256_setzero_pd();
1459 fjy2 = _mm256_setzero_pd();
1460 fjz2 = _mm256_setzero_pd();
1461 fjx3 = _mm256_setzero_pd();
1462 fjy3 = _mm256_setzero_pd();
1463 fjz3 = _mm256_setzero_pd();
1465 /**************************
1466 * CALCULATE INTERACTIONS *
1467 **************************/
1469 r11 = _mm256_mul_pd(rsq11,rinv11);
1471 /* Calculate table index by multiplying r with table scale and truncate to integer */
1472 rt = _mm256_mul_pd(r11,vftabscale);
1473 vfitab = _mm256_cvttpd_epi32(rt);
1474 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1475 vfitab = _mm_slli_epi32(vfitab,2);
1477 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1478 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1479 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1480 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1481 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1482 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1483 Heps = _mm256_mul_pd(vfeps,H);
1484 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1485 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1486 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
1490 /* Calculate temporary vectorial force */
1491 tx = _mm256_mul_pd(fscal,dx11);
1492 ty = _mm256_mul_pd(fscal,dy11);
1493 tz = _mm256_mul_pd(fscal,dz11);
1495 /* Update vectorial force */
1496 fix1 = _mm256_add_pd(fix1,tx);
1497 fiy1 = _mm256_add_pd(fiy1,ty);
1498 fiz1 = _mm256_add_pd(fiz1,tz);
1500 fjx1 = _mm256_add_pd(fjx1,tx);
1501 fjy1 = _mm256_add_pd(fjy1,ty);
1502 fjz1 = _mm256_add_pd(fjz1,tz);
1504 /**************************
1505 * CALCULATE INTERACTIONS *
1506 **************************/
1508 r12 = _mm256_mul_pd(rsq12,rinv12);
1510 /* Calculate table index by multiplying r with table scale and truncate to integer */
1511 rt = _mm256_mul_pd(r12,vftabscale);
1512 vfitab = _mm256_cvttpd_epi32(rt);
1513 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1514 vfitab = _mm_slli_epi32(vfitab,2);
1516 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1517 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1518 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1519 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1520 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1521 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1522 Heps = _mm256_mul_pd(vfeps,H);
1523 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1524 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1525 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
1529 /* Calculate temporary vectorial force */
1530 tx = _mm256_mul_pd(fscal,dx12);
1531 ty = _mm256_mul_pd(fscal,dy12);
1532 tz = _mm256_mul_pd(fscal,dz12);
1534 /* Update vectorial force */
1535 fix1 = _mm256_add_pd(fix1,tx);
1536 fiy1 = _mm256_add_pd(fiy1,ty);
1537 fiz1 = _mm256_add_pd(fiz1,tz);
1539 fjx2 = _mm256_add_pd(fjx2,tx);
1540 fjy2 = _mm256_add_pd(fjy2,ty);
1541 fjz2 = _mm256_add_pd(fjz2,tz);
1543 /**************************
1544 * CALCULATE INTERACTIONS *
1545 **************************/
1547 r13 = _mm256_mul_pd(rsq13,rinv13);
1549 /* Calculate table index by multiplying r with table scale and truncate to integer */
1550 rt = _mm256_mul_pd(r13,vftabscale);
1551 vfitab = _mm256_cvttpd_epi32(rt);
1552 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1553 vfitab = _mm_slli_epi32(vfitab,2);
1555 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1556 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1557 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1558 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1559 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1560 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1561 Heps = _mm256_mul_pd(vfeps,H);
1562 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1563 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1564 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
1568 /* Calculate temporary vectorial force */
1569 tx = _mm256_mul_pd(fscal,dx13);
1570 ty = _mm256_mul_pd(fscal,dy13);
1571 tz = _mm256_mul_pd(fscal,dz13);
1573 /* Update vectorial force */
1574 fix1 = _mm256_add_pd(fix1,tx);
1575 fiy1 = _mm256_add_pd(fiy1,ty);
1576 fiz1 = _mm256_add_pd(fiz1,tz);
1578 fjx3 = _mm256_add_pd(fjx3,tx);
1579 fjy3 = _mm256_add_pd(fjy3,ty);
1580 fjz3 = _mm256_add_pd(fjz3,tz);
1582 /**************************
1583 * CALCULATE INTERACTIONS *
1584 **************************/
1586 r21 = _mm256_mul_pd(rsq21,rinv21);
1588 /* Calculate table index by multiplying r with table scale and truncate to integer */
1589 rt = _mm256_mul_pd(r21,vftabscale);
1590 vfitab = _mm256_cvttpd_epi32(rt);
1591 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1592 vfitab = _mm_slli_epi32(vfitab,2);
1594 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1595 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1596 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1597 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1598 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1599 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1600 Heps = _mm256_mul_pd(vfeps,H);
1601 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1602 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1603 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
1607 /* Calculate temporary vectorial force */
1608 tx = _mm256_mul_pd(fscal,dx21);
1609 ty = _mm256_mul_pd(fscal,dy21);
1610 tz = _mm256_mul_pd(fscal,dz21);
1612 /* Update vectorial force */
1613 fix2 = _mm256_add_pd(fix2,tx);
1614 fiy2 = _mm256_add_pd(fiy2,ty);
1615 fiz2 = _mm256_add_pd(fiz2,tz);
1617 fjx1 = _mm256_add_pd(fjx1,tx);
1618 fjy1 = _mm256_add_pd(fjy1,ty);
1619 fjz1 = _mm256_add_pd(fjz1,tz);
1621 /**************************
1622 * CALCULATE INTERACTIONS *
1623 **************************/
1625 r22 = _mm256_mul_pd(rsq22,rinv22);
1627 /* Calculate table index by multiplying r with table scale and truncate to integer */
1628 rt = _mm256_mul_pd(r22,vftabscale);
1629 vfitab = _mm256_cvttpd_epi32(rt);
1630 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1631 vfitab = _mm_slli_epi32(vfitab,2);
1633 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1634 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1635 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1636 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1637 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1638 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1639 Heps = _mm256_mul_pd(vfeps,H);
1640 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1641 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1642 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
1646 /* Calculate temporary vectorial force */
1647 tx = _mm256_mul_pd(fscal,dx22);
1648 ty = _mm256_mul_pd(fscal,dy22);
1649 tz = _mm256_mul_pd(fscal,dz22);
1651 /* Update vectorial force */
1652 fix2 = _mm256_add_pd(fix2,tx);
1653 fiy2 = _mm256_add_pd(fiy2,ty);
1654 fiz2 = _mm256_add_pd(fiz2,tz);
1656 fjx2 = _mm256_add_pd(fjx2,tx);
1657 fjy2 = _mm256_add_pd(fjy2,ty);
1658 fjz2 = _mm256_add_pd(fjz2,tz);
1660 /**************************
1661 * CALCULATE INTERACTIONS *
1662 **************************/
1664 r23 = _mm256_mul_pd(rsq23,rinv23);
1666 /* Calculate table index by multiplying r with table scale and truncate to integer */
1667 rt = _mm256_mul_pd(r23,vftabscale);
1668 vfitab = _mm256_cvttpd_epi32(rt);
1669 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1670 vfitab = _mm_slli_epi32(vfitab,2);
1672 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1673 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1674 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1675 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1676 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1677 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1678 Heps = _mm256_mul_pd(vfeps,H);
1679 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1680 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1681 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
1685 /* Calculate temporary vectorial force */
1686 tx = _mm256_mul_pd(fscal,dx23);
1687 ty = _mm256_mul_pd(fscal,dy23);
1688 tz = _mm256_mul_pd(fscal,dz23);
1690 /* Update vectorial force */
1691 fix2 = _mm256_add_pd(fix2,tx);
1692 fiy2 = _mm256_add_pd(fiy2,ty);
1693 fiz2 = _mm256_add_pd(fiz2,tz);
1695 fjx3 = _mm256_add_pd(fjx3,tx);
1696 fjy3 = _mm256_add_pd(fjy3,ty);
1697 fjz3 = _mm256_add_pd(fjz3,tz);
1699 /**************************
1700 * CALCULATE INTERACTIONS *
1701 **************************/
1703 r31 = _mm256_mul_pd(rsq31,rinv31);
1705 /* Calculate table index by multiplying r with table scale and truncate to integer */
1706 rt = _mm256_mul_pd(r31,vftabscale);
1707 vfitab = _mm256_cvttpd_epi32(rt);
1708 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1709 vfitab = _mm_slli_epi32(vfitab,2);
1711 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1712 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1713 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1714 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1715 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1716 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1717 Heps = _mm256_mul_pd(vfeps,H);
1718 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1719 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1720 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
1724 /* Calculate temporary vectorial force */
1725 tx = _mm256_mul_pd(fscal,dx31);
1726 ty = _mm256_mul_pd(fscal,dy31);
1727 tz = _mm256_mul_pd(fscal,dz31);
1729 /* Update vectorial force */
1730 fix3 = _mm256_add_pd(fix3,tx);
1731 fiy3 = _mm256_add_pd(fiy3,ty);
1732 fiz3 = _mm256_add_pd(fiz3,tz);
1734 fjx1 = _mm256_add_pd(fjx1,tx);
1735 fjy1 = _mm256_add_pd(fjy1,ty);
1736 fjz1 = _mm256_add_pd(fjz1,tz);
1738 /**************************
1739 * CALCULATE INTERACTIONS *
1740 **************************/
1742 r32 = _mm256_mul_pd(rsq32,rinv32);
1744 /* Calculate table index by multiplying r with table scale and truncate to integer */
1745 rt = _mm256_mul_pd(r32,vftabscale);
1746 vfitab = _mm256_cvttpd_epi32(rt);
1747 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1748 vfitab = _mm_slli_epi32(vfitab,2);
1750 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1751 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1752 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1753 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1754 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1755 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1756 Heps = _mm256_mul_pd(vfeps,H);
1757 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1758 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1759 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
1763 /* Calculate temporary vectorial force */
1764 tx = _mm256_mul_pd(fscal,dx32);
1765 ty = _mm256_mul_pd(fscal,dy32);
1766 tz = _mm256_mul_pd(fscal,dz32);
1768 /* Update vectorial force */
1769 fix3 = _mm256_add_pd(fix3,tx);
1770 fiy3 = _mm256_add_pd(fiy3,ty);
1771 fiz3 = _mm256_add_pd(fiz3,tz);
1773 fjx2 = _mm256_add_pd(fjx2,tx);
1774 fjy2 = _mm256_add_pd(fjy2,ty);
1775 fjz2 = _mm256_add_pd(fjz2,tz);
1777 /**************************
1778 * CALCULATE INTERACTIONS *
1779 **************************/
1781 r33 = _mm256_mul_pd(rsq33,rinv33);
1783 /* Calculate table index by multiplying r with table scale and truncate to integer */
1784 rt = _mm256_mul_pd(r33,vftabscale);
1785 vfitab = _mm256_cvttpd_epi32(rt);
1786 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1787 vfitab = _mm_slli_epi32(vfitab,2);
1789 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1790 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1791 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1792 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1793 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1794 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1795 Heps = _mm256_mul_pd(vfeps,H);
1796 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1797 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1798 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
1802 /* Calculate temporary vectorial force */
1803 tx = _mm256_mul_pd(fscal,dx33);
1804 ty = _mm256_mul_pd(fscal,dy33);
1805 tz = _mm256_mul_pd(fscal,dz33);
1807 /* Update vectorial force */
1808 fix3 = _mm256_add_pd(fix3,tx);
1809 fiy3 = _mm256_add_pd(fiy3,ty);
1810 fiz3 = _mm256_add_pd(fiz3,tz);
1812 fjx3 = _mm256_add_pd(fjx3,tx);
1813 fjy3 = _mm256_add_pd(fjy3,ty);
1814 fjz3 = _mm256_add_pd(fjz3,tz);
1816 fjptrA = f+j_coord_offsetA;
1817 fjptrB = f+j_coord_offsetB;
1818 fjptrC = f+j_coord_offsetC;
1819 fjptrD = f+j_coord_offsetD;
1821 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
1822 fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1824 /* Inner loop uses 351 flops */
1827 if(jidx<j_index_end)
1830 /* Get j neighbor index, and coordinate index */
1831 jnrlistA = jjnr[jidx];
1832 jnrlistB = jjnr[jidx+1];
1833 jnrlistC = jjnr[jidx+2];
1834 jnrlistD = jjnr[jidx+3];
1835 /* Sign of each element will be negative for non-real atoms.
1836 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1837 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries. */
1839 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1841 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1842 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1843 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
1845 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1846 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1847 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1848 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1849 j_coord_offsetA = DIM*jnrA;
1850 j_coord_offsetB = DIM*jnrB;
1851 j_coord_offsetC = DIM*jnrC;
1852 j_coord_offsetD = DIM*jnrD;
1854 /* load j atom coordinates */
1855 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
1856 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
1857 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
1859 /* Calculate displacement vector */
1860 dx11 = _mm256_sub_pd(ix1,jx1);
1861 dy11 = _mm256_sub_pd(iy1,jy1);
1862 dz11 = _mm256_sub_pd(iz1,jz1);
1863 dx12 = _mm256_sub_pd(ix1,jx2);
1864 dy12 = _mm256_sub_pd(iy1,jy2);
1865 dz12 = _mm256_sub_pd(iz1,jz2);
1866 dx13 = _mm256_sub_pd(ix1,jx3);
1867 dy13 = _mm256_sub_pd(iy1,jy3);
1868 dz13 = _mm256_sub_pd(iz1,jz3);
1869 dx21 = _mm256_sub_pd(ix2,jx1);
1870 dy21 = _mm256_sub_pd(iy2,jy1);
1871 dz21 = _mm256_sub_pd(iz2,jz1);
1872 dx22 = _mm256_sub_pd(ix2,jx2);
1873 dy22 = _mm256_sub_pd(iy2,jy2);
1874 dz22 = _mm256_sub_pd(iz2,jz2);
1875 dx23 = _mm256_sub_pd(ix2,jx3);
1876 dy23 = _mm256_sub_pd(iy2,jy3);
1877 dz23 = _mm256_sub_pd(iz2,jz3);
1878 dx31 = _mm256_sub_pd(ix3,jx1);
1879 dy31 = _mm256_sub_pd(iy3,jy1);
1880 dz31 = _mm256_sub_pd(iz3,jz1);
1881 dx32 = _mm256_sub_pd(ix3,jx2);
1882 dy32 = _mm256_sub_pd(iy3,jy2);
1883 dz32 = _mm256_sub_pd(iz3,jz2);
1884 dx33 = _mm256_sub_pd(ix3,jx3);
1885 dy33 = _mm256_sub_pd(iy3,jy3);
1886 dz33 = _mm256_sub_pd(iz3,jz3);
1888 /* Calculate squared distance and things based on it */
1889 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1890 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1891 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1892 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1893 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1894 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1895 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1896 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1897 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1899 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
1900 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
1901 rinv13 = gmx_mm256_invsqrt_pd(rsq13);
1902 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
1903 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
1904 rinv23 = gmx_mm256_invsqrt_pd(rsq23);
1905 rinv31 = gmx_mm256_invsqrt_pd(rsq31);
1906 rinv32 = gmx_mm256_invsqrt_pd(rsq32);
1907 rinv33 = gmx_mm256_invsqrt_pd(rsq33);
1909 fjx1 = _mm256_setzero_pd();
1910 fjy1 = _mm256_setzero_pd();
1911 fjz1 = _mm256_setzero_pd();
1912 fjx2 = _mm256_setzero_pd();
1913 fjy2 = _mm256_setzero_pd();
1914 fjz2 = _mm256_setzero_pd();
1915 fjx3 = _mm256_setzero_pd();
1916 fjy3 = _mm256_setzero_pd();
1917 fjz3 = _mm256_setzero_pd();
1919 /**************************
1920 * CALCULATE INTERACTIONS *
1921 **************************/
1923 r11 = _mm256_mul_pd(rsq11,rinv11);
1924 r11 = _mm256_andnot_pd(dummy_mask,r11);
1926 /* Calculate table index by multiplying r with table scale and truncate to integer */
1927 rt = _mm256_mul_pd(r11,vftabscale);
1928 vfitab = _mm256_cvttpd_epi32(rt);
1929 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1930 vfitab = _mm_slli_epi32(vfitab,2);
1932 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1933 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1934 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1935 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1936 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1937 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1938 Heps = _mm256_mul_pd(vfeps,H);
1939 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1940 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1941 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq11,FF),_mm256_mul_pd(vftabscale,rinv11)));
1945 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1947 /* Calculate temporary vectorial force */
1948 tx = _mm256_mul_pd(fscal,dx11);
1949 ty = _mm256_mul_pd(fscal,dy11);
1950 tz = _mm256_mul_pd(fscal,dz11);
1952 /* Update vectorial force */
1953 fix1 = _mm256_add_pd(fix1,tx);
1954 fiy1 = _mm256_add_pd(fiy1,ty);
1955 fiz1 = _mm256_add_pd(fiz1,tz);
1957 fjx1 = _mm256_add_pd(fjx1,tx);
1958 fjy1 = _mm256_add_pd(fjy1,ty);
1959 fjz1 = _mm256_add_pd(fjz1,tz);
1961 /**************************
1962 * CALCULATE INTERACTIONS *
1963 **************************/
1965 r12 = _mm256_mul_pd(rsq12,rinv12);
1966 r12 = _mm256_andnot_pd(dummy_mask,r12);
1968 /* Calculate table index by multiplying r with table scale and truncate to integer */
1969 rt = _mm256_mul_pd(r12,vftabscale);
1970 vfitab = _mm256_cvttpd_epi32(rt);
1971 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1972 vfitab = _mm_slli_epi32(vfitab,2);
1974 /* CUBIC SPLINE TABLE ELECTROSTATICS */
1975 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1976 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1977 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1978 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1979 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1980 Heps = _mm256_mul_pd(vfeps,H);
1981 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1982 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1983 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq12,FF),_mm256_mul_pd(vftabscale,rinv12)));
1987 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1989 /* Calculate temporary vectorial force */
1990 tx = _mm256_mul_pd(fscal,dx12);
1991 ty = _mm256_mul_pd(fscal,dy12);
1992 tz = _mm256_mul_pd(fscal,dz12);
1994 /* Update vectorial force */
1995 fix1 = _mm256_add_pd(fix1,tx);
1996 fiy1 = _mm256_add_pd(fiy1,ty);
1997 fiz1 = _mm256_add_pd(fiz1,tz);
1999 fjx2 = _mm256_add_pd(fjx2,tx);
2000 fjy2 = _mm256_add_pd(fjy2,ty);
2001 fjz2 = _mm256_add_pd(fjz2,tz);
2003 /**************************
2004 * CALCULATE INTERACTIONS *
2005 **************************/
2007 r13 = _mm256_mul_pd(rsq13,rinv13);
2008 r13 = _mm256_andnot_pd(dummy_mask,r13);
2010 /* Calculate table index by multiplying r with table scale and truncate to integer */
2011 rt = _mm256_mul_pd(r13,vftabscale);
2012 vfitab = _mm256_cvttpd_epi32(rt);
2013 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
2014 vfitab = _mm_slli_epi32(vfitab,2);
2016 /* CUBIC SPLINE TABLE ELECTROSTATICS */
2017 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
2018 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
2019 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
2020 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
2021 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
2022 Heps = _mm256_mul_pd(vfeps,H);
2023 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
2024 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
2025 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq13,FF),_mm256_mul_pd(vftabscale,rinv13)));
2029 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2031 /* Calculate temporary vectorial force */
2032 tx = _mm256_mul_pd(fscal,dx13);
2033 ty = _mm256_mul_pd(fscal,dy13);
2034 tz = _mm256_mul_pd(fscal,dz13);
2036 /* Update vectorial force */
2037 fix1 = _mm256_add_pd(fix1,tx);
2038 fiy1 = _mm256_add_pd(fiy1,ty);
2039 fiz1 = _mm256_add_pd(fiz1,tz);
2041 fjx3 = _mm256_add_pd(fjx3,tx);
2042 fjy3 = _mm256_add_pd(fjy3,ty);
2043 fjz3 = _mm256_add_pd(fjz3,tz);
2045 /**************************
2046 * CALCULATE INTERACTIONS *
2047 **************************/
2049 r21 = _mm256_mul_pd(rsq21,rinv21);
2050 r21 = _mm256_andnot_pd(dummy_mask,r21);
2052 /* Calculate table index by multiplying r with table scale and truncate to integer */
2053 rt = _mm256_mul_pd(r21,vftabscale);
2054 vfitab = _mm256_cvttpd_epi32(rt);
2055 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
2056 vfitab = _mm_slli_epi32(vfitab,2);
2058 /* CUBIC SPLINE TABLE ELECTROSTATICS */
2059 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
2060 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
2061 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
2062 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
2063 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
2064 Heps = _mm256_mul_pd(vfeps,H);
2065 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
2066 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
2067 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq21,FF),_mm256_mul_pd(vftabscale,rinv21)));
2071 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2073 /* Calculate temporary vectorial force */
2074 tx = _mm256_mul_pd(fscal,dx21);
2075 ty = _mm256_mul_pd(fscal,dy21);
2076 tz = _mm256_mul_pd(fscal,dz21);
2078 /* Update vectorial force */
2079 fix2 = _mm256_add_pd(fix2,tx);
2080 fiy2 = _mm256_add_pd(fiy2,ty);
2081 fiz2 = _mm256_add_pd(fiz2,tz);
2083 fjx1 = _mm256_add_pd(fjx1,tx);
2084 fjy1 = _mm256_add_pd(fjy1,ty);
2085 fjz1 = _mm256_add_pd(fjz1,tz);
2087 /**************************
2088 * CALCULATE INTERACTIONS *
2089 **************************/
2091 r22 = _mm256_mul_pd(rsq22,rinv22);
2092 r22 = _mm256_andnot_pd(dummy_mask,r22);
2094 /* Calculate table index by multiplying r with table scale and truncate to integer */
2095 rt = _mm256_mul_pd(r22,vftabscale);
2096 vfitab = _mm256_cvttpd_epi32(rt);
2097 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
2098 vfitab = _mm_slli_epi32(vfitab,2);
2100 /* CUBIC SPLINE TABLE ELECTROSTATICS */
2101 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
2102 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
2103 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
2104 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
2105 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
2106 Heps = _mm256_mul_pd(vfeps,H);
2107 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
2108 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
2109 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq22,FF),_mm256_mul_pd(vftabscale,rinv22)));
2113 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2115 /* Calculate temporary vectorial force */
2116 tx = _mm256_mul_pd(fscal,dx22);
2117 ty = _mm256_mul_pd(fscal,dy22);
2118 tz = _mm256_mul_pd(fscal,dz22);
2120 /* Update vectorial force */
2121 fix2 = _mm256_add_pd(fix2,tx);
2122 fiy2 = _mm256_add_pd(fiy2,ty);
2123 fiz2 = _mm256_add_pd(fiz2,tz);
2125 fjx2 = _mm256_add_pd(fjx2,tx);
2126 fjy2 = _mm256_add_pd(fjy2,ty);
2127 fjz2 = _mm256_add_pd(fjz2,tz);
2129 /**************************
2130 * CALCULATE INTERACTIONS *
2131 **************************/
2133 r23 = _mm256_mul_pd(rsq23,rinv23);
2134 r23 = _mm256_andnot_pd(dummy_mask,r23);
2136 /* Calculate table index by multiplying r with table scale and truncate to integer */
2137 rt = _mm256_mul_pd(r23,vftabscale);
2138 vfitab = _mm256_cvttpd_epi32(rt);
2139 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
2140 vfitab = _mm_slli_epi32(vfitab,2);
2142 /* CUBIC SPLINE TABLE ELECTROSTATICS */
2143 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
2144 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
2145 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
2146 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
2147 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
2148 Heps = _mm256_mul_pd(vfeps,H);
2149 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
2150 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
2151 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq23,FF),_mm256_mul_pd(vftabscale,rinv23)));
2155 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2157 /* Calculate temporary vectorial force */
2158 tx = _mm256_mul_pd(fscal,dx23);
2159 ty = _mm256_mul_pd(fscal,dy23);
2160 tz = _mm256_mul_pd(fscal,dz23);
2162 /* Update vectorial force */
2163 fix2 = _mm256_add_pd(fix2,tx);
2164 fiy2 = _mm256_add_pd(fiy2,ty);
2165 fiz2 = _mm256_add_pd(fiz2,tz);
2167 fjx3 = _mm256_add_pd(fjx3,tx);
2168 fjy3 = _mm256_add_pd(fjy3,ty);
2169 fjz3 = _mm256_add_pd(fjz3,tz);
2171 /**************************
2172 * CALCULATE INTERACTIONS *
2173 **************************/
2175 r31 = _mm256_mul_pd(rsq31,rinv31);
2176 r31 = _mm256_andnot_pd(dummy_mask,r31);
2178 /* Calculate table index by multiplying r with table scale and truncate to integer */
2179 rt = _mm256_mul_pd(r31,vftabscale);
2180 vfitab = _mm256_cvttpd_epi32(rt);
2181 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
2182 vfitab = _mm_slli_epi32(vfitab,2);
2184 /* CUBIC SPLINE TABLE ELECTROSTATICS */
2185 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
2186 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
2187 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
2188 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
2189 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
2190 Heps = _mm256_mul_pd(vfeps,H);
2191 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
2192 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
2193 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq31,FF),_mm256_mul_pd(vftabscale,rinv31)));
2197 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2199 /* Calculate temporary vectorial force */
2200 tx = _mm256_mul_pd(fscal,dx31);
2201 ty = _mm256_mul_pd(fscal,dy31);
2202 tz = _mm256_mul_pd(fscal,dz31);
2204 /* Update vectorial force */
2205 fix3 = _mm256_add_pd(fix3,tx);
2206 fiy3 = _mm256_add_pd(fiy3,ty);
2207 fiz3 = _mm256_add_pd(fiz3,tz);
2209 fjx1 = _mm256_add_pd(fjx1,tx);
2210 fjy1 = _mm256_add_pd(fjy1,ty);
2211 fjz1 = _mm256_add_pd(fjz1,tz);
2213 /**************************
2214 * CALCULATE INTERACTIONS *
2215 **************************/
2217 r32 = _mm256_mul_pd(rsq32,rinv32);
2218 r32 = _mm256_andnot_pd(dummy_mask,r32);
2220 /* Calculate table index by multiplying r with table scale and truncate to integer */
2221 rt = _mm256_mul_pd(r32,vftabscale);
2222 vfitab = _mm256_cvttpd_epi32(rt);
2223 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
2224 vfitab = _mm_slli_epi32(vfitab,2);
2226 /* CUBIC SPLINE TABLE ELECTROSTATICS */
2227 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
2228 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
2229 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
2230 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
2231 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
2232 Heps = _mm256_mul_pd(vfeps,H);
2233 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
2234 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
2235 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq32,FF),_mm256_mul_pd(vftabscale,rinv32)));
2239 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2241 /* Calculate temporary vectorial force */
2242 tx = _mm256_mul_pd(fscal,dx32);
2243 ty = _mm256_mul_pd(fscal,dy32);
2244 tz = _mm256_mul_pd(fscal,dz32);
2246 /* Update vectorial force */
2247 fix3 = _mm256_add_pd(fix3,tx);
2248 fiy3 = _mm256_add_pd(fiy3,ty);
2249 fiz3 = _mm256_add_pd(fiz3,tz);
2251 fjx2 = _mm256_add_pd(fjx2,tx);
2252 fjy2 = _mm256_add_pd(fjy2,ty);
2253 fjz2 = _mm256_add_pd(fjz2,tz);
2255 /**************************
2256 * CALCULATE INTERACTIONS *
2257 **************************/
2259 r33 = _mm256_mul_pd(rsq33,rinv33);
2260 r33 = _mm256_andnot_pd(dummy_mask,r33);
2262 /* Calculate table index by multiplying r with table scale and truncate to integer */
2263 rt = _mm256_mul_pd(r33,vftabscale);
2264 vfitab = _mm256_cvttpd_epi32(rt);
2265 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
2266 vfitab = _mm_slli_epi32(vfitab,2);
2268 /* CUBIC SPLINE TABLE ELECTROSTATICS */
2269 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
2270 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
2271 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
2272 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
2273 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
2274 Heps = _mm256_mul_pd(vfeps,H);
2275 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
2276 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
2277 felec = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_mul_pd(qq33,FF),_mm256_mul_pd(vftabscale,rinv33)));
2281 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2283 /* Calculate temporary vectorial force */
2284 tx = _mm256_mul_pd(fscal,dx33);
2285 ty = _mm256_mul_pd(fscal,dy33);
2286 tz = _mm256_mul_pd(fscal,dz33);
2288 /* Update vectorial force */
2289 fix3 = _mm256_add_pd(fix3,tx);
2290 fiy3 = _mm256_add_pd(fiy3,ty);
2291 fiz3 = _mm256_add_pd(fiz3,tz);
2293 fjx3 = _mm256_add_pd(fjx3,tx);
2294 fjy3 = _mm256_add_pd(fjy3,ty);
2295 fjz3 = _mm256_add_pd(fjz3,tz);
2297 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2298 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2299 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2300 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2302 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
2303 fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2305 /* Inner loop uses 360 flops */
2308 /* End of innermost loop */
2310 gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
2311 f+i_coord_offset+DIM,fshift+i_shift_offset);
2313 /* Increment number of inner iterations */
2314 inneriter += j_index_end - j_index_start;
2316 /* Outer loop uses 18 flops */
2319 /* Increment number of outer iterations */
2322 /* Update outer/inner flops */
2324 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*360);