2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
47 #include "kernelutil_x86_avx_256_double.h"
50 * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_double
51 * Electrostatics interaction: Coulomb
52 * VdW interaction: CubicSplineTable
53 * Geometry: Water3-Water3
54 * Calculate force/pot: PotentialAndForce
/*
 * Auto-generated GROMACS nonbonded kernel: plain Coulomb electrostatics plus
 * cubic-spline-table Lennard-Jones for Water3-Water3 pair lists, computing
 * both potential and force ("VF"), vectorized 4-wide with AVX 256-bit doubles.
 *
 * Layout: charges/LJ parameters are set up once for the water geometry (all i
 * and j molecules share the same three-atom parameters), then an outer loop
 * runs over neighborlists and an inner loop over quads of j waters. The inner
 * loop is split into a full-quad main loop and a masked epilogue for the
 * remainder.
 *
 * NOTE(review): this file is generator output (see file header); changes
 * should normally be made in the kernel generator rather than by hand here.
 */
57 nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_double
58 (t_nblist * gmx_restrict nlist,
59 rvec * gmx_restrict xx,
60 rvec * gmx_restrict ff,
61 struct t_forcerec * gmx_restrict fr,
62 t_mdatoms * gmx_restrict mdatoms,
63 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64 t_nrnb * gmx_restrict nrnb)
66 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
67 * just 0 for non-waters.
68 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
69 * jnr indices corresponding to data put in the four positions in the SIMD register.
71 int i_shift_offset,i_coord_offset,outeriter,inneriter;
72 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73 int jnrA,jnrB,jnrC,jnrD;
74 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
75 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
76 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
77 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
79 real *shiftvec,*fshift,*x,*f;
80 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
82 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
83 real * vdwioffsetptr0;
84 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
85 real * vdwioffsetptr1;
86 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
87 real * vdwioffsetptr2;
88 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
89 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
90 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
91 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
92 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
93 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
94 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
/* Per-pair work registers: one set for each of the 3x3 atom-pair combinations
 * (i atom index first, j atom index second, e.g. dx12 = i atom 1 vs j atom 2). */
95 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
96 __m256d dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
97 __m256d dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
98 __m256d dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
99 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
100 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
101 __m256d dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
102 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
103 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
104 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
107 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
110 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
111 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
/* Table stride helper: each spline point holds 4 doubles (Y,F,G,H), and the
 * repulsion table follows the dispersion table, hence the +4 offsets below. */
113 __m128i ifour = _mm_set1_epi32(4);
114 __m256d rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
116 __m256d dummy_mask,cutoff_mask;
117 __m128 tmpmask0,tmpmask1;
118 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
119 __m256d one = _mm256_set1_pd(1.0);
120 __m256d two = _mm256_set1_pd(2.0);
/* Unpack neighborlist bookkeeping and force-field data. */
126 jindex = nlist->jindex;
128 shiftidx = nlist->shift;
130 shiftvec = fr->shift_vec[0];
131 fshift = fr->fshift[0];
132 facel = _mm256_set1_pd(fr->ic->epsfac);
133 charge = mdatoms->chargeA;
134 nvdwtype = fr->ntype;
136 vdwtype = mdatoms->typeA;
/* NOTE(review): assumes kernel_data->table_vdw is valid for CSTab kernels —
 * confirm the caller guarantees this for this kernel type. */
138 vftab = kernel_data->table_vdw->data;
139 vftabscale = _mm256_set1_pd(kernel_data->table_vdw->scale);
141 /* Setup water-specific parameters */
142 inr = nlist->iinr[0];
143 iq0 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
144 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
145 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
146 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
/* All waters are identical, so j charges and the single O-O LJ pair (00)
 * can be broadcast once from the first i molecule. */
148 jq0 = _mm256_set1_pd(charge[inr+0]);
149 jq1 = _mm256_set1_pd(charge[inr+1]);
150 jq2 = _mm256_set1_pd(charge[inr+2]);
151 vdwjidx0A = 2*vdwtype[inr+0];
152 qq00 = _mm256_mul_pd(iq0,jq0);
153 c6_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
154 c12_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
155 qq01 = _mm256_mul_pd(iq0,jq1);
156 qq02 = _mm256_mul_pd(iq0,jq2);
157 qq10 = _mm256_mul_pd(iq1,jq0);
158 qq11 = _mm256_mul_pd(iq1,jq1);
159 qq12 = _mm256_mul_pd(iq1,jq2);
160 qq20 = _mm256_mul_pd(iq2,jq0);
161 qq21 = _mm256_mul_pd(iq2,jq1);
162 qq22 = _mm256_mul_pd(iq2,jq2);
164 /* Avoid stupid compiler warnings */
165 jnrA = jnrB = jnrC = jnrD = 0;
/* Clear the 4*DIM scratch area used for discarded (dummy-atom) j forces. */
174 for(iidx=0;iidx<4*DIM;iidx++)
179 /* Start outer loop over neighborlists */
180 for(iidx=0; iidx<nri; iidx++)
182 /* Load shift vector for this list */
183 i_shift_offset = DIM*shiftidx[iidx];
185 /* Load limits for loop over neighbors */
186 j_index_start = jindex[iidx];
187 j_index_end = jindex[iidx+1];
189 /* Get outer coordinate index */
191 i_coord_offset = DIM*inr;
193 /* Load i particle coords and add shift vector */
194 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
195 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
/* Zero per-i-atom force accumulators for the three water atoms. */
197 fix0 = _mm256_setzero_pd();
198 fiy0 = _mm256_setzero_pd();
199 fiz0 = _mm256_setzero_pd();
200 fix1 = _mm256_setzero_pd();
201 fiy1 = _mm256_setzero_pd();
202 fiz1 = _mm256_setzero_pd();
203 fix2 = _mm256_setzero_pd();
204 fiy2 = _mm256_setzero_pd();
205 fiz2 = _mm256_setzero_pd();
207 /* Reset potential sums */
208 velecsum = _mm256_setzero_pd();
209 vvdwsum = _mm256_setzero_pd();
211 /* Start inner kernel loop */
/* Main loop: runs only while a full quad of real j atoms remains
 * (jjnr[jidx+3]>=0); partial quads fall through to the masked epilogue. */
212 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
215 /* Get j neighbor index, and coordinate index */
220 j_coord_offsetA = DIM*jnrA;
221 j_coord_offsetB = DIM*jnrB;
222 j_coord_offsetC = DIM*jnrC;
223 j_coord_offsetD = DIM*jnrD;
225 /* load j atom coordinates */
226 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
227 x+j_coord_offsetC,x+j_coord_offsetD,
228 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
230 /* Calculate displacement vector */
231 dx00 = _mm256_sub_pd(ix0,jx0);
232 dy00 = _mm256_sub_pd(iy0,jy0);
233 dz00 = _mm256_sub_pd(iz0,jz0);
234 dx01 = _mm256_sub_pd(ix0,jx1);
235 dy01 = _mm256_sub_pd(iy0,jy1);
236 dz01 = _mm256_sub_pd(iz0,jz1);
237 dx02 = _mm256_sub_pd(ix0,jx2);
238 dy02 = _mm256_sub_pd(iy0,jy2);
239 dz02 = _mm256_sub_pd(iz0,jz2);
240 dx10 = _mm256_sub_pd(ix1,jx0);
241 dy10 = _mm256_sub_pd(iy1,jy0);
242 dz10 = _mm256_sub_pd(iz1,jz0);
243 dx11 = _mm256_sub_pd(ix1,jx1);
244 dy11 = _mm256_sub_pd(iy1,jy1);
245 dz11 = _mm256_sub_pd(iz1,jz1);
246 dx12 = _mm256_sub_pd(ix1,jx2);
247 dy12 = _mm256_sub_pd(iy1,jy2);
248 dz12 = _mm256_sub_pd(iz1,jz2);
249 dx20 = _mm256_sub_pd(ix2,jx0);
250 dy20 = _mm256_sub_pd(iy2,jy0);
251 dz20 = _mm256_sub_pd(iz2,jz0);
252 dx21 = _mm256_sub_pd(ix2,jx1);
253 dy21 = _mm256_sub_pd(iy2,jy1);
254 dz21 = _mm256_sub_pd(iz2,jz1);
255 dx22 = _mm256_sub_pd(ix2,jx2);
256 dy22 = _mm256_sub_pd(iy2,jy2);
257 dz22 = _mm256_sub_pd(iz2,jz2);
259 /* Calculate squared distance and things based on it */
260 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
261 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
262 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
263 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
264 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
265 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
266 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
267 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
268 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
270 rinv00 = avx256_invsqrt_d(rsq00);
271 rinv01 = avx256_invsqrt_d(rsq01);
272 rinv02 = avx256_invsqrt_d(rsq02);
273 rinv10 = avx256_invsqrt_d(rsq10);
274 rinv11 = avx256_invsqrt_d(rsq11);
275 rinv12 = avx256_invsqrt_d(rsq12);
276 rinv20 = avx256_invsqrt_d(rsq20);
277 rinv21 = avx256_invsqrt_d(rsq21);
278 rinv22 = avx256_invsqrt_d(rsq22);
280 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
281 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
282 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
283 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
284 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
285 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
286 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
287 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
288 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
/* Zero j-force accumulators; all nine pair interactions below add into
 * these and they are committed once per quad after the last pair. */
290 fjx0 = _mm256_setzero_pd();
291 fjy0 = _mm256_setzero_pd();
292 fjz0 = _mm256_setzero_pd();
293 fjx1 = _mm256_setzero_pd();
294 fjy1 = _mm256_setzero_pd();
295 fjz1 = _mm256_setzero_pd();
296 fjx2 = _mm256_setzero_pd();
297 fjy2 = _mm256_setzero_pd();
298 fjz2 = _mm256_setzero_pd();
300 /**************************
301 * CALCULATE INTERACTIONS *
302 **************************/
/* Pair 00 (O-O) is the only pair with LJ; it uses the cubic-spline table. */
304 r00 = _mm256_mul_pd(rsq00,rinv00);
306 /* Calculate table index by multiplying r with table scale and truncate to integer */
307 rt = _mm256_mul_pd(r00,vftabscale);
308 vfitab = _mm256_cvttpd_epi32(rt);
309 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
/* <<3: each table point is 8 doubles (4 for dispersion + 4 for repulsion). */
310 vfitab = _mm_slli_epi32(vfitab,3);
312 /* COULOMB ELECTROSTATICS */
313 velec = _mm256_mul_pd(qq00,rinv00);
314 felec = _mm256_mul_pd(velec,rinvsq00);
316 /* CUBIC SPLINE TABLE DISPERSION */
/* Gather Y,F,G,H spline coefficients for the four lanes, then transpose
 * from AoS to SoA so each register holds one coefficient for all lanes. */
317 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
318 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
319 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
320 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
321 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
322 Heps = _mm256_mul_pd(vfeps,H);
323 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
324 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
325 vvdw6 = _mm256_mul_pd(c6_00,VV);
326 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
327 fvdw6 = _mm256_mul_pd(c6_00,FF);
329 /* CUBIC SPLINE TABLE REPULSION */
330 vfitab = _mm_add_epi32(vfitab,ifour);
331 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
332 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
333 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
334 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
335 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
336 Heps = _mm256_mul_pd(vfeps,H);
337 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
338 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
339 vvdw12 = _mm256_mul_pd(c12_00,VV);
340 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
341 fvdw12 = _mm256_mul_pd(c12_00,FF);
342 vvdw = _mm256_add_pd(vvdw12,vvdw6);
/* Table derivative FF is dV/dr; negate (sign-bit xor) and scale by
 * tabscale/r to get the scalar force factor. */
343 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
345 /* Update potential sum for this i atom from the interaction with this j atom. */
346 velecsum = _mm256_add_pd(velecsum,velec);
347 vvdwsum = _mm256_add_pd(vvdwsum,vvdw);
349 fscal = _mm256_add_pd(felec,fvdw);
351 /* Calculate temporary vectorial force */
352 tx = _mm256_mul_pd(fscal,dx00);
353 ty = _mm256_mul_pd(fscal,dy00);
354 tz = _mm256_mul_pd(fscal,dz00);
356 /* Update vectorial force */
357 fix0 = _mm256_add_pd(fix0,tx);
358 fiy0 = _mm256_add_pd(fiy0,ty);
359 fiz0 = _mm256_add_pd(fiz0,tz);
361 fjx0 = _mm256_add_pd(fjx0,tx);
362 fjy0 = _mm256_add_pd(fjy0,ty);
363 fjz0 = _mm256_add_pd(fjz0,tz);
365 /**************************
366 * CALCULATE INTERACTIONS *
367 **************************/
/* Remaining eight pairs (01..22) are Coulomb-only: V = q_i q_j / r and
 * F_scal = V / r^2, applied along the displacement vector. */
369 /* COULOMB ELECTROSTATICS */
370 velec = _mm256_mul_pd(qq01,rinv01);
371 felec = _mm256_mul_pd(velec,rinvsq01);
373 /* Update potential sum for this i atom from the interaction with this j atom. */
374 velecsum = _mm256_add_pd(velecsum,velec);
378 /* Calculate temporary vectorial force */
379 tx = _mm256_mul_pd(fscal,dx01);
380 ty = _mm256_mul_pd(fscal,dy01);
381 tz = _mm256_mul_pd(fscal,dz01);
383 /* Update vectorial force */
384 fix0 = _mm256_add_pd(fix0,tx);
385 fiy0 = _mm256_add_pd(fiy0,ty);
386 fiz0 = _mm256_add_pd(fiz0,tz);
388 fjx1 = _mm256_add_pd(fjx1,tx);
389 fjy1 = _mm256_add_pd(fjy1,ty);
390 fjz1 = _mm256_add_pd(fjz1,tz);
392 /**************************
393 * CALCULATE INTERACTIONS *
394 **************************/
396 /* COULOMB ELECTROSTATICS */
397 velec = _mm256_mul_pd(qq02,rinv02);
398 felec = _mm256_mul_pd(velec,rinvsq02);
400 /* Update potential sum for this i atom from the interaction with this j atom. */
401 velecsum = _mm256_add_pd(velecsum,velec);
405 /* Calculate temporary vectorial force */
406 tx = _mm256_mul_pd(fscal,dx02);
407 ty = _mm256_mul_pd(fscal,dy02);
408 tz = _mm256_mul_pd(fscal,dz02);
410 /* Update vectorial force */
411 fix0 = _mm256_add_pd(fix0,tx);
412 fiy0 = _mm256_add_pd(fiy0,ty);
413 fiz0 = _mm256_add_pd(fiz0,tz);
415 fjx2 = _mm256_add_pd(fjx2,tx);
416 fjy2 = _mm256_add_pd(fjy2,ty);
417 fjz2 = _mm256_add_pd(fjz2,tz);
419 /**************************
420 * CALCULATE INTERACTIONS *
421 **************************/
423 /* COULOMB ELECTROSTATICS */
424 velec = _mm256_mul_pd(qq10,rinv10);
425 felec = _mm256_mul_pd(velec,rinvsq10);
427 /* Update potential sum for this i atom from the interaction with this j atom. */
428 velecsum = _mm256_add_pd(velecsum,velec);
432 /* Calculate temporary vectorial force */
433 tx = _mm256_mul_pd(fscal,dx10);
434 ty = _mm256_mul_pd(fscal,dy10);
435 tz = _mm256_mul_pd(fscal,dz10);
437 /* Update vectorial force */
438 fix1 = _mm256_add_pd(fix1,tx);
439 fiy1 = _mm256_add_pd(fiy1,ty);
440 fiz1 = _mm256_add_pd(fiz1,tz);
442 fjx0 = _mm256_add_pd(fjx0,tx);
443 fjy0 = _mm256_add_pd(fjy0,ty);
444 fjz0 = _mm256_add_pd(fjz0,tz);
446 /**************************
447 * CALCULATE INTERACTIONS *
448 **************************/
450 /* COULOMB ELECTROSTATICS */
451 velec = _mm256_mul_pd(qq11,rinv11);
452 felec = _mm256_mul_pd(velec,rinvsq11);
454 /* Update potential sum for this i atom from the interaction with this j atom. */
455 velecsum = _mm256_add_pd(velecsum,velec);
459 /* Calculate temporary vectorial force */
460 tx = _mm256_mul_pd(fscal,dx11);
461 ty = _mm256_mul_pd(fscal,dy11);
462 tz = _mm256_mul_pd(fscal,dz11);
464 /* Update vectorial force */
465 fix1 = _mm256_add_pd(fix1,tx);
466 fiy1 = _mm256_add_pd(fiy1,ty);
467 fiz1 = _mm256_add_pd(fiz1,tz);
469 fjx1 = _mm256_add_pd(fjx1,tx);
470 fjy1 = _mm256_add_pd(fjy1,ty);
471 fjz1 = _mm256_add_pd(fjz1,tz);
473 /**************************
474 * CALCULATE INTERACTIONS *
475 **************************/
477 /* COULOMB ELECTROSTATICS */
478 velec = _mm256_mul_pd(qq12,rinv12);
479 felec = _mm256_mul_pd(velec,rinvsq12);
481 /* Update potential sum for this i atom from the interaction with this j atom. */
482 velecsum = _mm256_add_pd(velecsum,velec);
486 /* Calculate temporary vectorial force */
487 tx = _mm256_mul_pd(fscal,dx12);
488 ty = _mm256_mul_pd(fscal,dy12);
489 tz = _mm256_mul_pd(fscal,dz12);
491 /* Update vectorial force */
492 fix1 = _mm256_add_pd(fix1,tx);
493 fiy1 = _mm256_add_pd(fiy1,ty);
494 fiz1 = _mm256_add_pd(fiz1,tz);
496 fjx2 = _mm256_add_pd(fjx2,tx);
497 fjy2 = _mm256_add_pd(fjy2,ty);
498 fjz2 = _mm256_add_pd(fjz2,tz);
500 /**************************
501 * CALCULATE INTERACTIONS *
502 **************************/
504 /* COULOMB ELECTROSTATICS */
505 velec = _mm256_mul_pd(qq20,rinv20);
506 felec = _mm256_mul_pd(velec,rinvsq20);
508 /* Update potential sum for this i atom from the interaction with this j atom. */
509 velecsum = _mm256_add_pd(velecsum,velec);
513 /* Calculate temporary vectorial force */
514 tx = _mm256_mul_pd(fscal,dx20);
515 ty = _mm256_mul_pd(fscal,dy20);
516 tz = _mm256_mul_pd(fscal,dz20);
518 /* Update vectorial force */
519 fix2 = _mm256_add_pd(fix2,tx);
520 fiy2 = _mm256_add_pd(fiy2,ty);
521 fiz2 = _mm256_add_pd(fiz2,tz);
523 fjx0 = _mm256_add_pd(fjx0,tx);
524 fjy0 = _mm256_add_pd(fjy0,ty);
525 fjz0 = _mm256_add_pd(fjz0,tz);
527 /**************************
528 * CALCULATE INTERACTIONS *
529 **************************/
531 /* COULOMB ELECTROSTATICS */
532 velec = _mm256_mul_pd(qq21,rinv21);
533 felec = _mm256_mul_pd(velec,rinvsq21);
535 /* Update potential sum for this i atom from the interaction with this j atom. */
536 velecsum = _mm256_add_pd(velecsum,velec);
540 /* Calculate temporary vectorial force */
541 tx = _mm256_mul_pd(fscal,dx21);
542 ty = _mm256_mul_pd(fscal,dy21);
543 tz = _mm256_mul_pd(fscal,dz21);
545 /* Update vectorial force */
546 fix2 = _mm256_add_pd(fix2,tx);
547 fiy2 = _mm256_add_pd(fiy2,ty);
548 fiz2 = _mm256_add_pd(fiz2,tz);
550 fjx1 = _mm256_add_pd(fjx1,tx);
551 fjy1 = _mm256_add_pd(fjy1,ty);
552 fjz1 = _mm256_add_pd(fjz1,tz);
554 /**************************
555 * CALCULATE INTERACTIONS *
556 **************************/
558 /* COULOMB ELECTROSTATICS */
559 velec = _mm256_mul_pd(qq22,rinv22);
560 felec = _mm256_mul_pd(velec,rinvsq22);
562 /* Update potential sum for this i atom from the interaction with this j atom. */
563 velecsum = _mm256_add_pd(velecsum,velec);
567 /* Calculate temporary vectorial force */
568 tx = _mm256_mul_pd(fscal,dx22);
569 ty = _mm256_mul_pd(fscal,dy22);
570 tz = _mm256_mul_pd(fscal,dz22);
572 /* Update vectorial force */
573 fix2 = _mm256_add_pd(fix2,tx);
574 fiy2 = _mm256_add_pd(fiy2,ty);
575 fiz2 = _mm256_add_pd(fiz2,tz);
577 fjx2 = _mm256_add_pd(fjx2,tx);
578 fjy2 = _mm256_add_pd(fjy2,ty);
579 fjz2 = _mm256_add_pd(fjz2,tz);
/* Commit accumulated j forces for this quad (subtracted, Newton's 3rd law). */
581 fjptrA = f+j_coord_offsetA;
582 fjptrB = f+j_coord_offsetB;
583 fjptrC = f+j_coord_offsetC;
584 fjptrD = f+j_coord_offsetD;
586 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
587 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
589 /* Inner loop uses 278 flops */
/*
 * Epilogue: processes the final partial quad of j atoms. Entries with a
 * negative j index are padding; dummy_mask zeroes their energy and force
 * contributions so the arithmetic below can run unconditionally.
 */
595 /* Get j neighbor index, and coordinate index */
596 jnrlistA = jjnr[jidx];
597 jnrlistB = jjnr[jidx+1];
598 jnrlistC = jjnr[jidx+2];
599 jnrlistD = jjnr[jidx+3];
600 /* Sign of each element will be negative for non-real atoms.
601 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
602 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
604 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
/* Widen the 4x32-bit mask to 4x64-bit lanes by duplicating each element. */
606 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
607 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
608 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
/* Clamp dummy indices to 0 so the coordinate loads below stay in bounds. */
610 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
611 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
612 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
613 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
614 j_coord_offsetA = DIM*jnrA;
615 j_coord_offsetB = DIM*jnrB;
616 j_coord_offsetC = DIM*jnrC;
617 j_coord_offsetD = DIM*jnrD;
619 /* load j atom coordinates */
620 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
621 x+j_coord_offsetC,x+j_coord_offsetD,
622 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
624 /* Calculate displacement vector */
625 dx00 = _mm256_sub_pd(ix0,jx0);
626 dy00 = _mm256_sub_pd(iy0,jy0);
627 dz00 = _mm256_sub_pd(iz0,jz0);
628 dx01 = _mm256_sub_pd(ix0,jx1);
629 dy01 = _mm256_sub_pd(iy0,jy1);
630 dz01 = _mm256_sub_pd(iz0,jz1);
631 dx02 = _mm256_sub_pd(ix0,jx2);
632 dy02 = _mm256_sub_pd(iy0,jy2);
633 dz02 = _mm256_sub_pd(iz0,jz2);
634 dx10 = _mm256_sub_pd(ix1,jx0);
635 dy10 = _mm256_sub_pd(iy1,jy0);
636 dz10 = _mm256_sub_pd(iz1,jz0);
637 dx11 = _mm256_sub_pd(ix1,jx1);
638 dy11 = _mm256_sub_pd(iy1,jy1);
639 dz11 = _mm256_sub_pd(iz1,jz1);
640 dx12 = _mm256_sub_pd(ix1,jx2);
641 dy12 = _mm256_sub_pd(iy1,jy2);
642 dz12 = _mm256_sub_pd(iz1,jz2);
643 dx20 = _mm256_sub_pd(ix2,jx0);
644 dy20 = _mm256_sub_pd(iy2,jy0);
645 dz20 = _mm256_sub_pd(iz2,jz0);
646 dx21 = _mm256_sub_pd(ix2,jx1);
647 dy21 = _mm256_sub_pd(iy2,jy1);
648 dz21 = _mm256_sub_pd(iz2,jz1);
649 dx22 = _mm256_sub_pd(ix2,jx2);
650 dy22 = _mm256_sub_pd(iy2,jy2);
651 dz22 = _mm256_sub_pd(iz2,jz2);
653 /* Calculate squared distance and things based on it */
654 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
655 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
656 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
657 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
658 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
659 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
660 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
661 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
662 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
664 rinv00 = avx256_invsqrt_d(rsq00);
665 rinv01 = avx256_invsqrt_d(rsq01);
666 rinv02 = avx256_invsqrt_d(rsq02);
667 rinv10 = avx256_invsqrt_d(rsq10);
668 rinv11 = avx256_invsqrt_d(rsq11);
669 rinv12 = avx256_invsqrt_d(rsq12);
670 rinv20 = avx256_invsqrt_d(rsq20);
671 rinv21 = avx256_invsqrt_d(rsq21);
672 rinv22 = avx256_invsqrt_d(rsq22);
674 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
675 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
676 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
677 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
678 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
679 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
680 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
681 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
682 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
684 fjx0 = _mm256_setzero_pd();
685 fjy0 = _mm256_setzero_pd();
686 fjz0 = _mm256_setzero_pd();
687 fjx1 = _mm256_setzero_pd();
688 fjy1 = _mm256_setzero_pd();
689 fjz1 = _mm256_setzero_pd();
690 fjx2 = _mm256_setzero_pd();
691 fjy2 = _mm256_setzero_pd();
692 fjz2 = _mm256_setzero_pd();
694 /**************************
695 * CALCULATE INTERACTIONS *
696 **************************/
698 r00 = _mm256_mul_pd(rsq00,rinv00);
/* Mask r to 0 for dummy lanes so the table index stays in range. */
699 r00 = _mm256_andnot_pd(dummy_mask,r00);
701 /* Calculate table index by multiplying r with table scale and truncate to integer */
702 rt = _mm256_mul_pd(r00,vftabscale);
703 vfitab = _mm256_cvttpd_epi32(rt);
704 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
705 vfitab = _mm_slli_epi32(vfitab,3);
707 /* COULOMB ELECTROSTATICS */
708 velec = _mm256_mul_pd(qq00,rinv00);
709 felec = _mm256_mul_pd(velec,rinvsq00);
711 /* CUBIC SPLINE TABLE DISPERSION */
712 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
713 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
714 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
715 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
716 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
717 Heps = _mm256_mul_pd(vfeps,H);
718 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
719 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
720 vvdw6 = _mm256_mul_pd(c6_00,VV);
721 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
722 fvdw6 = _mm256_mul_pd(c6_00,FF);
724 /* CUBIC SPLINE TABLE REPULSION */
725 vfitab = _mm_add_epi32(vfitab,ifour);
726 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
727 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
728 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
729 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
730 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
731 Heps = _mm256_mul_pd(vfeps,H);
732 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
733 VV = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
734 vvdw12 = _mm256_mul_pd(c12_00,VV);
735 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
736 fvdw12 = _mm256_mul_pd(c12_00,FF);
737 vvdw = _mm256_add_pd(vvdw12,vvdw6);
738 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
740 /* Update potential sum for this i atom from the interaction with this j atom. */
741 velec = _mm256_andnot_pd(dummy_mask,velec);
742 velecsum = _mm256_add_pd(velecsum,velec);
743 vvdw = _mm256_andnot_pd(dummy_mask,vvdw);
744 vvdwsum = _mm256_add_pd(vvdwsum,vvdw);
746 fscal = _mm256_add_pd(felec,fvdw);
748 fscal = _mm256_andnot_pd(dummy_mask,fscal);
750 /* Calculate temporary vectorial force */
751 tx = _mm256_mul_pd(fscal,dx00);
752 ty = _mm256_mul_pd(fscal,dy00);
753 tz = _mm256_mul_pd(fscal,dz00);
755 /* Update vectorial force */
756 fix0 = _mm256_add_pd(fix0,tx);
757 fiy0 = _mm256_add_pd(fiy0,ty);
758 fiz0 = _mm256_add_pd(fiz0,tz);
760 fjx0 = _mm256_add_pd(fjx0,tx);
761 fjy0 = _mm256_add_pd(fjy0,ty);
762 fjz0 = _mm256_add_pd(fjz0,tz);
764 /**************************
765 * CALCULATE INTERACTIONS *
766 **************************/
768 /* COULOMB ELECTROSTATICS */
769 velec = _mm256_mul_pd(qq01,rinv01);
770 felec = _mm256_mul_pd(velec,rinvsq01);
772 /* Update potential sum for this i atom from the interaction with this j atom. */
773 velec = _mm256_andnot_pd(dummy_mask,velec);
774 velecsum = _mm256_add_pd(velecsum,velec);
778 fscal = _mm256_andnot_pd(dummy_mask,fscal);
780 /* Calculate temporary vectorial force */
781 tx = _mm256_mul_pd(fscal,dx01);
782 ty = _mm256_mul_pd(fscal,dy01);
783 tz = _mm256_mul_pd(fscal,dz01);
785 /* Update vectorial force */
786 fix0 = _mm256_add_pd(fix0,tx);
787 fiy0 = _mm256_add_pd(fiy0,ty);
788 fiz0 = _mm256_add_pd(fiz0,tz);
790 fjx1 = _mm256_add_pd(fjx1,tx);
791 fjy1 = _mm256_add_pd(fjy1,ty);
792 fjz1 = _mm256_add_pd(fjz1,tz);
794 /**************************
795 * CALCULATE INTERACTIONS *
796 **************************/
798 /* COULOMB ELECTROSTATICS */
799 velec = _mm256_mul_pd(qq02,rinv02);
800 felec = _mm256_mul_pd(velec,rinvsq02);
802 /* Update potential sum for this i atom from the interaction with this j atom. */
803 velec = _mm256_andnot_pd(dummy_mask,velec);
804 velecsum = _mm256_add_pd(velecsum,velec);
808 fscal = _mm256_andnot_pd(dummy_mask,fscal);
810 /* Calculate temporary vectorial force */
811 tx = _mm256_mul_pd(fscal,dx02);
812 ty = _mm256_mul_pd(fscal,dy02);
813 tz = _mm256_mul_pd(fscal,dz02);
815 /* Update vectorial force */
816 fix0 = _mm256_add_pd(fix0,tx);
817 fiy0 = _mm256_add_pd(fiy0,ty);
818 fiz0 = _mm256_add_pd(fiz0,tz);
820 fjx2 = _mm256_add_pd(fjx2,tx);
821 fjy2 = _mm256_add_pd(fjy2,ty);
822 fjz2 = _mm256_add_pd(fjz2,tz);
824 /**************************
825 * CALCULATE INTERACTIONS *
826 **************************/
828 /* COULOMB ELECTROSTATICS */
829 velec = _mm256_mul_pd(qq10,rinv10);
830 felec = _mm256_mul_pd(velec,rinvsq10);
832 /* Update potential sum for this i atom from the interaction with this j atom. */
833 velec = _mm256_andnot_pd(dummy_mask,velec);
834 velecsum = _mm256_add_pd(velecsum,velec);
838 fscal = _mm256_andnot_pd(dummy_mask,fscal);
840 /* Calculate temporary vectorial force */
841 tx = _mm256_mul_pd(fscal,dx10);
842 ty = _mm256_mul_pd(fscal,dy10);
843 tz = _mm256_mul_pd(fscal,dz10);
845 /* Update vectorial force */
846 fix1 = _mm256_add_pd(fix1,tx);
847 fiy1 = _mm256_add_pd(fiy1,ty);
848 fiz1 = _mm256_add_pd(fiz1,tz);
850 fjx0 = _mm256_add_pd(fjx0,tx);
851 fjy0 = _mm256_add_pd(fjy0,ty);
852 fjz0 = _mm256_add_pd(fjz0,tz);
854 /**************************
855 * CALCULATE INTERACTIONS *
856 **************************/
858 /* COULOMB ELECTROSTATICS */
859 velec = _mm256_mul_pd(qq11,rinv11);
860 felec = _mm256_mul_pd(velec,rinvsq11);
862 /* Update potential sum for this i atom from the interaction with this j atom. */
863 velec = _mm256_andnot_pd(dummy_mask,velec);
864 velecsum = _mm256_add_pd(velecsum,velec);
868 fscal = _mm256_andnot_pd(dummy_mask,fscal);
870 /* Calculate temporary vectorial force */
871 tx = _mm256_mul_pd(fscal,dx11);
872 ty = _mm256_mul_pd(fscal,dy11);
873 tz = _mm256_mul_pd(fscal,dz11);
875 /* Update vectorial force */
876 fix1 = _mm256_add_pd(fix1,tx);
877 fiy1 = _mm256_add_pd(fiy1,ty);
878 fiz1 = _mm256_add_pd(fiz1,tz);
880 fjx1 = _mm256_add_pd(fjx1,tx);
881 fjy1 = _mm256_add_pd(fjy1,ty);
882 fjz1 = _mm256_add_pd(fjz1,tz);
884 /**************************
885 * CALCULATE INTERACTIONS *
886 **************************/
888 /* COULOMB ELECTROSTATICS */
889 velec = _mm256_mul_pd(qq12,rinv12);
890 felec = _mm256_mul_pd(velec,rinvsq12);
892 /* Update potential sum for this i atom from the interaction with this j atom. */
893 velec = _mm256_andnot_pd(dummy_mask,velec);
894 velecsum = _mm256_add_pd(velecsum,velec);
898 fscal = _mm256_andnot_pd(dummy_mask,fscal);
900 /* Calculate temporary vectorial force */
901 tx = _mm256_mul_pd(fscal,dx12);
902 ty = _mm256_mul_pd(fscal,dy12);
903 tz = _mm256_mul_pd(fscal,dz12);
905 /* Update vectorial force */
906 fix1 = _mm256_add_pd(fix1,tx);
907 fiy1 = _mm256_add_pd(fiy1,ty);
908 fiz1 = _mm256_add_pd(fiz1,tz);
910 fjx2 = _mm256_add_pd(fjx2,tx);
911 fjy2 = _mm256_add_pd(fjy2,ty);
912 fjz2 = _mm256_add_pd(fjz2,tz);
914 /**************************
915 * CALCULATE INTERACTIONS *
916 **************************/
918 /* COULOMB ELECTROSTATICS */
919 velec = _mm256_mul_pd(qq20,rinv20);
920 felec = _mm256_mul_pd(velec,rinvsq20);
922 /* Update potential sum for this i atom from the interaction with this j atom. */
923 velec = _mm256_andnot_pd(dummy_mask,velec);
924 velecsum = _mm256_add_pd(velecsum,velec);
928 fscal = _mm256_andnot_pd(dummy_mask,fscal);
930 /* Calculate temporary vectorial force */
931 tx = _mm256_mul_pd(fscal,dx20);
932 ty = _mm256_mul_pd(fscal,dy20);
933 tz = _mm256_mul_pd(fscal,dz20);
935 /* Update vectorial force */
936 fix2 = _mm256_add_pd(fix2,tx);
937 fiy2 = _mm256_add_pd(fiy2,ty);
938 fiz2 = _mm256_add_pd(fiz2,tz);
940 fjx0 = _mm256_add_pd(fjx0,tx);
941 fjy0 = _mm256_add_pd(fjy0,ty);
942 fjz0 = _mm256_add_pd(fjz0,tz);
944 /**************************
945 * CALCULATE INTERACTIONS *
946 **************************/
948 /* COULOMB ELECTROSTATICS */
949 velec = _mm256_mul_pd(qq21,rinv21);
950 felec = _mm256_mul_pd(velec,rinvsq21);
952 /* Update potential sum for this i atom from the interaction with this j atom. */
953 velec = _mm256_andnot_pd(dummy_mask,velec);
954 velecsum = _mm256_add_pd(velecsum,velec);
958 fscal = _mm256_andnot_pd(dummy_mask,fscal);
960 /* Calculate temporary vectorial force */
961 tx = _mm256_mul_pd(fscal,dx21);
962 ty = _mm256_mul_pd(fscal,dy21);
963 tz = _mm256_mul_pd(fscal,dz21);
965 /* Update vectorial force */
966 fix2 = _mm256_add_pd(fix2,tx);
967 fiy2 = _mm256_add_pd(fiy2,ty);
968 fiz2 = _mm256_add_pd(fiz2,tz);
970 fjx1 = _mm256_add_pd(fjx1,tx);
971 fjy1 = _mm256_add_pd(fjy1,ty);
972 fjz1 = _mm256_add_pd(fjz1,tz);
974 /**************************
975 * CALCULATE INTERACTIONS *
976 **************************/
978 /* COULOMB ELECTROSTATICS */
979 velec = _mm256_mul_pd(qq22,rinv22);
980 felec = _mm256_mul_pd(velec,rinvsq22);
982 /* Update potential sum for this i atom from the interaction with this j atom. */
983 velec = _mm256_andnot_pd(dummy_mask,velec);
984 velecsum = _mm256_add_pd(velecsum,velec);
988 fscal = _mm256_andnot_pd(dummy_mask,fscal);
990 /* Calculate temporary vectorial force */
991 tx = _mm256_mul_pd(fscal,dx22);
992 ty = _mm256_mul_pd(fscal,dy22);
993 tz = _mm256_mul_pd(fscal,dz22);
995 /* Update vectorial force */
996 fix2 = _mm256_add_pd(fix2,tx);
997 fiy2 = _mm256_add_pd(fiy2,ty);
998 fiz2 = _mm256_add_pd(fiz2,tz);
1000 fjx2 = _mm256_add_pd(fjx2,tx);
1001 fjy2 = _mm256_add_pd(fjy2,ty);
1002 fjz2 = _mm256_add_pd(fjz2,tz);
/* Dummy entries route their (already-zeroed) j forces to a scratch buffer
 * instead of the real force array, so no conditional store is needed. */
1004 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1005 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1006 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1007 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1009 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1010 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1012 /* Inner loop uses 279 flops */
1015 /* End of innermost loop */
/* Reduce the SIMD i-force accumulators and add to f[] and the shift forces. */
1017 gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1018 f+i_coord_offset,fshift+i_shift_offset);
1021 /* Update potential energies */
1022 gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1023 gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
1025 /* Increment number of inner iterations */
1026 inneriter += j_index_end - j_index_start;
1028 /* Outer loop uses 20 flops */
1031 /* Increment number of outer iterations */
1034 /* Update outer/inner flops */
1036 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*279);
1039 * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_double
1040 * Electrostatics interaction: Coulomb
1041 * VdW interaction: CubicSplineTable
1042 * Geometry: Water3-Water3
1043 * Calculate force/pot: Force
1046 nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_double
1047 (t_nblist * gmx_restrict nlist,
1048 rvec * gmx_restrict xx,
1049 rvec * gmx_restrict ff,
1050 struct t_forcerec * gmx_restrict fr,
1051 t_mdatoms * gmx_restrict mdatoms,
1052 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1053 t_nrnb * gmx_restrict nrnb)
1055 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1056 * just 0 for non-waters.
1057 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1058 * jnr indices corresponding to data put in the four positions in the SIMD register.
1060 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1061 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1062 int jnrA,jnrB,jnrC,jnrD;
1063 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1064 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1065 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1066 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1067 real rcutoff_scalar;
1068 real *shiftvec,*fshift,*x,*f;
1069 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1070 real scratch[4*DIM];
1071 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1072 real * vdwioffsetptr0;
1073 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1074 real * vdwioffsetptr1;
1075 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1076 real * vdwioffsetptr2;
1077 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1078 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1079 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1080 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1081 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1082 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1083 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1084 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1085 __m256d dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1086 __m256d dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1087 __m256d dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1088 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1089 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1090 __m256d dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1091 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1092 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1093 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
1096 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1099 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
1100 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
1102 __m128i ifour = _mm_set1_epi32(4);
1103 __m256d rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1105 __m256d dummy_mask,cutoff_mask;
1106 __m128 tmpmask0,tmpmask1;
1107 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1108 __m256d one = _mm256_set1_pd(1.0);
1109 __m256d two = _mm256_set1_pd(2.0);
1115 jindex = nlist->jindex;
1117 shiftidx = nlist->shift;
1119 shiftvec = fr->shift_vec[0];
1120 fshift = fr->fshift[0];
1121 facel = _mm256_set1_pd(fr->ic->epsfac);
1122 charge = mdatoms->chargeA;
1123 nvdwtype = fr->ntype;
1124 vdwparam = fr->nbfp;
1125 vdwtype = mdatoms->typeA;
1127 vftab = kernel_data->table_vdw->data;
1128 vftabscale = _mm256_set1_pd(kernel_data->table_vdw->scale);
1130 /* Setup water-specific parameters */
1131 inr = nlist->iinr[0];
1132 iq0 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1133 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1134 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1135 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1137 jq0 = _mm256_set1_pd(charge[inr+0]);
1138 jq1 = _mm256_set1_pd(charge[inr+1]);
1139 jq2 = _mm256_set1_pd(charge[inr+2]);
1140 vdwjidx0A = 2*vdwtype[inr+0];
1141 qq00 = _mm256_mul_pd(iq0,jq0);
1142 c6_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1143 c12_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1144 qq01 = _mm256_mul_pd(iq0,jq1);
1145 qq02 = _mm256_mul_pd(iq0,jq2);
1146 qq10 = _mm256_mul_pd(iq1,jq0);
1147 qq11 = _mm256_mul_pd(iq1,jq1);
1148 qq12 = _mm256_mul_pd(iq1,jq2);
1149 qq20 = _mm256_mul_pd(iq2,jq0);
1150 qq21 = _mm256_mul_pd(iq2,jq1);
1151 qq22 = _mm256_mul_pd(iq2,jq2);
1153 /* Avoid stupid compiler warnings */
1154 jnrA = jnrB = jnrC = jnrD = 0;
1155 j_coord_offsetA = 0;
1156 j_coord_offsetB = 0;
1157 j_coord_offsetC = 0;
1158 j_coord_offsetD = 0;
1163 for(iidx=0;iidx<4*DIM;iidx++)
1165 scratch[iidx] = 0.0;
1168 /* Start outer loop over neighborlists */
1169 for(iidx=0; iidx<nri; iidx++)
1171 /* Load shift vector for this list */
1172 i_shift_offset = DIM*shiftidx[iidx];
1174 /* Load limits for loop over neighbors */
1175 j_index_start = jindex[iidx];
1176 j_index_end = jindex[iidx+1];
1178 /* Get outer coordinate index */
1180 i_coord_offset = DIM*inr;
1182 /* Load i particle coords and add shift vector */
1183 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1184 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1186 fix0 = _mm256_setzero_pd();
1187 fiy0 = _mm256_setzero_pd();
1188 fiz0 = _mm256_setzero_pd();
1189 fix1 = _mm256_setzero_pd();
1190 fiy1 = _mm256_setzero_pd();
1191 fiz1 = _mm256_setzero_pd();
1192 fix2 = _mm256_setzero_pd();
1193 fiy2 = _mm256_setzero_pd();
1194 fiz2 = _mm256_setzero_pd();
1196 /* Start inner kernel loop */
1197 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1200 /* Get j neighbor index, and coordinate index */
1202 jnrB = jjnr[jidx+1];
1203 jnrC = jjnr[jidx+2];
1204 jnrD = jjnr[jidx+3];
1205 j_coord_offsetA = DIM*jnrA;
1206 j_coord_offsetB = DIM*jnrB;
1207 j_coord_offsetC = DIM*jnrC;
1208 j_coord_offsetD = DIM*jnrD;
1210 /* load j atom coordinates */
1211 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1212 x+j_coord_offsetC,x+j_coord_offsetD,
1213 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1215 /* Calculate displacement vector */
1216 dx00 = _mm256_sub_pd(ix0,jx0);
1217 dy00 = _mm256_sub_pd(iy0,jy0);
1218 dz00 = _mm256_sub_pd(iz0,jz0);
1219 dx01 = _mm256_sub_pd(ix0,jx1);
1220 dy01 = _mm256_sub_pd(iy0,jy1);
1221 dz01 = _mm256_sub_pd(iz0,jz1);
1222 dx02 = _mm256_sub_pd(ix0,jx2);
1223 dy02 = _mm256_sub_pd(iy0,jy2);
1224 dz02 = _mm256_sub_pd(iz0,jz2);
1225 dx10 = _mm256_sub_pd(ix1,jx0);
1226 dy10 = _mm256_sub_pd(iy1,jy0);
1227 dz10 = _mm256_sub_pd(iz1,jz0);
1228 dx11 = _mm256_sub_pd(ix1,jx1);
1229 dy11 = _mm256_sub_pd(iy1,jy1);
1230 dz11 = _mm256_sub_pd(iz1,jz1);
1231 dx12 = _mm256_sub_pd(ix1,jx2);
1232 dy12 = _mm256_sub_pd(iy1,jy2);
1233 dz12 = _mm256_sub_pd(iz1,jz2);
1234 dx20 = _mm256_sub_pd(ix2,jx0);
1235 dy20 = _mm256_sub_pd(iy2,jy0);
1236 dz20 = _mm256_sub_pd(iz2,jz0);
1237 dx21 = _mm256_sub_pd(ix2,jx1);
1238 dy21 = _mm256_sub_pd(iy2,jy1);
1239 dz21 = _mm256_sub_pd(iz2,jz1);
1240 dx22 = _mm256_sub_pd(ix2,jx2);
1241 dy22 = _mm256_sub_pd(iy2,jy2);
1242 dz22 = _mm256_sub_pd(iz2,jz2);
1244 /* Calculate squared distance and things based on it */
1245 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1246 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1247 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1248 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1249 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1250 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1251 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1252 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1253 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1255 rinv00 = avx256_invsqrt_d(rsq00);
1256 rinv01 = avx256_invsqrt_d(rsq01);
1257 rinv02 = avx256_invsqrt_d(rsq02);
1258 rinv10 = avx256_invsqrt_d(rsq10);
1259 rinv11 = avx256_invsqrt_d(rsq11);
1260 rinv12 = avx256_invsqrt_d(rsq12);
1261 rinv20 = avx256_invsqrt_d(rsq20);
1262 rinv21 = avx256_invsqrt_d(rsq21);
1263 rinv22 = avx256_invsqrt_d(rsq22);
1265 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
1266 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
1267 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
1268 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
1269 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1270 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1271 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
1272 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1273 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1275 fjx0 = _mm256_setzero_pd();
1276 fjy0 = _mm256_setzero_pd();
1277 fjz0 = _mm256_setzero_pd();
1278 fjx1 = _mm256_setzero_pd();
1279 fjy1 = _mm256_setzero_pd();
1280 fjz1 = _mm256_setzero_pd();
1281 fjx2 = _mm256_setzero_pd();
1282 fjy2 = _mm256_setzero_pd();
1283 fjz2 = _mm256_setzero_pd();
1285 /**************************
1286 * CALCULATE INTERACTIONS *
1287 **************************/
1289 r00 = _mm256_mul_pd(rsq00,rinv00);
1291 /* Calculate table index by multiplying r with table scale and truncate to integer */
1292 rt = _mm256_mul_pd(r00,vftabscale);
1293 vfitab = _mm256_cvttpd_epi32(rt);
1294 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1295 vfitab = _mm_slli_epi32(vfitab,3);
1297 /* COULOMB ELECTROSTATICS */
1298 velec = _mm256_mul_pd(qq00,rinv00);
1299 felec = _mm256_mul_pd(velec,rinvsq00);
1301 /* CUBIC SPLINE TABLE DISPERSION */
1302 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1303 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1304 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1305 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1306 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1307 Heps = _mm256_mul_pd(vfeps,H);
1308 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1309 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1310 fvdw6 = _mm256_mul_pd(c6_00,FF);
1312 /* CUBIC SPLINE TABLE REPULSION */
1313 vfitab = _mm_add_epi32(vfitab,ifour);
1314 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1315 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1316 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1317 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1318 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1319 Heps = _mm256_mul_pd(vfeps,H);
1320 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1321 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1322 fvdw12 = _mm256_mul_pd(c12_00,FF);
1323 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1325 fscal = _mm256_add_pd(felec,fvdw);
1327 /* Calculate temporary vectorial force */
1328 tx = _mm256_mul_pd(fscal,dx00);
1329 ty = _mm256_mul_pd(fscal,dy00);
1330 tz = _mm256_mul_pd(fscal,dz00);
1332 /* Update vectorial force */
1333 fix0 = _mm256_add_pd(fix0,tx);
1334 fiy0 = _mm256_add_pd(fiy0,ty);
1335 fiz0 = _mm256_add_pd(fiz0,tz);
1337 fjx0 = _mm256_add_pd(fjx0,tx);
1338 fjy0 = _mm256_add_pd(fjy0,ty);
1339 fjz0 = _mm256_add_pd(fjz0,tz);
1341 /**************************
1342 * CALCULATE INTERACTIONS *
1343 **************************/
1345 /* COULOMB ELECTROSTATICS */
1346 velec = _mm256_mul_pd(qq01,rinv01);
1347 felec = _mm256_mul_pd(velec,rinvsq01);
1351 /* Calculate temporary vectorial force */
1352 tx = _mm256_mul_pd(fscal,dx01);
1353 ty = _mm256_mul_pd(fscal,dy01);
1354 tz = _mm256_mul_pd(fscal,dz01);
1356 /* Update vectorial force */
1357 fix0 = _mm256_add_pd(fix0,tx);
1358 fiy0 = _mm256_add_pd(fiy0,ty);
1359 fiz0 = _mm256_add_pd(fiz0,tz);
1361 fjx1 = _mm256_add_pd(fjx1,tx);
1362 fjy1 = _mm256_add_pd(fjy1,ty);
1363 fjz1 = _mm256_add_pd(fjz1,tz);
1365 /**************************
1366 * CALCULATE INTERACTIONS *
1367 **************************/
1369 /* COULOMB ELECTROSTATICS */
1370 velec = _mm256_mul_pd(qq02,rinv02);
1371 felec = _mm256_mul_pd(velec,rinvsq02);
1375 /* Calculate temporary vectorial force */
1376 tx = _mm256_mul_pd(fscal,dx02);
1377 ty = _mm256_mul_pd(fscal,dy02);
1378 tz = _mm256_mul_pd(fscal,dz02);
1380 /* Update vectorial force */
1381 fix0 = _mm256_add_pd(fix0,tx);
1382 fiy0 = _mm256_add_pd(fiy0,ty);
1383 fiz0 = _mm256_add_pd(fiz0,tz);
1385 fjx2 = _mm256_add_pd(fjx2,tx);
1386 fjy2 = _mm256_add_pd(fjy2,ty);
1387 fjz2 = _mm256_add_pd(fjz2,tz);
1389 /**************************
1390 * CALCULATE INTERACTIONS *
1391 **************************/
1393 /* COULOMB ELECTROSTATICS */
1394 velec = _mm256_mul_pd(qq10,rinv10);
1395 felec = _mm256_mul_pd(velec,rinvsq10);
1399 /* Calculate temporary vectorial force */
1400 tx = _mm256_mul_pd(fscal,dx10);
1401 ty = _mm256_mul_pd(fscal,dy10);
1402 tz = _mm256_mul_pd(fscal,dz10);
1404 /* Update vectorial force */
1405 fix1 = _mm256_add_pd(fix1,tx);
1406 fiy1 = _mm256_add_pd(fiy1,ty);
1407 fiz1 = _mm256_add_pd(fiz1,tz);
1409 fjx0 = _mm256_add_pd(fjx0,tx);
1410 fjy0 = _mm256_add_pd(fjy0,ty);
1411 fjz0 = _mm256_add_pd(fjz0,tz);
1413 /**************************
1414 * CALCULATE INTERACTIONS *
1415 **************************/
1417 /* COULOMB ELECTROSTATICS */
1418 velec = _mm256_mul_pd(qq11,rinv11);
1419 felec = _mm256_mul_pd(velec,rinvsq11);
1423 /* Calculate temporary vectorial force */
1424 tx = _mm256_mul_pd(fscal,dx11);
1425 ty = _mm256_mul_pd(fscal,dy11);
1426 tz = _mm256_mul_pd(fscal,dz11);
1428 /* Update vectorial force */
1429 fix1 = _mm256_add_pd(fix1,tx);
1430 fiy1 = _mm256_add_pd(fiy1,ty);
1431 fiz1 = _mm256_add_pd(fiz1,tz);
1433 fjx1 = _mm256_add_pd(fjx1,tx);
1434 fjy1 = _mm256_add_pd(fjy1,ty);
1435 fjz1 = _mm256_add_pd(fjz1,tz);
1437 /**************************
1438 * CALCULATE INTERACTIONS *
1439 **************************/
1441 /* COULOMB ELECTROSTATICS */
1442 velec = _mm256_mul_pd(qq12,rinv12);
1443 felec = _mm256_mul_pd(velec,rinvsq12);
1447 /* Calculate temporary vectorial force */
1448 tx = _mm256_mul_pd(fscal,dx12);
1449 ty = _mm256_mul_pd(fscal,dy12);
1450 tz = _mm256_mul_pd(fscal,dz12);
1452 /* Update vectorial force */
1453 fix1 = _mm256_add_pd(fix1,tx);
1454 fiy1 = _mm256_add_pd(fiy1,ty);
1455 fiz1 = _mm256_add_pd(fiz1,tz);
1457 fjx2 = _mm256_add_pd(fjx2,tx);
1458 fjy2 = _mm256_add_pd(fjy2,ty);
1459 fjz2 = _mm256_add_pd(fjz2,tz);
1461 /**************************
1462 * CALCULATE INTERACTIONS *
1463 **************************/
1465 /* COULOMB ELECTROSTATICS */
1466 velec = _mm256_mul_pd(qq20,rinv20);
1467 felec = _mm256_mul_pd(velec,rinvsq20);
1471 /* Calculate temporary vectorial force */
1472 tx = _mm256_mul_pd(fscal,dx20);
1473 ty = _mm256_mul_pd(fscal,dy20);
1474 tz = _mm256_mul_pd(fscal,dz20);
1476 /* Update vectorial force */
1477 fix2 = _mm256_add_pd(fix2,tx);
1478 fiy2 = _mm256_add_pd(fiy2,ty);
1479 fiz2 = _mm256_add_pd(fiz2,tz);
1481 fjx0 = _mm256_add_pd(fjx0,tx);
1482 fjy0 = _mm256_add_pd(fjy0,ty);
1483 fjz0 = _mm256_add_pd(fjz0,tz);
1485 /**************************
1486 * CALCULATE INTERACTIONS *
1487 **************************/
1489 /* COULOMB ELECTROSTATICS */
1490 velec = _mm256_mul_pd(qq21,rinv21);
1491 felec = _mm256_mul_pd(velec,rinvsq21);
1495 /* Calculate temporary vectorial force */
1496 tx = _mm256_mul_pd(fscal,dx21);
1497 ty = _mm256_mul_pd(fscal,dy21);
1498 tz = _mm256_mul_pd(fscal,dz21);
1500 /* Update vectorial force */
1501 fix2 = _mm256_add_pd(fix2,tx);
1502 fiy2 = _mm256_add_pd(fiy2,ty);
1503 fiz2 = _mm256_add_pd(fiz2,tz);
1505 fjx1 = _mm256_add_pd(fjx1,tx);
1506 fjy1 = _mm256_add_pd(fjy1,ty);
1507 fjz1 = _mm256_add_pd(fjz1,tz);
1509 /**************************
1510 * CALCULATE INTERACTIONS *
1511 **************************/
1513 /* COULOMB ELECTROSTATICS */
1514 velec = _mm256_mul_pd(qq22,rinv22);
1515 felec = _mm256_mul_pd(velec,rinvsq22);
1519 /* Calculate temporary vectorial force */
1520 tx = _mm256_mul_pd(fscal,dx22);
1521 ty = _mm256_mul_pd(fscal,dy22);
1522 tz = _mm256_mul_pd(fscal,dz22);
1524 /* Update vectorial force */
1525 fix2 = _mm256_add_pd(fix2,tx);
1526 fiy2 = _mm256_add_pd(fiy2,ty);
1527 fiz2 = _mm256_add_pd(fiz2,tz);
1529 fjx2 = _mm256_add_pd(fjx2,tx);
1530 fjy2 = _mm256_add_pd(fjy2,ty);
1531 fjz2 = _mm256_add_pd(fjz2,tz);
1533 fjptrA = f+j_coord_offsetA;
1534 fjptrB = f+j_coord_offsetB;
1535 fjptrC = f+j_coord_offsetC;
1536 fjptrD = f+j_coord_offsetD;
1538 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1539 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1541 /* Inner loop uses 261 flops */
1544 if(jidx<j_index_end)
1547 /* Get j neighbor index, and coordinate index */
1548 jnrlistA = jjnr[jidx];
1549 jnrlistB = jjnr[jidx+1];
1550 jnrlistC = jjnr[jidx+2];
1551 jnrlistD = jjnr[jidx+3];
1552 /* Sign of each element will be negative for non-real atoms.
1553 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1554 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
1556 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1558 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1559 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1560 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
1562 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1563 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1564 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1565 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1566 j_coord_offsetA = DIM*jnrA;
1567 j_coord_offsetB = DIM*jnrB;
1568 j_coord_offsetC = DIM*jnrC;
1569 j_coord_offsetD = DIM*jnrD;
1571 /* load j atom coordinates */
1572 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1573 x+j_coord_offsetC,x+j_coord_offsetD,
1574 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1576 /* Calculate displacement vector */
1577 dx00 = _mm256_sub_pd(ix0,jx0);
1578 dy00 = _mm256_sub_pd(iy0,jy0);
1579 dz00 = _mm256_sub_pd(iz0,jz0);
1580 dx01 = _mm256_sub_pd(ix0,jx1);
1581 dy01 = _mm256_sub_pd(iy0,jy1);
1582 dz01 = _mm256_sub_pd(iz0,jz1);
1583 dx02 = _mm256_sub_pd(ix0,jx2);
1584 dy02 = _mm256_sub_pd(iy0,jy2);
1585 dz02 = _mm256_sub_pd(iz0,jz2);
1586 dx10 = _mm256_sub_pd(ix1,jx0);
1587 dy10 = _mm256_sub_pd(iy1,jy0);
1588 dz10 = _mm256_sub_pd(iz1,jz0);
1589 dx11 = _mm256_sub_pd(ix1,jx1);
1590 dy11 = _mm256_sub_pd(iy1,jy1);
1591 dz11 = _mm256_sub_pd(iz1,jz1);
1592 dx12 = _mm256_sub_pd(ix1,jx2);
1593 dy12 = _mm256_sub_pd(iy1,jy2);
1594 dz12 = _mm256_sub_pd(iz1,jz2);
1595 dx20 = _mm256_sub_pd(ix2,jx0);
1596 dy20 = _mm256_sub_pd(iy2,jy0);
1597 dz20 = _mm256_sub_pd(iz2,jz0);
1598 dx21 = _mm256_sub_pd(ix2,jx1);
1599 dy21 = _mm256_sub_pd(iy2,jy1);
1600 dz21 = _mm256_sub_pd(iz2,jz1);
1601 dx22 = _mm256_sub_pd(ix2,jx2);
1602 dy22 = _mm256_sub_pd(iy2,jy2);
1603 dz22 = _mm256_sub_pd(iz2,jz2);
1605 /* Calculate squared distance and things based on it */
1606 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1607 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1608 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1609 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1610 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1611 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1612 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1613 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1614 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1616 rinv00 = avx256_invsqrt_d(rsq00);
1617 rinv01 = avx256_invsqrt_d(rsq01);
1618 rinv02 = avx256_invsqrt_d(rsq02);
1619 rinv10 = avx256_invsqrt_d(rsq10);
1620 rinv11 = avx256_invsqrt_d(rsq11);
1621 rinv12 = avx256_invsqrt_d(rsq12);
1622 rinv20 = avx256_invsqrt_d(rsq20);
1623 rinv21 = avx256_invsqrt_d(rsq21);
1624 rinv22 = avx256_invsqrt_d(rsq22);
1626 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
1627 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
1628 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
1629 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
1630 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1631 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1632 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
1633 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1634 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1636 fjx0 = _mm256_setzero_pd();
1637 fjy0 = _mm256_setzero_pd();
1638 fjz0 = _mm256_setzero_pd();
1639 fjx1 = _mm256_setzero_pd();
1640 fjy1 = _mm256_setzero_pd();
1641 fjz1 = _mm256_setzero_pd();
1642 fjx2 = _mm256_setzero_pd();
1643 fjy2 = _mm256_setzero_pd();
1644 fjz2 = _mm256_setzero_pd();
1646 /**************************
1647 * CALCULATE INTERACTIONS *
1648 **************************/
1650 r00 = _mm256_mul_pd(rsq00,rinv00);
1651 r00 = _mm256_andnot_pd(dummy_mask,r00);
1653 /* Calculate table index by multiplying r with table scale and truncate to integer */
1654 rt = _mm256_mul_pd(r00,vftabscale);
1655 vfitab = _mm256_cvttpd_epi32(rt);
1656 vfeps = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1657 vfitab = _mm_slli_epi32(vfitab,3);
1659 /* COULOMB ELECTROSTATICS */
1660 velec = _mm256_mul_pd(qq00,rinv00);
1661 felec = _mm256_mul_pd(velec,rinvsq00);
1663 /* CUBIC SPLINE TABLE DISPERSION */
1664 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1665 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1666 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1667 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1668 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1669 Heps = _mm256_mul_pd(vfeps,H);
1670 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1671 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1672 fvdw6 = _mm256_mul_pd(c6_00,FF);
1674 /* CUBIC SPLINE TABLE REPULSION */
1675 vfitab = _mm_add_epi32(vfitab,ifour);
1676 Y = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1677 F = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1678 G = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1679 H = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1680 GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1681 Heps = _mm256_mul_pd(vfeps,H);
1682 Fp = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1683 FF = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1684 fvdw12 = _mm256_mul_pd(c12_00,FF);
1685 fvdw = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1687 fscal = _mm256_add_pd(felec,fvdw);
1689 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1691 /* Calculate temporary vectorial force */
1692 tx = _mm256_mul_pd(fscal,dx00);
1693 ty = _mm256_mul_pd(fscal,dy00);
1694 tz = _mm256_mul_pd(fscal,dz00);
1696 /* Update vectorial force */
1697 fix0 = _mm256_add_pd(fix0,tx);
1698 fiy0 = _mm256_add_pd(fiy0,ty);
1699 fiz0 = _mm256_add_pd(fiz0,tz);
1701 fjx0 = _mm256_add_pd(fjx0,tx);
1702 fjy0 = _mm256_add_pd(fjy0,ty);
1703 fjz0 = _mm256_add_pd(fjz0,tz);
1705 /**************************
1706 * CALCULATE INTERACTIONS *
1707 **************************/
1709 /* COULOMB ELECTROSTATICS */
1710 velec = _mm256_mul_pd(qq01,rinv01);
1711 felec = _mm256_mul_pd(velec,rinvsq01);
1715 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1717 /* Calculate temporary vectorial force */
1718 tx = _mm256_mul_pd(fscal,dx01);
1719 ty = _mm256_mul_pd(fscal,dy01);
1720 tz = _mm256_mul_pd(fscal,dz01);
1722 /* Update vectorial force */
1723 fix0 = _mm256_add_pd(fix0,tx);
1724 fiy0 = _mm256_add_pd(fiy0,ty);
1725 fiz0 = _mm256_add_pd(fiz0,tz);
1727 fjx1 = _mm256_add_pd(fjx1,tx);
1728 fjy1 = _mm256_add_pd(fjy1,ty);
1729 fjz1 = _mm256_add_pd(fjz1,tz);
1731 /**************************
1732 * CALCULATE INTERACTIONS *
1733 **************************/
1735 /* COULOMB ELECTROSTATICS */
1736 velec = _mm256_mul_pd(qq02,rinv02);
1737 felec = _mm256_mul_pd(velec,rinvsq02);
1741 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1743 /* Calculate temporary vectorial force */
1744 tx = _mm256_mul_pd(fscal,dx02);
1745 ty = _mm256_mul_pd(fscal,dy02);
1746 tz = _mm256_mul_pd(fscal,dz02);
1748 /* Update vectorial force */
1749 fix0 = _mm256_add_pd(fix0,tx);
1750 fiy0 = _mm256_add_pd(fiy0,ty);
1751 fiz0 = _mm256_add_pd(fiz0,tz);
1753 fjx2 = _mm256_add_pd(fjx2,tx);
1754 fjy2 = _mm256_add_pd(fjy2,ty);
1755 fjz2 = _mm256_add_pd(fjz2,tz);
1757 /**************************
1758 * CALCULATE INTERACTIONS *
1759 **************************/
1761 /* COULOMB ELECTROSTATICS */
1762 velec = _mm256_mul_pd(qq10,rinv10);
1763 felec = _mm256_mul_pd(velec,rinvsq10);
1767 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1769 /* Calculate temporary vectorial force */
1770 tx = _mm256_mul_pd(fscal,dx10);
1771 ty = _mm256_mul_pd(fscal,dy10);
1772 tz = _mm256_mul_pd(fscal,dz10);
1774 /* Update vectorial force */
1775 fix1 = _mm256_add_pd(fix1,tx);
1776 fiy1 = _mm256_add_pd(fiy1,ty);
1777 fiz1 = _mm256_add_pd(fiz1,tz);
1779 fjx0 = _mm256_add_pd(fjx0,tx);
1780 fjy0 = _mm256_add_pd(fjy0,ty);
1781 fjz0 = _mm256_add_pd(fjz0,tz);
1783 /**************************
1784 * CALCULATE INTERACTIONS *
1785 **************************/
1787 /* COULOMB ELECTROSTATICS */
1788 velec = _mm256_mul_pd(qq11,rinv11);
1789 felec = _mm256_mul_pd(velec,rinvsq11);
1793 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1795 /* Calculate temporary vectorial force */
1796 tx = _mm256_mul_pd(fscal,dx11);
1797 ty = _mm256_mul_pd(fscal,dy11);
1798 tz = _mm256_mul_pd(fscal,dz11);
1800 /* Update vectorial force */
1801 fix1 = _mm256_add_pd(fix1,tx);
1802 fiy1 = _mm256_add_pd(fiy1,ty);
1803 fiz1 = _mm256_add_pd(fiz1,tz);
1805 fjx1 = _mm256_add_pd(fjx1,tx);
1806 fjy1 = _mm256_add_pd(fjy1,ty);
1807 fjz1 = _mm256_add_pd(fjz1,tz);
1809 /**************************
1810 * CALCULATE INTERACTIONS *
1811 **************************/
1813 /* COULOMB ELECTROSTATICS */
1814 velec = _mm256_mul_pd(qq12,rinv12);
1815 felec = _mm256_mul_pd(velec,rinvsq12);
1819 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1821 /* Calculate temporary vectorial force */
1822 tx = _mm256_mul_pd(fscal,dx12);
1823 ty = _mm256_mul_pd(fscal,dy12);
1824 tz = _mm256_mul_pd(fscal,dz12);
1826 /* Update vectorial force */
1827 fix1 = _mm256_add_pd(fix1,tx);
1828 fiy1 = _mm256_add_pd(fiy1,ty);
1829 fiz1 = _mm256_add_pd(fiz1,tz);
1831 fjx2 = _mm256_add_pd(fjx2,tx);
1832 fjy2 = _mm256_add_pd(fjy2,ty);
1833 fjz2 = _mm256_add_pd(fjz2,tz);
1835 /**************************
1836 * CALCULATE INTERACTIONS *
1837 **************************/
1839 /* COULOMB ELECTROSTATICS */
1840 velec = _mm256_mul_pd(qq20,rinv20);
1841 felec = _mm256_mul_pd(velec,rinvsq20);
1845 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1847 /* Calculate temporary vectorial force */
1848 tx = _mm256_mul_pd(fscal,dx20);
1849 ty = _mm256_mul_pd(fscal,dy20);
1850 tz = _mm256_mul_pd(fscal,dz20);
1852 /* Update vectorial force */
1853 fix2 = _mm256_add_pd(fix2,tx);
1854 fiy2 = _mm256_add_pd(fiy2,ty);
1855 fiz2 = _mm256_add_pd(fiz2,tz);
1857 fjx0 = _mm256_add_pd(fjx0,tx);
1858 fjy0 = _mm256_add_pd(fjy0,ty);
1859 fjz0 = _mm256_add_pd(fjz0,tz);
1861 /**************************
1862 * CALCULATE INTERACTIONS *
1863 **************************/
1865 /* COULOMB ELECTROSTATICS */
1866 velec = _mm256_mul_pd(qq21,rinv21);
1867 felec = _mm256_mul_pd(velec,rinvsq21);
1871 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1873 /* Calculate temporary vectorial force */
1874 tx = _mm256_mul_pd(fscal,dx21);
1875 ty = _mm256_mul_pd(fscal,dy21);
1876 tz = _mm256_mul_pd(fscal,dz21);
1878 /* Update vectorial force */
1879 fix2 = _mm256_add_pd(fix2,tx);
1880 fiy2 = _mm256_add_pd(fiy2,ty);
1881 fiz2 = _mm256_add_pd(fiz2,tz);
1883 fjx1 = _mm256_add_pd(fjx1,tx);
1884 fjy1 = _mm256_add_pd(fjy1,ty);
1885 fjz1 = _mm256_add_pd(fjz1,tz);
1887 /**************************
1888 * CALCULATE INTERACTIONS *
1889 **************************/
1891 /* COULOMB ELECTROSTATICS */
1892 velec = _mm256_mul_pd(qq22,rinv22);
1893 felec = _mm256_mul_pd(velec,rinvsq22);
1897 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1899 /* Calculate temporary vectorial force */
1900 tx = _mm256_mul_pd(fscal,dx22);
1901 ty = _mm256_mul_pd(fscal,dy22);
1902 tz = _mm256_mul_pd(fscal,dz22);
1904 /* Update vectorial force */
1905 fix2 = _mm256_add_pd(fix2,tx);
1906 fiy2 = _mm256_add_pd(fiy2,ty);
1907 fiz2 = _mm256_add_pd(fiz2,tz);
1909 fjx2 = _mm256_add_pd(fjx2,tx);
1910 fjy2 = _mm256_add_pd(fjy2,ty);
1911 fjz2 = _mm256_add_pd(fjz2,tz);
1913 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1914 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1915 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1916 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1918 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1919 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1921 /* Inner loop uses 262 flops */
1924 /* End of innermost loop */
1926 gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1927 f+i_coord_offset,fshift+i_shift_offset);
1929 /* Increment number of inner iterations */
1930 inneriter += j_index_end - j_index_start;
1932 /* Outer loop uses 18 flops */
1935 /* Increment number of outer iterations */
1938 /* Update outer/inner flops */
1940 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*262);