/*
 * Note: this file was generated by the Gromacs avx_256_double kernel generator.
 *
 *                This source code is part of
 *
 *                 G   R   O   M   A   C   S
 *
 * Copyright (c) 2001-2012, The GROMACS Development Team
 *
 * Gromacs is a library for molecular simulation and trajectory analysis,
 * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
 * a full list of developers and information, check out http://www.gromacs.org
 *
 * This program is free software; you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * To help fund GROMACS development, we humbly ask that you cite
 * the papers people have written on it - you can find them on the website.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <math.h>

#include "../nb_kernel.h"
#include "types/simple.h"
#include "vec.h"
#include "nrnb.h"

#include "gmx_math_x86_avx_256_double.h"
#include "kernelutil_x86_avx_256_double.h"
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecGB_VdwCSTab_GeomP1P1_VF_avx_256_double
 * Electrostatics interaction: GeneralizedBorn
 * VdW interaction:            CubicSplineTable
 * Geometry:                   Particle-Particle
 * Calculate force/pot:        PotentialAndForce
 */
void
nb_kernel_ElecGB_VdwCSTab_GeomP1P1_VF_avx_256_double
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t            * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
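    /* Illustrative sketch (editorial, not generated code): with this unrolling, one
     * __m256d holds the same scalar quantity for four j atoms at once, e.g. after
     * the swizzled coordinate load below (DIM==3):
     *
     *     jx0 = { x[3*jnrA], x[3*jnrB], x[3*jnrC], x[3*jnrD] };
     *
     * so every intrinsic in the inner loop processes four i-j pairs per instruction.
     */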
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    real             scratch[4*DIM];
    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    __m128i          gbitab;
    __m256d          vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
    __m256d          minushalf = _mm256_set1_pd(-0.5);
    real             *invsqrta,*dvda,*gbtab;
    int              nvdwtype;
    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
    __m128i          vfitab;
    __m128i          ifour       = _mm_set1_epi32(4);
    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
    real             *vftab;
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    __m256d          signbit = _mm256_set1_pd(-0.0); /* only bit 63 of each 64-bit lane set */
    __m256d          one     = _mm256_set1_pd(1.0);
    __m256d          two     = _mm256_set1_pd(2.0);

    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_pd(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    vftab            = kernel_data->table_vdw->data;
    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);

    invsqrta         = fr->invsqrta;
    dvda             = fr->dvda;
    gbtabscale       = _mm256_set1_pd(fr->gbtab.scale);
    gbtab            = fr->gbtab.data;
    gbinvepsdiff     = _mm256_set1_pd((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));
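    /* Background sketch (editorial, standard generalized Born theory): the factor
     * (1/epsilon_r - 1/epsilon_solvent) is the dielectric screening prefactor in a
     * Still-style GB pair energy,
     *
     *     V_gb(i,j) = -(1/eps_r - 1/eps_s) * q_i*q_j / f_gb(r_ij, a_i, a_j),
     *
     * which the loop below folds into gbqqfactor together with the charges and the
     * inverse square roots of the Born radii (isaprod).
     */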

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);

        fix0             = _mm256_setzero_pd();
        fiy0             = _mm256_setzero_pd();
        fiz0             = _mm256_setzero_pd();

        /* Load parameters for i particles */
        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
        isai0            = _mm256_set1_pd(invsqrta[inr+0]);
        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

        /* Reset potential sums */
        velecsum         = _mm256_setzero_pd();
        vgbsum           = _mm256_setzero_pd();
        vvdwsum          = _mm256_setzero_pd();
        dvdasum          = _mm256_setzero_pd();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        {
            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = gmx_mm256_invsqrt_pd(rsq00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0);
            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
                                                               invsqrta+jnrC+0,invsqrta+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);
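            /* Illustrative scalar sketch (plain C, not part of the kernel) of one lane
             * of this cubic-spline lookup: the table stores 4 doubles Y,F,G,H per point,
             * with dispersion and repulsion interleaved, hence the index scale of 8:
             *
             *     double rt  = r*tabscale;
             *     int    idx = 8*(int)rt;              // truncate toward zero
             *     double eps = rt - floor(rt);         // fractional part, in [0,1)
             *     double Y = tab[idx],   F = tab[idx+1];
             *     double G = tab[idx+2], H = tab[idx+3];
             *     double Fp = F + eps*(G + eps*H);
             *     double VV = Y + eps*Fp;              // potential
             *     double FF = Fp + eps*(G + 2*eps*H);  // d(potential)/d(eps)
             */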

            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
            isaprod          = _mm256_mul_pd(isai0,isaj0);
            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);

            /* Calculate generalized born table index - this is a separate table from the normal one,
             * but we use the same procedure by multiplying r with scale and truncating to integer.
             */
            rt               = _mm256_mul_pd(r00,gbscale);
            gbitab           = _mm256_cvttpd_epi32(rt);
            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            gbitab           = _mm_slli_epi32(gbitab,2);
            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(gbeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
            vgb              = _mm256_mul_pd(gbqqfactor,VV);

            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
            fjptrA           = dvda+jnrA;
            fjptrB           = dvda+jnrB;
            fjptrC           = dvda+jnrC;
            fjptrD           = dvda+jnrD;
            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
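            /* Editorial note: dvdatmp = -(1/2)*(vgb + fgb*r) is the per-pair term from
             * which the derivatives of the GB energy with respect to the Born radii are
             * built. It is accumulated twice: scaled by isaj0^2 into dvda for each j atom
             * here, and scaled by isai0^2 into dvda for the i atom after the inner loop,
             * so both atoms of the pair receive their share.
             */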
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw6            = _mm256_mul_pd(c6_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw12           = _mm256_mul_pd(c12_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum         = _mm256_add_pd(velecsum,velec);
            vgbsum           = _mm256_add_pd(vgbsum,vgb);
            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);

            fscal            = _mm256_add_pd(felec,fvdw);
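            /* Illustrative scalar sketch (per lane, plain C): the scalar factors
             * assembled above are force divided by distance, so multiplying by the
             * displacement components directly yields the Cartesian force:
             *
             *     velec = qq/r;                             // Coulomb energy
             *     felec = (qq/(r*r) - fgb)/r;               // Coulomb + GB force over r
             *     fvdw  = -(c6*FF6 + c12*FF12)*tabscale/r;  // tabulated LJ force over r
             *     fscal = felec + fvdw;                     // F_x = fscal*dx, etc.
             */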

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA           = f+j_coord_offsetA;
            fjptrB           = f+j_coord_offsetB;
            fjptrC           = f+j_coord_offsetC;
            fjptrD           = f+j_coord_offsetD;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 91 flops */
        }

        if(jidx<j_index_end)
        {
            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0         = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

            tmpmask1         = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0         = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask       = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
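            /* Illustrative scalar sketch (per lane, plain C) of what this mask
             * construction achieves: padding entries in jjnr are negative, and the
             * epilogue computes them like real pairs, then masks the results out:
             *
             *     mask  = (jnr < 0) ? ~0ULL : 0ULL;   // all-ones for padding lanes
             *     value = value & ~mask;              // andnot clears dummy lanes
             *
             * The permutes merely widen each 32-bit comparison result to fill the
             * 64-bit lane of the corresponding double.
             */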

            jnrA             = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB             = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC             = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD             = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = gmx_mm256_invsqrt_pd(rsq00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0);
            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
                                                               invsqrta+jnrC+0,invsqrta+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);
            r00              = _mm256_andnot_pd(dummy_mask,r00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);

            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
            isaprod          = _mm256_mul_pd(isai0,isaj0);
            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);

            /* Calculate generalized born table index - this is a separate table from the normal one,
             * but we use the same procedure by multiplying r with scale and truncating to integer.
             */
            rt               = _mm256_mul_pd(r00,gbscale);
            gbitab           = _mm256_cvttpd_epi32(rt);
            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            gbitab           = _mm_slli_epi32(gbitab,2);
            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(gbeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
            vgb              = _mm256_mul_pd(gbqqfactor,VV);

            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
            dvdatmp          = _mm256_andnot_pd(dummy_mask,dvdatmp);
            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
            /* The pointers to scratch make sure that this code, even with compilers
             * that take gmx_restrict seriously (e.g. icc 13), really can't screw things up.
             */
            fjptrA           = (jnrlistA>=0) ? dvda+jnrA : scratch;
            fjptrB           = (jnrlistB>=0) ? dvda+jnrB : scratch;
            fjptrC           = (jnrlistC>=0) ? dvda+jnrC : scratch;
            fjptrD           = (jnrlistD>=0) ? dvda+jnrD : scratch;
            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw6            = _mm256_mul_pd(c6_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
            vvdw12           = _mm256_mul_pd(c12_00,VV);
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            vvdw             = _mm256_add_pd(vvdw12,vvdw6);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_andnot_pd(dummy_mask,velec);
            velecsum         = _mm256_add_pd(velecsum,velec);
            vgb              = _mm256_andnot_pd(dummy_mask,vgb);
            vgbsum           = _mm256_add_pd(vgbsum,vgb);
            vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
            vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);

            fscal            = _mm256_add_pd(felec,fvdw);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA           = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB           = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC           = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD           = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 92 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        ggid             = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_pd(vgbsum,kernel_data->energygrp_polarization+ggid);
        gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
        dvdasum          = _mm256_mul_pd(dvdasum, _mm256_mul_pd(isai0,isai0));
        gmx_mm256_update_1pot_pd(dvdasum,dvda+inr);

        /* Increment number of inner iterations */
        inneriter       += j_index_end - j_index_start;

        /* Outer loop uses 10 flops */
    }

    /* Increment number of outer iterations */
    outeriter       += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_VF,outeriter*10 + inneriter*92);
}

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecGB_VdwCSTab_GeomP1P1_F_avx_256_double
 * Electrostatics interaction: GeneralizedBorn
 * VdW interaction:            CubicSplineTable
 * Geometry:                   Particle-Particle
 * Calculate force/pot:        Force
 */
void
nb_kernel_ElecGB_VdwCSTab_GeomP1P1_F_avx_256_double
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t            * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    real             scratch[4*DIM];
    __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    __m128i          gbitab;
    __m256d          vgb,fgb,vgbsum,dvdasum,gbscale,gbtabscale,isaprod,gbqqfactor,gbinvepsdiff,gbeps,dvdatmp;
    __m256d          minushalf = _mm256_set1_pd(-0.5);
    real             *invsqrta,*dvda,*gbtab;
    int              nvdwtype;
    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
    __m128i          vfitab;
    __m128i          ifour       = _mm_set1_epi32(4);
    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
    real             *vftab;
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    __m256d          signbit = _mm256_set1_pd(-0.0); /* only bit 63 of each 64-bit lane set */
    __m256d          one     = _mm256_set1_pd(1.0);
    __m256d          two     = _mm256_set1_pd(2.0);

    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_pd(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    vftab            = kernel_data->table_vdw->data;
    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);

    invsqrta         = fr->invsqrta;
    dvda             = fr->dvda;
    gbtabscale       = _mm256_set1_pd(fr->gbtab.scale);
    gbtab            = fr->gbtab.data;
    gbinvepsdiff     = _mm256_set1_pd((1.0/fr->epsilon_r) - (1.0/fr->gb_epsilon_solvent));

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_1rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,&ix0,&iy0,&iz0);

        fix0             = _mm256_setzero_pd();
        fiy0             = _mm256_setzero_pd();
        fiz0             = _mm256_setzero_pd();

        /* Load parameters for i particles */
        iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
        isai0            = _mm256_set1_pd(invsqrta[inr+0]);
        vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

        dvdasum          = _mm256_setzero_pd();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        {
            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = gmx_mm256_invsqrt_pd(rsq00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0);
            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
                                                               invsqrta+jnrC+0,invsqrta+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);

            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
            isaprod          = _mm256_mul_pd(isai0,isaj0);
            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);

            /* Calculate generalized born table index - this is a separate table from the normal one,
             * but we use the same procedure by multiplying r with scale and truncating to integer.
             */
            rt               = _mm256_mul_pd(r00,gbscale);
            gbitab           = _mm256_cvttpd_epi32(rt);
            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            gbitab           = _mm_slli_epi32(gbitab,2);
            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(gbeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
            vgb              = _mm256_mul_pd(gbqqfactor,VV);

            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
            fjptrA           = dvda+jnrA;
            fjptrB           = dvda+jnrB;
            fjptrC           = dvda+jnrC;
            fjptrD           = dvda+jnrD;
            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);
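            /* Editorial note: unlike the VF kernel above, this force-only path
             * evaluates just the spline derivative FF = Fp + eps*(G + 2*Heps) and
             * skips the potential VV = Y + eps*Fp, which accounts for the lower
             * flop counts in this kernel (81/82 per iteration vs 91/92 above).
             */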

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            fscal            = _mm256_add_pd(felec,fvdw);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA           = f+j_coord_offsetA;
            fjptrB           = f+j_coord_offsetB;
            fjptrC           = f+j_coord_offsetC;
            fjptrD           = f+j_coord_offsetD;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 81 flops */
        }

        if(jidx<j_index_end)
        {
            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0         = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

            tmpmask1         = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0         = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask       = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));

            jnrA             = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB             = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC             = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD             = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);

            rinv00           = gmx_mm256_invsqrt_pd(rsq00);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_4real_swizzle_pd(charge+jnrA+0,charge+jnrB+0,
                                                               charge+jnrC+0,charge+jnrD+0);
            isaj0            = gmx_mm256_load_4real_swizzle_pd(invsqrta+jnrA+0,invsqrta+jnrB+0,
                                                               invsqrta+jnrC+0,invsqrta+jnrD+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);
            r00              = _mm256_andnot_pd(dummy_mask,r00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_pd(iq0,jq0);
            gmx_mm256_load_4pair_swizzle_pd(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            &c6_00,&c12_00);

            /* Calculate table index by multiplying r with table scale and truncate to integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);

            /* GENERALIZED BORN AND COULOMB ELECTROSTATICS */
            isaprod          = _mm256_mul_pd(isai0,isaj0);
            gbqqfactor       = _mm256_xor_pd(signbit,_mm256_mul_pd(qq00,_mm256_mul_pd(isaprod,gbinvepsdiff)));
            gbscale          = _mm256_mul_pd(isaprod,gbtabscale);

            /* Calculate generalized born table index - this is a separate table from the normal one,
             * but we use the same procedure by multiplying r with scale and truncating to integer.
             */
            rt               = _mm256_mul_pd(r00,gbscale);
            gbitab           = _mm256_cvttpd_epi32(rt);
            gbeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            gbitab           = _mm_slli_epi32(gbitab,2);
            Y                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,0) );
            F                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,1) );
            G                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,2) );
            H                = _mm256_load_pd( gbtab + _mm_extract_epi32(gbitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(gbeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(gbeps,_mm256_add_pd(G,Heps)));
            VV               = _mm256_add_pd(Y,_mm256_mul_pd(gbeps,Fp));
            vgb              = _mm256_mul_pd(gbqqfactor,VV);

            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(gbeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fgb              = _mm256_mul_pd(gbqqfactor,_mm256_mul_pd(FF,gbscale));
            dvdatmp          = _mm256_mul_pd(minushalf,_mm256_add_pd(vgb,_mm256_mul_pd(fgb,r00)));
            dvdatmp          = _mm256_andnot_pd(dummy_mask,dvdatmp);
            dvdasum          = _mm256_add_pd(dvdasum,dvdatmp);
            /* The pointers to scratch make sure that this code, even with compilers
             * that take gmx_restrict seriously (e.g. icc 13), really can't screw things up.
             */
            fjptrA           = (jnrlistA>=0) ? dvda+jnrA : scratch;
            fjptrB           = (jnrlistB>=0) ? dvda+jnrB : scratch;
            fjptrC           = (jnrlistC>=0) ? dvda+jnrC : scratch;
            fjptrD           = (jnrlistD>=0) ? dvda+jnrD : scratch;
            gmx_mm256_increment_4real_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
                                                 _mm256_mul_pd(dvdatmp,_mm256_mul_pd(isaj0,isaj0)));
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(velec,rinv00),fgb),rinv00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            fscal            = _mm256_add_pd(felec,fvdw);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjptrA           = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB           = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC           = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD           = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            gmx_mm256_decrement_1rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,tx,ty,tz);

            /* Inner loop uses 82 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_1atom_swizzle_pd(fix0,fiy0,fiz0,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        dvdasum          = _mm256_mul_pd(dvdasum, _mm256_mul_pd(isai0,isai0));
        gmx_mm256_update_1pot_pd(dvdasum,dvda+inr);

        /* Increment number of inner iterations */
        inneriter       += j_index_end - j_index_start;

        /* Outer loop uses 7 flops */
    }

    /* Increment number of outer iterations */
    outeriter       += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_F,outeriter*7 + inneriter*82);
}