2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
42 #include "../nb_kernel.h"
43 #include "gromacs/legacyheaders/types/simple.h"
44 #include "gromacs/math/vec.h"
45 #include "gromacs/legacyheaders/nrnb.h"
47 #include "gromacs/simd/math_x86_avx_256_single.h"
48 #include "kernelutil_x86_avx_256_single.h"
51 * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_single
52 * Electrostatics interaction: Ewald
53 * VdW interaction: LennardJones
54 * Geometry: Water4-Particle
55 * Calculate force/pot: PotentialAndForce
/*
 * NOTE(review): this kernel is machine-generated (see file header); the code
 * below is deliberately left byte-identical and only review comments have
 * been added. Several generated lines (braces, some declarations and the
 * fscal composition statements) are not visible in this excerpt — confirm
 * against the full generated file before editing.
 *
 * Computes Ewald electrostatics (analytical PME correction) and Lennard-Jones
 * interactions, both with potential-switch modifiers, for a 4-site water
 * i-molecule against single j particles, accumulating potentials AND forces
 * ("VF" flavour), 8-way j-unrolled in 256-bit AVX single precision.
 */
58 nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_VF_avx_256_single
59 (t_nblist * gmx_restrict nlist,
60 rvec * gmx_restrict xx,
61 rvec * gmx_restrict ff,
62 t_forcerec * gmx_restrict fr,
63 t_mdatoms * gmx_restrict mdatoms,
64 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65 t_nrnb * gmx_restrict nrnb)
67 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
68 * just 0 for non-waters.
69 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
70 * jnr indices corresponding to data put in the four positions in the SIMD register.
72 int i_shift_offset,i_coord_offset,outeriter,inneriter;
73 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74 int jnrA,jnrB,jnrC,jnrD;
75 int jnrE,jnrF,jnrG,jnrH;
76 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
77 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
78 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
79 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
80 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
82 real *shiftvec,*fshift,*x,*f;
83 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
85 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86 real * vdwioffsetptr0;
87 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88 real * vdwioffsetptr1;
89 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90 real * vdwioffsetptr2;
91 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92 real * vdwioffsetptr3;
93 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
94 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
95 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
96 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
97 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
98 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
99 __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
100 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
103 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
106 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
107 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
109 __m128i ewitab_lo,ewitab_hi;
110 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
111 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
113 __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
114 real rswitch_scalar,d_scalar;
115 __m256 dummy_mask,cutoff_mask;
116 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
117 __m256 one = _mm256_set1_ps(1.0);
118 __m256 two = _mm256_set1_ps(2.0);
/* Pull neighbour-list bookkeeping and force-field tables out of the
 * argument structs once, before the loops. */
124 jindex = nlist->jindex;
126 shiftidx = nlist->shift;
128 shiftvec = fr->shift_vec[0];
129 fshift = fr->fshift[0];
130 facel = _mm256_set1_ps(fr->epsfac);
131 charge = mdatoms->chargeA;
132 nvdwtype = fr->ntype;
134 vdwtype = mdatoms->typeA;
/* Ewald parameters: shift, splitting coefficient beta and its powers,
 * plus the tabulated-correction scale (table itself unused by the
 * analytical PME-correction path below). */
136 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
137 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
138 beta2 = _mm256_mul_ps(beta,beta);
139 beta3 = _mm256_mul_ps(beta,beta2);
141 ewtab = fr->ic->tabq_coul_FDV0;
142 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
143 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
145 /* Setup water-specific parameters */
146 inr = nlist->iinr[0];
/* Water4 geometry: site 0 is the LJ site (no charge used here), sites
 * 1-3 carry the charges, pre-scaled by epsfac. */
147 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
148 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
149 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
150 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
152 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
153 rcutoff_scalar = fr->rcoulomb;
154 rcutoff = _mm256_set1_ps(rcutoff_scalar);
155 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
157 rswitch_scalar = fr->rcoulomb_switch;
158 rswitch = _mm256_set1_ps(rswitch_scalar);
159 /* Setup switch parameters */
/* Fifth-order polynomial switch coefficients over d = r - rswitch,
 * normalized by (rcut-rswitch)^n; swV* give the value, swF* the derivative. */
160 d_scalar = rcutoff_scalar-rswitch_scalar;
161 d = _mm256_set1_ps(d_scalar);
162 swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
163 swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
164 swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
165 swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
166 swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
167 swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
169 /* Avoid stupid compiler warnings */
170 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
183 for(iidx=0;iidx<4*DIM;iidx++)
188 /* Start outer loop over neighborlists */
189 for(iidx=0; iidx<nri; iidx++)
191 /* Load shift vector for this list */
192 i_shift_offset = DIM*shiftidx[iidx];
194 /* Load limits for loop over neighbors */
195 j_index_start = jindex[iidx];
196 j_index_end = jindex[iidx+1];
198 /* Get outer coordinate index */
200 i_coord_offset = DIM*inr;
202 /* Load i particle coords and add shift vector */
203 gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
204 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
/* Zero the per-i-atom force accumulators for all four water sites. */
206 fix0 = _mm256_setzero_ps();
207 fiy0 = _mm256_setzero_ps();
208 fiz0 = _mm256_setzero_ps();
209 fix1 = _mm256_setzero_ps();
210 fiy1 = _mm256_setzero_ps();
211 fiz1 = _mm256_setzero_ps();
212 fix2 = _mm256_setzero_ps();
213 fiy2 = _mm256_setzero_ps();
214 fiz2 = _mm256_setzero_ps();
215 fix3 = _mm256_setzero_ps();
216 fiy3 = _mm256_setzero_ps();
217 fiz3 = _mm256_setzero_ps();
219 /* Reset potential sums */
220 velecsum = _mm256_setzero_ps();
221 vvdwsum = _mm256_setzero_ps();
223 /* Start inner kernel loop */
/* Main loop: runs only while a full batch of 8 real j atoms is
 * available (jjnr entries >= 0); the padded tail batch is handled by
 * the masked epilogue further down. */
224 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
227 /* Get j neighbor index, and coordinate index */
236 j_coord_offsetA = DIM*jnrA;
237 j_coord_offsetB = DIM*jnrB;
238 j_coord_offsetC = DIM*jnrC;
239 j_coord_offsetD = DIM*jnrD;
240 j_coord_offsetE = DIM*jnrE;
241 j_coord_offsetF = DIM*jnrF;
242 j_coord_offsetG = DIM*jnrG;
243 j_coord_offsetH = DIM*jnrH;
245 /* load j atom coordinates */
246 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
247 x+j_coord_offsetC,x+j_coord_offsetD,
248 x+j_coord_offsetE,x+j_coord_offsetF,
249 x+j_coord_offsetG,x+j_coord_offsetH,
252 /* Calculate displacement vector */
253 dx00 = _mm256_sub_ps(ix0,jx0);
254 dy00 = _mm256_sub_ps(iy0,jy0);
255 dz00 = _mm256_sub_ps(iz0,jz0);
256 dx10 = _mm256_sub_ps(ix1,jx0);
257 dy10 = _mm256_sub_ps(iy1,jy0);
258 dz10 = _mm256_sub_ps(iz1,jz0);
259 dx20 = _mm256_sub_ps(ix2,jx0);
260 dy20 = _mm256_sub_ps(iy2,jy0);
261 dz20 = _mm256_sub_ps(iz2,jz0);
262 dx30 = _mm256_sub_ps(ix3,jx0);
263 dy30 = _mm256_sub_ps(iy3,jy0);
264 dz30 = _mm256_sub_ps(iz3,jz0);
266 /* Calculate squared distance and things based on it */
267 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
268 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
269 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
270 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
272 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
273 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
274 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
275 rinv30 = gmx_mm256_invsqrt_ps(rsq30);
277 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
278 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
279 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
280 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
282 /* Load parameters for j particles */
283 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
284 charge+jnrC+0,charge+jnrD+0,
285 charge+jnrE+0,charge+jnrF+0,
286 charge+jnrG+0,charge+jnrH+0);
287 vdwjidx0A = 2*vdwtype[jnrA+0];
288 vdwjidx0B = 2*vdwtype[jnrB+0];
289 vdwjidx0C = 2*vdwtype[jnrC+0];
290 vdwjidx0D = 2*vdwtype[jnrD+0];
291 vdwjidx0E = 2*vdwtype[jnrE+0];
292 vdwjidx0F = 2*vdwtype[jnrF+0];
293 vdwjidx0G = 2*vdwtype[jnrG+0];
294 vdwjidx0H = 2*vdwtype[jnrH+0];
296 fjx0 = _mm256_setzero_ps();
297 fjy0 = _mm256_setzero_ps();
298 fjz0 = _mm256_setzero_ps();
300 /**************************
301 * CALCULATE INTERACTIONS *
302 **************************/
/* Site 0 (LJ site) vs j: Lennard-Jones with potential switch.
 * The whole 8-lane batch is skipped when no lane is inside the cutoff. */
304 if (gmx_mm256_any_lt(rsq00,rcutoff2))
307 r00 = _mm256_mul_ps(rsq00,rinv00);
309 /* Compute parameters for interactions between i and j atoms */
310 gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
311 vdwioffsetptr0+vdwjidx0B,
312 vdwioffsetptr0+vdwjidx0C,
313 vdwioffsetptr0+vdwjidx0D,
314 vdwioffsetptr0+vdwjidx0E,
315 vdwioffsetptr0+vdwjidx0F,
316 vdwioffsetptr0+vdwjidx0G,
317 vdwioffsetptr0+vdwjidx0H,
320 /* LENNARD-JONES DISPERSION/REPULSION */
322 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
323 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
324 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
325 vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
326 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
/* Switch function: d clamped to 0 below rswitch so sw==1, dsw==0 there. */
328 d = _mm256_sub_ps(r00,rswitch);
329 d = _mm256_max_ps(d,_mm256_setzero_ps());
330 d2 = _mm256_mul_ps(d,d);
331 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
333 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
335 /* Evaluate switch function */
336 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
337 fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
338 vvdw = _mm256_mul_ps(vvdw,sw);
339 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
341 /* Update potential sum for this i atom from the interaction with this j atom. */
342 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
343 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
347 fscal = _mm256_and_ps(fscal,cutoff_mask);
349 /* Calculate temporary vectorial force */
350 tx = _mm256_mul_ps(fscal,dx00);
351 ty = _mm256_mul_ps(fscal,dy00);
352 tz = _mm256_mul_ps(fscal,dz00);
354 /* Update vectorial force */
355 fix0 = _mm256_add_ps(fix0,tx);
356 fiy0 = _mm256_add_ps(fiy0,ty);
357 fiz0 = _mm256_add_ps(fiz0,tz);
359 fjx0 = _mm256_add_ps(fjx0,tx);
360 fjy0 = _mm256_add_ps(fjy0,ty);
361 fjz0 = _mm256_add_ps(fjz0,tz);
365 /**************************
366 * CALCULATE INTERACTIONS *
367 **************************/
/* Site 1 (charge site) vs j: Ewald electrostatics with potential switch. */
369 if (gmx_mm256_any_lt(rsq10,rcutoff2))
372 r10 = _mm256_mul_ps(rsq10,rinv10);
374 /* Compute parameters for interactions between i and j atoms */
375 qq10 = _mm256_mul_ps(iq1,jq0);
377 /* EWALD ELECTROSTATICS */
379 /* Analytical PME correction */
/* velec = qq*(1/r - erf(beta r)/r); felec likewise built from the
 * polynomial PME-correction approximations of the erf terms. */
380 zeta2 = _mm256_mul_ps(beta2,rsq10);
381 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
382 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
383 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
384 felec = _mm256_mul_ps(qq10,felec);
385 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
386 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
387 velec = _mm256_sub_ps(rinv10,pmecorrV);
388 velec = _mm256_mul_ps(qq10,velec);
390 d = _mm256_sub_ps(r10,rswitch);
391 d = _mm256_max_ps(d,_mm256_setzero_ps());
392 d2 = _mm256_mul_ps(d,d);
393 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
395 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
397 /* Evaluate switch function */
398 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
399 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
400 velec = _mm256_mul_ps(velec,sw);
401 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
403 /* Update potential sum for this i atom from the interaction with this j atom. */
404 velec = _mm256_and_ps(velec,cutoff_mask);
405 velecsum = _mm256_add_ps(velecsum,velec);
409 fscal = _mm256_and_ps(fscal,cutoff_mask);
411 /* Calculate temporary vectorial force */
412 tx = _mm256_mul_ps(fscal,dx10);
413 ty = _mm256_mul_ps(fscal,dy10);
414 tz = _mm256_mul_ps(fscal,dz10);
416 /* Update vectorial force */
417 fix1 = _mm256_add_ps(fix1,tx);
418 fiy1 = _mm256_add_ps(fiy1,ty);
419 fiz1 = _mm256_add_ps(fiz1,tz);
421 fjx0 = _mm256_add_ps(fjx0,tx);
422 fjy0 = _mm256_add_ps(fjy0,ty);
423 fjz0 = _mm256_add_ps(fjz0,tz);
427 /**************************
428 * CALCULATE INTERACTIONS *
429 **************************/
/* Site 2 vs j: same Ewald + switch pattern as site 1. */
431 if (gmx_mm256_any_lt(rsq20,rcutoff2))
434 r20 = _mm256_mul_ps(rsq20,rinv20);
436 /* Compute parameters for interactions between i and j atoms */
437 qq20 = _mm256_mul_ps(iq2,jq0);
439 /* EWALD ELECTROSTATICS */
441 /* Analytical PME correction */
442 zeta2 = _mm256_mul_ps(beta2,rsq20);
443 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
444 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
445 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
446 felec = _mm256_mul_ps(qq20,felec);
447 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
448 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
449 velec = _mm256_sub_ps(rinv20,pmecorrV);
450 velec = _mm256_mul_ps(qq20,velec);
452 d = _mm256_sub_ps(r20,rswitch);
453 d = _mm256_max_ps(d,_mm256_setzero_ps());
454 d2 = _mm256_mul_ps(d,d);
455 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
457 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
459 /* Evaluate switch function */
460 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
461 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
462 velec = _mm256_mul_ps(velec,sw);
463 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
465 /* Update potential sum for this i atom from the interaction with this j atom. */
466 velec = _mm256_and_ps(velec,cutoff_mask);
467 velecsum = _mm256_add_ps(velecsum,velec);
471 fscal = _mm256_and_ps(fscal,cutoff_mask);
473 /* Calculate temporary vectorial force */
474 tx = _mm256_mul_ps(fscal,dx20);
475 ty = _mm256_mul_ps(fscal,dy20);
476 tz = _mm256_mul_ps(fscal,dz20);
478 /* Update vectorial force */
479 fix2 = _mm256_add_ps(fix2,tx);
480 fiy2 = _mm256_add_ps(fiy2,ty);
481 fiz2 = _mm256_add_ps(fiz2,tz);
483 fjx0 = _mm256_add_ps(fjx0,tx);
484 fjy0 = _mm256_add_ps(fjy0,ty);
485 fjz0 = _mm256_add_ps(fjz0,tz);
489 /**************************
490 * CALCULATE INTERACTIONS *
491 **************************/
/* Site 3 vs j: same Ewald + switch pattern as sites 1 and 2. */
493 if (gmx_mm256_any_lt(rsq30,rcutoff2))
496 r30 = _mm256_mul_ps(rsq30,rinv30);
498 /* Compute parameters for interactions between i and j atoms */
499 qq30 = _mm256_mul_ps(iq3,jq0);
501 /* EWALD ELECTROSTATICS */
503 /* Analytical PME correction */
504 zeta2 = _mm256_mul_ps(beta2,rsq30);
505 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
506 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
507 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
508 felec = _mm256_mul_ps(qq30,felec);
509 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
510 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
511 velec = _mm256_sub_ps(rinv30,pmecorrV);
512 velec = _mm256_mul_ps(qq30,velec);
514 d = _mm256_sub_ps(r30,rswitch);
515 d = _mm256_max_ps(d,_mm256_setzero_ps());
516 d2 = _mm256_mul_ps(d,d);
517 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
519 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
521 /* Evaluate switch function */
522 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
523 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
524 velec = _mm256_mul_ps(velec,sw);
525 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
527 /* Update potential sum for this i atom from the interaction with this j atom. */
528 velec = _mm256_and_ps(velec,cutoff_mask);
529 velecsum = _mm256_add_ps(velecsum,velec);
533 fscal = _mm256_and_ps(fscal,cutoff_mask);
535 /* Calculate temporary vectorial force */
536 tx = _mm256_mul_ps(fscal,dx30);
537 ty = _mm256_mul_ps(fscal,dy30);
538 tz = _mm256_mul_ps(fscal,dz30);
540 /* Update vectorial force */
541 fix3 = _mm256_add_ps(fix3,tx);
542 fiy3 = _mm256_add_ps(fiy3,ty);
543 fiz3 = _mm256_add_ps(fiz3,tz);
545 fjx0 = _mm256_add_ps(fjx0,tx);
546 fjy0 = _mm256_add_ps(fjy0,ty);
547 fjz0 = _mm256_add_ps(fjz0,tz);
/* Scatter the accumulated (negated) j forces back to memory. */
551 fjptrA = f+j_coord_offsetA;
552 fjptrB = f+j_coord_offsetB;
553 fjptrC = f+j_coord_offsetC;
554 fjptrD = f+j_coord_offsetD;
555 fjptrE = f+j_coord_offsetE;
556 fjptrF = f+j_coord_offsetF;
557 fjptrG = f+j_coord_offsetG;
558 fjptrH = f+j_coord_offsetH;
560 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
562 /* Inner loop uses 386 flops */
/* Epilogue: the final, possibly partial batch. Negative jjnr entries
 * are list padding; they are masked out of all energy/force updates
 * below via dummy_mask and redirected to a scratch force buffer. */
568 /* Get j neighbor index, and coordinate index */
569 jnrlistA = jjnr[jidx];
570 jnrlistB = jjnr[jidx+1];
571 jnrlistC = jjnr[jidx+2];
572 jnrlistD = jjnr[jidx+3];
573 jnrlistE = jjnr[jidx+4];
574 jnrlistF = jjnr[jidx+5];
575 jnrlistG = jjnr[jidx+6];
576 jnrlistH = jjnr[jidx+7];
577 /* Sign of each element will be negative for non-real atoms.
578 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
579 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
581 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
582 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
/* Clamp padded indices to 0 so the loads below stay in bounds; their
 * contributions are zeroed by dummy_mask afterwards. */
584 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
585 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
586 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
587 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
588 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
589 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
590 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
591 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
592 j_coord_offsetA = DIM*jnrA;
593 j_coord_offsetB = DIM*jnrB;
594 j_coord_offsetC = DIM*jnrC;
595 j_coord_offsetD = DIM*jnrD;
596 j_coord_offsetE = DIM*jnrE;
597 j_coord_offsetF = DIM*jnrF;
598 j_coord_offsetG = DIM*jnrG;
599 j_coord_offsetH = DIM*jnrH;
601 /* load j atom coordinates */
602 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
603 x+j_coord_offsetC,x+j_coord_offsetD,
604 x+j_coord_offsetE,x+j_coord_offsetF,
605 x+j_coord_offsetG,x+j_coord_offsetH,
608 /* Calculate displacement vector */
609 dx00 = _mm256_sub_ps(ix0,jx0);
610 dy00 = _mm256_sub_ps(iy0,jy0);
611 dz00 = _mm256_sub_ps(iz0,jz0);
612 dx10 = _mm256_sub_ps(ix1,jx0);
613 dy10 = _mm256_sub_ps(iy1,jy0);
614 dz10 = _mm256_sub_ps(iz1,jz0);
615 dx20 = _mm256_sub_ps(ix2,jx0);
616 dy20 = _mm256_sub_ps(iy2,jy0);
617 dz20 = _mm256_sub_ps(iz2,jz0);
618 dx30 = _mm256_sub_ps(ix3,jx0);
619 dy30 = _mm256_sub_ps(iy3,jy0);
620 dz30 = _mm256_sub_ps(iz3,jz0);
622 /* Calculate squared distance and things based on it */
623 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
624 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
625 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
626 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
628 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
629 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
630 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
631 rinv30 = gmx_mm256_invsqrt_ps(rsq30);
633 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
634 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
635 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
636 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
638 /* Load parameters for j particles */
639 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
640 charge+jnrC+0,charge+jnrD+0,
641 charge+jnrE+0,charge+jnrF+0,
642 charge+jnrG+0,charge+jnrH+0);
643 vdwjidx0A = 2*vdwtype[jnrA+0];
644 vdwjidx0B = 2*vdwtype[jnrB+0];
645 vdwjidx0C = 2*vdwtype[jnrC+0];
646 vdwjidx0D = 2*vdwtype[jnrD+0];
647 vdwjidx0E = 2*vdwtype[jnrE+0];
648 vdwjidx0F = 2*vdwtype[jnrF+0];
649 vdwjidx0G = 2*vdwtype[jnrG+0];
650 vdwjidx0H = 2*vdwtype[jnrH+0];
652 fjx0 = _mm256_setzero_ps();
653 fjy0 = _mm256_setzero_ps();
654 fjz0 = _mm256_setzero_ps();
656 /**************************
657 * CALCULATE INTERACTIONS *
658 **************************/
/* Masked variant of the site-0 LJ interaction; r is cleared in dummy
 * lanes so the switch polynomial stays finite for padded entries. */
660 if (gmx_mm256_any_lt(rsq00,rcutoff2))
663 r00 = _mm256_mul_ps(rsq00,rinv00);
664 r00 = _mm256_andnot_ps(dummy_mask,r00);
666 /* Compute parameters for interactions between i and j atoms */
667 gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
668 vdwioffsetptr0+vdwjidx0B,
669 vdwioffsetptr0+vdwjidx0C,
670 vdwioffsetptr0+vdwjidx0D,
671 vdwioffsetptr0+vdwjidx0E,
672 vdwioffsetptr0+vdwjidx0F,
673 vdwioffsetptr0+vdwjidx0G,
674 vdwioffsetptr0+vdwjidx0H,
677 /* LENNARD-JONES DISPERSION/REPULSION */
679 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
680 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
681 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
682 vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
683 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
685 d = _mm256_sub_ps(r00,rswitch);
686 d = _mm256_max_ps(d,_mm256_setzero_ps());
687 d2 = _mm256_mul_ps(d,d);
688 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
690 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
692 /* Evaluate switch function */
693 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
694 fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
695 vvdw = _mm256_mul_ps(vvdw,sw);
696 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
698 /* Update potential sum for this i atom from the interaction with this j atom. */
699 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
700 vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
701 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
705 fscal = _mm256_and_ps(fscal,cutoff_mask);
707 fscal = _mm256_andnot_ps(dummy_mask,fscal);
709 /* Calculate temporary vectorial force */
710 tx = _mm256_mul_ps(fscal,dx00);
711 ty = _mm256_mul_ps(fscal,dy00);
712 tz = _mm256_mul_ps(fscal,dz00);
714 /* Update vectorial force */
715 fix0 = _mm256_add_ps(fix0,tx);
716 fiy0 = _mm256_add_ps(fiy0,ty);
717 fiz0 = _mm256_add_ps(fiz0,tz);
719 fjx0 = _mm256_add_ps(fjx0,tx);
720 fjy0 = _mm256_add_ps(fjy0,ty);
721 fjz0 = _mm256_add_ps(fjz0,tz);
725 /**************************
726 * CALCULATE INTERACTIONS *
727 **************************/
/* Masked variant of the site-1 Ewald interaction. */
729 if (gmx_mm256_any_lt(rsq10,rcutoff2))
732 r10 = _mm256_mul_ps(rsq10,rinv10);
733 r10 = _mm256_andnot_ps(dummy_mask,r10);
735 /* Compute parameters for interactions between i and j atoms */
736 qq10 = _mm256_mul_ps(iq1,jq0);
738 /* EWALD ELECTROSTATICS */
740 /* Analytical PME correction */
741 zeta2 = _mm256_mul_ps(beta2,rsq10);
742 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
743 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
744 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
745 felec = _mm256_mul_ps(qq10,felec);
746 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
747 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
748 velec = _mm256_sub_ps(rinv10,pmecorrV);
749 velec = _mm256_mul_ps(qq10,velec);
751 d = _mm256_sub_ps(r10,rswitch);
752 d = _mm256_max_ps(d,_mm256_setzero_ps());
753 d2 = _mm256_mul_ps(d,d);
754 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
756 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
758 /* Evaluate switch function */
759 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
760 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
761 velec = _mm256_mul_ps(velec,sw);
762 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
764 /* Update potential sum for this i atom from the interaction with this j atom. */
765 velec = _mm256_and_ps(velec,cutoff_mask);
766 velec = _mm256_andnot_ps(dummy_mask,velec);
767 velecsum = _mm256_add_ps(velecsum,velec);
771 fscal = _mm256_and_ps(fscal,cutoff_mask);
773 fscal = _mm256_andnot_ps(dummy_mask,fscal);
775 /* Calculate temporary vectorial force */
776 tx = _mm256_mul_ps(fscal,dx10);
777 ty = _mm256_mul_ps(fscal,dy10);
778 tz = _mm256_mul_ps(fscal,dz10);
780 /* Update vectorial force */
781 fix1 = _mm256_add_ps(fix1,tx);
782 fiy1 = _mm256_add_ps(fiy1,ty);
783 fiz1 = _mm256_add_ps(fiz1,tz);
785 fjx0 = _mm256_add_ps(fjx0,tx);
786 fjy0 = _mm256_add_ps(fjy0,ty);
787 fjz0 = _mm256_add_ps(fjz0,tz);
791 /**************************
792 * CALCULATE INTERACTIONS *
793 **************************/
/* Masked variant of the site-2 Ewald interaction. */
795 if (gmx_mm256_any_lt(rsq20,rcutoff2))
798 r20 = _mm256_mul_ps(rsq20,rinv20);
799 r20 = _mm256_andnot_ps(dummy_mask,r20);
801 /* Compute parameters for interactions between i and j atoms */
802 qq20 = _mm256_mul_ps(iq2,jq0);
804 /* EWALD ELECTROSTATICS */
806 /* Analytical PME correction */
807 zeta2 = _mm256_mul_ps(beta2,rsq20);
808 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
809 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
810 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
811 felec = _mm256_mul_ps(qq20,felec);
812 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
813 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
814 velec = _mm256_sub_ps(rinv20,pmecorrV);
815 velec = _mm256_mul_ps(qq20,velec);
817 d = _mm256_sub_ps(r20,rswitch);
818 d = _mm256_max_ps(d,_mm256_setzero_ps());
819 d2 = _mm256_mul_ps(d,d);
820 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
822 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
824 /* Evaluate switch function */
825 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
826 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
827 velec = _mm256_mul_ps(velec,sw);
828 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
830 /* Update potential sum for this i atom from the interaction with this j atom. */
831 velec = _mm256_and_ps(velec,cutoff_mask);
832 velec = _mm256_andnot_ps(dummy_mask,velec);
833 velecsum = _mm256_add_ps(velecsum,velec);
837 fscal = _mm256_and_ps(fscal,cutoff_mask);
839 fscal = _mm256_andnot_ps(dummy_mask,fscal);
841 /* Calculate temporary vectorial force */
842 tx = _mm256_mul_ps(fscal,dx20);
843 ty = _mm256_mul_ps(fscal,dy20);
844 tz = _mm256_mul_ps(fscal,dz20);
846 /* Update vectorial force */
847 fix2 = _mm256_add_ps(fix2,tx);
848 fiy2 = _mm256_add_ps(fiy2,ty);
849 fiz2 = _mm256_add_ps(fiz2,tz);
851 fjx0 = _mm256_add_ps(fjx0,tx);
852 fjy0 = _mm256_add_ps(fjy0,ty);
853 fjz0 = _mm256_add_ps(fjz0,tz);
857 /**************************
858 * CALCULATE INTERACTIONS *
859 **************************/
/* Masked variant of the site-3 Ewald interaction. */
861 if (gmx_mm256_any_lt(rsq30,rcutoff2))
864 r30 = _mm256_mul_ps(rsq30,rinv30);
865 r30 = _mm256_andnot_ps(dummy_mask,r30);
867 /* Compute parameters for interactions between i and j atoms */
868 qq30 = _mm256_mul_ps(iq3,jq0);
870 /* EWALD ELECTROSTATICS */
872 /* Analytical PME correction */
873 zeta2 = _mm256_mul_ps(beta2,rsq30);
874 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
875 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
876 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
877 felec = _mm256_mul_ps(qq30,felec);
878 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
879 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
880 velec = _mm256_sub_ps(rinv30,pmecorrV);
881 velec = _mm256_mul_ps(qq30,velec);
883 d = _mm256_sub_ps(r30,rswitch);
884 d = _mm256_max_ps(d,_mm256_setzero_ps());
885 d2 = _mm256_mul_ps(d,d);
886 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
888 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
890 /* Evaluate switch function */
891 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
892 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
893 velec = _mm256_mul_ps(velec,sw);
894 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
896 /* Update potential sum for this i atom from the interaction with this j atom. */
897 velec = _mm256_and_ps(velec,cutoff_mask);
898 velec = _mm256_andnot_ps(dummy_mask,velec);
899 velecsum = _mm256_add_ps(velecsum,velec);
903 fscal = _mm256_and_ps(fscal,cutoff_mask);
905 fscal = _mm256_andnot_ps(dummy_mask,fscal);
907 /* Calculate temporary vectorial force */
908 tx = _mm256_mul_ps(fscal,dx30);
909 ty = _mm256_mul_ps(fscal,dy30);
910 tz = _mm256_mul_ps(fscal,dz30);
912 /* Update vectorial force */
913 fix3 = _mm256_add_ps(fix3,tx);
914 fiy3 = _mm256_add_ps(fiy3,ty);
915 fiz3 = _mm256_add_ps(fiz3,tz);
917 fjx0 = _mm256_add_ps(fjx0,tx);
918 fjy0 = _mm256_add_ps(fjy0,ty);
919 fjz0 = _mm256_add_ps(fjz0,tz);
/* Padded entries write their (zeroed) forces to a scratch buffer so no
 * real atom's force array is touched out of bounds. */
923 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
924 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
925 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
926 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
927 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
928 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
929 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
930 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
932 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
934 /* Inner loop uses 390 flops */
937 /* End of innermost loop */
/* Reduce SIMD i-force accumulators into f[] and the shift-force array. */
939 gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
940 f+i_coord_offset,fshift+i_shift_offset);
943 /* Update potential energies */
944 gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
945 gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
947 /* Increment number of inner iterations */
948 inneriter += j_index_end - j_index_start;
950 /* Outer loop uses 26 flops */
953 /* Increment number of outer iterations */
956 /* Update outer/inner flops */
/* Flop accounting for mdrun's performance summary (counts the 390-flop
 * masked-loop figure for all inner iterations). */
958 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*390);
961 * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_single
962 * Electrostatics interaction: Ewald
963 * VdW interaction: LennardJones
964 * Geometry: Water4-Particle
965 * Calculate force/pot: Force
968 nb_kernel_ElecEwSw_VdwLJSw_GeomW4P1_F_avx_256_single
969 (t_nblist * gmx_restrict nlist,
970 rvec * gmx_restrict xx,
971 rvec * gmx_restrict ff,
972 t_forcerec * gmx_restrict fr,
973 t_mdatoms * gmx_restrict mdatoms,
974 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
975 t_nrnb * gmx_restrict nrnb)
977 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
978 * just 0 for non-waters.
979 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
980 * jnr indices corresponding to data put in the four positions in the SIMD register.
982 int i_shift_offset,i_coord_offset,outeriter,inneriter;
983 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
984 int jnrA,jnrB,jnrC,jnrD;
985 int jnrE,jnrF,jnrG,jnrH;
986 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
987 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
988 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
989 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
990 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
992 real *shiftvec,*fshift,*x,*f;
993 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
995 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
996 real * vdwioffsetptr0;
997 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
998 real * vdwioffsetptr1;
999 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1000 real * vdwioffsetptr2;
1001 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1002 real * vdwioffsetptr3;
1003 __m256 ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1004 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1005 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1006 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1007 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1008 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1009 __m256 dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
1010 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
1013 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1016 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
1017 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
1019 __m128i ewitab_lo,ewitab_hi;
1020 __m256 ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1021 __m256 beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1023 __m256 rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
1024 real rswitch_scalar,d_scalar;
1025 __m256 dummy_mask,cutoff_mask;
1026 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1027 __m256 one = _mm256_set1_ps(1.0);
1028 __m256 two = _mm256_set1_ps(2.0);
1034 jindex = nlist->jindex;
1036 shiftidx = nlist->shift;
1038 shiftvec = fr->shift_vec[0];
1039 fshift = fr->fshift[0];
1040 facel = _mm256_set1_ps(fr->epsfac);
1041 charge = mdatoms->chargeA;
1042 nvdwtype = fr->ntype;
1043 vdwparam = fr->nbfp;
1044 vdwtype = mdatoms->typeA;
1046 sh_ewald = _mm256_set1_ps(fr->ic->sh_ewald);
1047 beta = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1048 beta2 = _mm256_mul_ps(beta,beta);
1049 beta3 = _mm256_mul_ps(beta,beta2);
1051 ewtab = fr->ic->tabq_coul_FDV0;
1052 ewtabscale = _mm256_set1_ps(fr->ic->tabq_scale);
1053 ewtabhalfspace = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1055 /* Setup water-specific parameters */
1056 inr = nlist->iinr[0];
1057 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1058 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1059 iq3 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1060 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1062 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1063 rcutoff_scalar = fr->rcoulomb;
1064 rcutoff = _mm256_set1_ps(rcutoff_scalar);
1065 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
1067 rswitch_scalar = fr->rcoulomb_switch;
1068 rswitch = _mm256_set1_ps(rswitch_scalar);
1069 /* Setup switch parameters */
1070 d_scalar = rcutoff_scalar-rswitch_scalar;
1071 d = _mm256_set1_ps(d_scalar);
1072 swV3 = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
1073 swV4 = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1074 swV5 = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1075 swF2 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
1076 swF3 = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1077 swF4 = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1079 /* Avoid stupid compiler warnings */
1080 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1081 j_coord_offsetA = 0;
1082 j_coord_offsetB = 0;
1083 j_coord_offsetC = 0;
1084 j_coord_offsetD = 0;
1085 j_coord_offsetE = 0;
1086 j_coord_offsetF = 0;
1087 j_coord_offsetG = 0;
1088 j_coord_offsetH = 0;
1093 for(iidx=0;iidx<4*DIM;iidx++)
1095 scratch[iidx] = 0.0;
1098 /* Start outer loop over neighborlists */
1099 for(iidx=0; iidx<nri; iidx++)
1101 /* Load shift vector for this list */
1102 i_shift_offset = DIM*shiftidx[iidx];
1104 /* Load limits for loop over neighbors */
1105 j_index_start = jindex[iidx];
1106 j_index_end = jindex[iidx+1];
1108 /* Get outer coordinate index */
1110 i_coord_offset = DIM*inr;
1112 /* Load i particle coords and add shift vector */
1113 gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1114 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1116 fix0 = _mm256_setzero_ps();
1117 fiy0 = _mm256_setzero_ps();
1118 fiz0 = _mm256_setzero_ps();
1119 fix1 = _mm256_setzero_ps();
1120 fiy1 = _mm256_setzero_ps();
1121 fiz1 = _mm256_setzero_ps();
1122 fix2 = _mm256_setzero_ps();
1123 fiy2 = _mm256_setzero_ps();
1124 fiz2 = _mm256_setzero_ps();
1125 fix3 = _mm256_setzero_ps();
1126 fiy3 = _mm256_setzero_ps();
1127 fiz3 = _mm256_setzero_ps();
1129 /* Start inner kernel loop */
1130 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1133 /* Get j neighbor index, and coordinate index */
1135 jnrB = jjnr[jidx+1];
1136 jnrC = jjnr[jidx+2];
1137 jnrD = jjnr[jidx+3];
1138 jnrE = jjnr[jidx+4];
1139 jnrF = jjnr[jidx+5];
1140 jnrG = jjnr[jidx+6];
1141 jnrH = jjnr[jidx+7];
1142 j_coord_offsetA = DIM*jnrA;
1143 j_coord_offsetB = DIM*jnrB;
1144 j_coord_offsetC = DIM*jnrC;
1145 j_coord_offsetD = DIM*jnrD;
1146 j_coord_offsetE = DIM*jnrE;
1147 j_coord_offsetF = DIM*jnrF;
1148 j_coord_offsetG = DIM*jnrG;
1149 j_coord_offsetH = DIM*jnrH;
1151 /* load j atom coordinates */
1152 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1153 x+j_coord_offsetC,x+j_coord_offsetD,
1154 x+j_coord_offsetE,x+j_coord_offsetF,
1155 x+j_coord_offsetG,x+j_coord_offsetH,
1158 /* Calculate displacement vector */
1159 dx00 = _mm256_sub_ps(ix0,jx0);
1160 dy00 = _mm256_sub_ps(iy0,jy0);
1161 dz00 = _mm256_sub_ps(iz0,jz0);
1162 dx10 = _mm256_sub_ps(ix1,jx0);
1163 dy10 = _mm256_sub_ps(iy1,jy0);
1164 dz10 = _mm256_sub_ps(iz1,jz0);
1165 dx20 = _mm256_sub_ps(ix2,jx0);
1166 dy20 = _mm256_sub_ps(iy2,jy0);
1167 dz20 = _mm256_sub_ps(iz2,jz0);
1168 dx30 = _mm256_sub_ps(ix3,jx0);
1169 dy30 = _mm256_sub_ps(iy3,jy0);
1170 dz30 = _mm256_sub_ps(iz3,jz0);
1172 /* Calculate squared distance and things based on it */
1173 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1174 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1175 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1176 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
1178 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
1179 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
1180 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
1181 rinv30 = gmx_mm256_invsqrt_ps(rsq30);
1183 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1184 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
1185 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
1186 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
1188 /* Load parameters for j particles */
1189 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1190 charge+jnrC+0,charge+jnrD+0,
1191 charge+jnrE+0,charge+jnrF+0,
1192 charge+jnrG+0,charge+jnrH+0);
1193 vdwjidx0A = 2*vdwtype[jnrA+0];
1194 vdwjidx0B = 2*vdwtype[jnrB+0];
1195 vdwjidx0C = 2*vdwtype[jnrC+0];
1196 vdwjidx0D = 2*vdwtype[jnrD+0];
1197 vdwjidx0E = 2*vdwtype[jnrE+0];
1198 vdwjidx0F = 2*vdwtype[jnrF+0];
1199 vdwjidx0G = 2*vdwtype[jnrG+0];
1200 vdwjidx0H = 2*vdwtype[jnrH+0];
1202 fjx0 = _mm256_setzero_ps();
1203 fjy0 = _mm256_setzero_ps();
1204 fjz0 = _mm256_setzero_ps();
1206 /**************************
1207 * CALCULATE INTERACTIONS *
1208 **************************/
1210 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1213 r00 = _mm256_mul_ps(rsq00,rinv00);
1215 /* Compute parameters for interactions between i and j atoms */
1216 gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1217 vdwioffsetptr0+vdwjidx0B,
1218 vdwioffsetptr0+vdwjidx0C,
1219 vdwioffsetptr0+vdwjidx0D,
1220 vdwioffsetptr0+vdwjidx0E,
1221 vdwioffsetptr0+vdwjidx0F,
1222 vdwioffsetptr0+vdwjidx0G,
1223 vdwioffsetptr0+vdwjidx0H,
1226 /* LENNARD-JONES DISPERSION/REPULSION */
1228 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1229 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
1230 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1231 vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
1232 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
1234 d = _mm256_sub_ps(r00,rswitch);
1235 d = _mm256_max_ps(d,_mm256_setzero_ps());
1236 d2 = _mm256_mul_ps(d,d);
1237 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1239 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1241 /* Evaluate switch function */
1242 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1243 fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
1244 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1248 fscal = _mm256_and_ps(fscal,cutoff_mask);
1250 /* Calculate temporary vectorial force */
1251 tx = _mm256_mul_ps(fscal,dx00);
1252 ty = _mm256_mul_ps(fscal,dy00);
1253 tz = _mm256_mul_ps(fscal,dz00);
1255 /* Update vectorial force */
1256 fix0 = _mm256_add_ps(fix0,tx);
1257 fiy0 = _mm256_add_ps(fiy0,ty);
1258 fiz0 = _mm256_add_ps(fiz0,tz);
1260 fjx0 = _mm256_add_ps(fjx0,tx);
1261 fjy0 = _mm256_add_ps(fjy0,ty);
1262 fjz0 = _mm256_add_ps(fjz0,tz);
1266 /**************************
1267 * CALCULATE INTERACTIONS *
1268 **************************/
1270 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1273 r10 = _mm256_mul_ps(rsq10,rinv10);
1275 /* Compute parameters for interactions between i and j atoms */
1276 qq10 = _mm256_mul_ps(iq1,jq0);
1278 /* EWALD ELECTROSTATICS */
1280 /* Analytical PME correction */
1281 zeta2 = _mm256_mul_ps(beta2,rsq10);
1282 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1283 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1284 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1285 felec = _mm256_mul_ps(qq10,felec);
1286 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1287 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1288 velec = _mm256_sub_ps(rinv10,pmecorrV);
1289 velec = _mm256_mul_ps(qq10,velec);
1291 d = _mm256_sub_ps(r10,rswitch);
1292 d = _mm256_max_ps(d,_mm256_setzero_ps());
1293 d2 = _mm256_mul_ps(d,d);
1294 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1296 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1298 /* Evaluate switch function */
1299 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1300 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1301 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1305 fscal = _mm256_and_ps(fscal,cutoff_mask);
1307 /* Calculate temporary vectorial force */
1308 tx = _mm256_mul_ps(fscal,dx10);
1309 ty = _mm256_mul_ps(fscal,dy10);
1310 tz = _mm256_mul_ps(fscal,dz10);
1312 /* Update vectorial force */
1313 fix1 = _mm256_add_ps(fix1,tx);
1314 fiy1 = _mm256_add_ps(fiy1,ty);
1315 fiz1 = _mm256_add_ps(fiz1,tz);
1317 fjx0 = _mm256_add_ps(fjx0,tx);
1318 fjy0 = _mm256_add_ps(fjy0,ty);
1319 fjz0 = _mm256_add_ps(fjz0,tz);
1323 /**************************
1324 * CALCULATE INTERACTIONS *
1325 **************************/
1327 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1330 r20 = _mm256_mul_ps(rsq20,rinv20);
1332 /* Compute parameters for interactions between i and j atoms */
1333 qq20 = _mm256_mul_ps(iq2,jq0);
1335 /* EWALD ELECTROSTATICS */
1337 /* Analytical PME correction */
1338 zeta2 = _mm256_mul_ps(beta2,rsq20);
1339 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1340 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1341 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1342 felec = _mm256_mul_ps(qq20,felec);
1343 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1344 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1345 velec = _mm256_sub_ps(rinv20,pmecorrV);
1346 velec = _mm256_mul_ps(qq20,velec);
1348 d = _mm256_sub_ps(r20,rswitch);
1349 d = _mm256_max_ps(d,_mm256_setzero_ps());
1350 d2 = _mm256_mul_ps(d,d);
1351 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1353 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1355 /* Evaluate switch function */
1356 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1357 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1358 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1362 fscal = _mm256_and_ps(fscal,cutoff_mask);
1364 /* Calculate temporary vectorial force */
1365 tx = _mm256_mul_ps(fscal,dx20);
1366 ty = _mm256_mul_ps(fscal,dy20);
1367 tz = _mm256_mul_ps(fscal,dz20);
1369 /* Update vectorial force */
1370 fix2 = _mm256_add_ps(fix2,tx);
1371 fiy2 = _mm256_add_ps(fiy2,ty);
1372 fiz2 = _mm256_add_ps(fiz2,tz);
1374 fjx0 = _mm256_add_ps(fjx0,tx);
1375 fjy0 = _mm256_add_ps(fjy0,ty);
1376 fjz0 = _mm256_add_ps(fjz0,tz);
1380 /**************************
1381 * CALCULATE INTERACTIONS *
1382 **************************/
1384 if (gmx_mm256_any_lt(rsq30,rcutoff2))
1387 r30 = _mm256_mul_ps(rsq30,rinv30);
1389 /* Compute parameters for interactions between i and j atoms */
1390 qq30 = _mm256_mul_ps(iq3,jq0);
1392 /* EWALD ELECTROSTATICS */
1394 /* Analytical PME correction */
1395 zeta2 = _mm256_mul_ps(beta2,rsq30);
1396 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
1397 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1398 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1399 felec = _mm256_mul_ps(qq30,felec);
1400 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1401 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1402 velec = _mm256_sub_ps(rinv30,pmecorrV);
1403 velec = _mm256_mul_ps(qq30,velec);
1405 d = _mm256_sub_ps(r30,rswitch);
1406 d = _mm256_max_ps(d,_mm256_setzero_ps());
1407 d2 = _mm256_mul_ps(d,d);
1408 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1410 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1412 /* Evaluate switch function */
1413 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1414 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
1415 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
1419 fscal = _mm256_and_ps(fscal,cutoff_mask);
1421 /* Calculate temporary vectorial force */
1422 tx = _mm256_mul_ps(fscal,dx30);
1423 ty = _mm256_mul_ps(fscal,dy30);
1424 tz = _mm256_mul_ps(fscal,dz30);
1426 /* Update vectorial force */
1427 fix3 = _mm256_add_ps(fix3,tx);
1428 fiy3 = _mm256_add_ps(fiy3,ty);
1429 fiz3 = _mm256_add_ps(fiz3,tz);
1431 fjx0 = _mm256_add_ps(fjx0,tx);
1432 fjy0 = _mm256_add_ps(fjy0,ty);
1433 fjz0 = _mm256_add_ps(fjz0,tz);
1437 fjptrA = f+j_coord_offsetA;
1438 fjptrB = f+j_coord_offsetB;
1439 fjptrC = f+j_coord_offsetC;
1440 fjptrD = f+j_coord_offsetD;
1441 fjptrE = f+j_coord_offsetE;
1442 fjptrF = f+j_coord_offsetF;
1443 fjptrG = f+j_coord_offsetG;
1444 fjptrH = f+j_coord_offsetH;
1446 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1448 /* Inner loop uses 374 flops */
1451 if(jidx<j_index_end)
1454 /* Get j neighbor index, and coordinate index */
1455 jnrlistA = jjnr[jidx];
1456 jnrlistB = jjnr[jidx+1];
1457 jnrlistC = jjnr[jidx+2];
1458 jnrlistD = jjnr[jidx+3];
1459 jnrlistE = jjnr[jidx+4];
1460 jnrlistF = jjnr[jidx+5];
1461 jnrlistG = jjnr[jidx+6];
1462 jnrlistH = jjnr[jidx+7];
1463 /* Sign of each element will be negative for non-real atoms.
1464 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1465 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
1467 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1468 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1470 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1471 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1472 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1473 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1474 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
1475 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
1476 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
1477 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
1478 j_coord_offsetA = DIM*jnrA;
1479 j_coord_offsetB = DIM*jnrB;
1480 j_coord_offsetC = DIM*jnrC;
1481 j_coord_offsetD = DIM*jnrD;
1482 j_coord_offsetE = DIM*jnrE;
1483 j_coord_offsetF = DIM*jnrF;
1484 j_coord_offsetG = DIM*jnrG;
1485 j_coord_offsetH = DIM*jnrH;
1487 /* load j atom coordinates */
1488 gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1489 x+j_coord_offsetC,x+j_coord_offsetD,
1490 x+j_coord_offsetE,x+j_coord_offsetF,
1491 x+j_coord_offsetG,x+j_coord_offsetH,
1494 /* Calculate displacement vector */
1495 dx00 = _mm256_sub_ps(ix0,jx0);
1496 dy00 = _mm256_sub_ps(iy0,jy0);
1497 dz00 = _mm256_sub_ps(iz0,jz0);
1498 dx10 = _mm256_sub_ps(ix1,jx0);
1499 dy10 = _mm256_sub_ps(iy1,jy0);
1500 dz10 = _mm256_sub_ps(iz1,jz0);
1501 dx20 = _mm256_sub_ps(ix2,jx0);
1502 dy20 = _mm256_sub_ps(iy2,jy0);
1503 dz20 = _mm256_sub_ps(iz2,jz0);
1504 dx30 = _mm256_sub_ps(ix3,jx0);
1505 dy30 = _mm256_sub_ps(iy3,jy0);
1506 dz30 = _mm256_sub_ps(iz3,jz0);
1508 /* Calculate squared distance and things based on it */
1509 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1510 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1511 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1512 rsq30 = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
1514 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
1515 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
1516 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
1517 rinv30 = gmx_mm256_invsqrt_ps(rsq30);
1519 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1520 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
1521 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
1522 rinvsq30 = _mm256_mul_ps(rinv30,rinv30);
1524 /* Load parameters for j particles */
1525 jq0 = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1526 charge+jnrC+0,charge+jnrD+0,
1527 charge+jnrE+0,charge+jnrF+0,
1528 charge+jnrG+0,charge+jnrH+0);
1529 vdwjidx0A = 2*vdwtype[jnrA+0];
1530 vdwjidx0B = 2*vdwtype[jnrB+0];
1531 vdwjidx0C = 2*vdwtype[jnrC+0];
1532 vdwjidx0D = 2*vdwtype[jnrD+0];
1533 vdwjidx0E = 2*vdwtype[jnrE+0];
1534 vdwjidx0F = 2*vdwtype[jnrF+0];
1535 vdwjidx0G = 2*vdwtype[jnrG+0];
1536 vdwjidx0H = 2*vdwtype[jnrH+0];
1538 fjx0 = _mm256_setzero_ps();
1539 fjy0 = _mm256_setzero_ps();
1540 fjz0 = _mm256_setzero_ps();
1542 /**************************
1543 * CALCULATE INTERACTIONS *
1544 **************************/
1546 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1549 r00 = _mm256_mul_ps(rsq00,rinv00);
1550 r00 = _mm256_andnot_ps(dummy_mask,r00);
1552 /* Compute parameters for interactions between i and j atoms */
1553 gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1554 vdwioffsetptr0+vdwjidx0B,
1555 vdwioffsetptr0+vdwjidx0C,
1556 vdwioffsetptr0+vdwjidx0D,
1557 vdwioffsetptr0+vdwjidx0E,
1558 vdwioffsetptr0+vdwjidx0F,
1559 vdwioffsetptr0+vdwjidx0G,
1560 vdwioffsetptr0+vdwjidx0H,
1563 /* LENNARD-JONES DISPERSION/REPULSION */
1565 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1566 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
1567 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1568 vvdw = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
1569 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
1571 d = _mm256_sub_ps(r00,rswitch);
1572 d = _mm256_max_ps(d,_mm256_setzero_ps());
1573 d2 = _mm256_mul_ps(d,d);
1574 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1576 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1578 /* Evaluate switch function */
1579 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1580 fvdw = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
1581 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1585 fscal = _mm256_and_ps(fscal,cutoff_mask);
1587 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1589 /* Calculate temporary vectorial force */
1590 tx = _mm256_mul_ps(fscal,dx00);
1591 ty = _mm256_mul_ps(fscal,dy00);
1592 tz = _mm256_mul_ps(fscal,dz00);
1594 /* Update vectorial force */
1595 fix0 = _mm256_add_ps(fix0,tx);
1596 fiy0 = _mm256_add_ps(fiy0,ty);
1597 fiz0 = _mm256_add_ps(fiz0,tz);
1599 fjx0 = _mm256_add_ps(fjx0,tx);
1600 fjy0 = _mm256_add_ps(fjy0,ty);
1601 fjz0 = _mm256_add_ps(fjz0,tz);
1605 /**************************
1606 * CALCULATE INTERACTIONS *
1607 **************************/
1609 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1612 r10 = _mm256_mul_ps(rsq10,rinv10);
1613 r10 = _mm256_andnot_ps(dummy_mask,r10);
1615 /* Compute parameters for interactions between i and j atoms */
1616 qq10 = _mm256_mul_ps(iq1,jq0);
1618 /* EWALD ELECTROSTATICS */
1620 /* Analytical PME correction */
1621 zeta2 = _mm256_mul_ps(beta2,rsq10);
1622 rinv3 = _mm256_mul_ps(rinvsq10,rinv10);
1623 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1624 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1625 felec = _mm256_mul_ps(qq10,felec);
1626 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1627 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1628 velec = _mm256_sub_ps(rinv10,pmecorrV);
1629 velec = _mm256_mul_ps(qq10,velec);
1631 d = _mm256_sub_ps(r10,rswitch);
1632 d = _mm256_max_ps(d,_mm256_setzero_ps());
1633 d2 = _mm256_mul_ps(d,d);
1634 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1636 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1638 /* Evaluate switch function */
1639 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1640 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1641 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1645 fscal = _mm256_and_ps(fscal,cutoff_mask);
1647 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1649 /* Calculate temporary vectorial force */
1650 tx = _mm256_mul_ps(fscal,dx10);
1651 ty = _mm256_mul_ps(fscal,dy10);
1652 tz = _mm256_mul_ps(fscal,dz10);
1654 /* Update vectorial force */
1655 fix1 = _mm256_add_ps(fix1,tx);
1656 fiy1 = _mm256_add_ps(fiy1,ty);
1657 fiz1 = _mm256_add_ps(fiz1,tz);
1659 fjx0 = _mm256_add_ps(fjx0,tx);
1660 fjy0 = _mm256_add_ps(fjy0,ty);
1661 fjz0 = _mm256_add_ps(fjz0,tz);
1665 /**************************
1666 * CALCULATE INTERACTIONS *
1667 **************************/
1669 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1672 r20 = _mm256_mul_ps(rsq20,rinv20);
1673 r20 = _mm256_andnot_ps(dummy_mask,r20);
1675 /* Compute parameters for interactions between i and j atoms */
1676 qq20 = _mm256_mul_ps(iq2,jq0);
1678 /* EWALD ELECTROSTATICS */
1680 /* Analytical PME correction */
1681 zeta2 = _mm256_mul_ps(beta2,rsq20);
1682 rinv3 = _mm256_mul_ps(rinvsq20,rinv20);
1683 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1684 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1685 felec = _mm256_mul_ps(qq20,felec);
1686 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1687 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1688 velec = _mm256_sub_ps(rinv20,pmecorrV);
1689 velec = _mm256_mul_ps(qq20,velec);
1691 d = _mm256_sub_ps(r20,rswitch);
1692 d = _mm256_max_ps(d,_mm256_setzero_ps());
1693 d2 = _mm256_mul_ps(d,d);
1694 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1696 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1698 /* Evaluate switch function */
1699 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1700 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1701 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1705 fscal = _mm256_and_ps(fscal,cutoff_mask);
1707 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1709 /* Calculate temporary vectorial force */
1710 tx = _mm256_mul_ps(fscal,dx20);
1711 ty = _mm256_mul_ps(fscal,dy20);
1712 tz = _mm256_mul_ps(fscal,dz20);
1714 /* Update vectorial force */
1715 fix2 = _mm256_add_ps(fix2,tx);
1716 fiy2 = _mm256_add_ps(fiy2,ty);
1717 fiz2 = _mm256_add_ps(fiz2,tz);
1719 fjx0 = _mm256_add_ps(fjx0,tx);
1720 fjy0 = _mm256_add_ps(fjy0,ty);
1721 fjz0 = _mm256_add_ps(fjz0,tz);
1725 /**************************
1726 * CALCULATE INTERACTIONS *
1727 **************************/
1729 if (gmx_mm256_any_lt(rsq30,rcutoff2))
1732 r30 = _mm256_mul_ps(rsq30,rinv30);
1733 r30 = _mm256_andnot_ps(dummy_mask,r30);
1735 /* Compute parameters for interactions between i and j atoms */
1736 qq30 = _mm256_mul_ps(iq3,jq0);
1738 /* EWALD ELECTROSTATICS */
1740 /* Analytical PME correction */
1741 zeta2 = _mm256_mul_ps(beta2,rsq30);
1742 rinv3 = _mm256_mul_ps(rinvsq30,rinv30);
1743 pmecorrF = gmx_mm256_pmecorrF_ps(zeta2);
1744 felec = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1745 felec = _mm256_mul_ps(qq30,felec);
1746 pmecorrV = gmx_mm256_pmecorrV_ps(zeta2);
1747 pmecorrV = _mm256_mul_ps(pmecorrV,beta);
1748 velec = _mm256_sub_ps(rinv30,pmecorrV);
1749 velec = _mm256_mul_ps(qq30,velec);
1751 d = _mm256_sub_ps(r30,rswitch);
1752 d = _mm256_max_ps(d,_mm256_setzero_ps());
1753 d2 = _mm256_mul_ps(d,d);
1754 sw = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1756 dsw = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1758 /* Evaluate switch function */
1759 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1760 felec = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv30,_mm256_mul_ps(velec,dsw)) );
1761 cutoff_mask = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
1765 fscal = _mm256_and_ps(fscal,cutoff_mask);
1767 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1769 /* Calculate temporary vectorial force */
1770 tx = _mm256_mul_ps(fscal,dx30);
1771 ty = _mm256_mul_ps(fscal,dy30);
1772 tz = _mm256_mul_ps(fscal,dz30);
1774 /* Update vectorial force */
1775 fix3 = _mm256_add_ps(fix3,tx);
1776 fiy3 = _mm256_add_ps(fiy3,ty);
1777 fiz3 = _mm256_add_ps(fiz3,tz);
1779 fjx0 = _mm256_add_ps(fjx0,tx);
1780 fjy0 = _mm256_add_ps(fjy0,ty);
1781 fjz0 = _mm256_add_ps(fjz0,tz);
1785 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1786 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1787 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1788 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1789 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1790 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1791 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1792 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1794 gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1796 /* Inner loop uses 378 flops */
1799 /* End of innermost loop */
1801 gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1802 f+i_coord_offset,fshift+i_shift_offset);
1804 /* Increment number of inner iterations */
1805 inneriter += j_index_end - j_index_start;
1807 /* Outer loop uses 24 flops */
1810 /* Increment number of outer iterations */
1813 /* Update outer/inner flops */
1815 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*378);