2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/legacyheaders/types/simple.h"
46 #include "gromacs/math/vec.h"
47 #include "gromacs/legacyheaders/nrnb.h"
49 #include "gromacs/simd/math_x86_avx_256_double.h"
50 #include "kernelutil_x86_avx_256_double.h"
53 * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_double
54 * Electrostatics interaction: Ewald
55 * VdW interaction: LennardJones
56 * Geometry: Water3-Water3
57 * Calculate force/pot: PotentialAndForce
60 nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_double
61 (t_nblist * gmx_restrict nlist,
62 rvec * gmx_restrict xx,
63 rvec * gmx_restrict ff,
64 t_forcerec * gmx_restrict fr,
65 t_mdatoms * gmx_restrict mdatoms,
66 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67 t_nrnb * gmx_restrict nrnb)
69 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
70 * just 0 for non-waters.
71 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
72 * jnr indices corresponding to data put in the four positions in the SIMD register.
74 int i_shift_offset,i_coord_offset,outeriter,inneriter;
75 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76 int jnrA,jnrB,jnrC,jnrD;
77 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
79 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
80 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
82 real *shiftvec,*fshift,*x,*f;
83 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
85 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86 real * vdwioffsetptr0;
87 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88 real * vdwioffsetptr1;
89 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90 real * vdwioffsetptr2;
91 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
93 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
94 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
95 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
96 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
97 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
98 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
99 __m256d dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
100 __m256d dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
101 __m256d dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
102 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
103 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
104 __m256d dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
105 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
106 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
107 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
110 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
113 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
114 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
116 __m256d ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
117 __m256d beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
119 __m256d rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
120 real rswitch_scalar,d_scalar;
121 __m256d dummy_mask,cutoff_mask;
122 __m128 tmpmask0,tmpmask1;
123 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
124 __m256d one = _mm256_set1_pd(1.0);
125 __m256d two = _mm256_set1_pd(2.0);
131 jindex = nlist->jindex;
133 shiftidx = nlist->shift;
135 shiftvec = fr->shift_vec[0];
136 fshift = fr->fshift[0];
137 facel = _mm256_set1_pd(fr->epsfac);
138 charge = mdatoms->chargeA;
139 nvdwtype = fr->ntype;
141 vdwtype = mdatoms->typeA;
143 sh_ewald = _mm256_set1_pd(fr->ic->sh_ewald);
144 beta = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
145 beta2 = _mm256_mul_pd(beta,beta);
146 beta3 = _mm256_mul_pd(beta,beta2);
148 ewtab = fr->ic->tabq_coul_FDV0;
149 ewtabscale = _mm256_set1_pd(fr->ic->tabq_scale);
150 ewtabhalfspace = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
152 /* Setup water-specific parameters */
153 inr = nlist->iinr[0];
154 iq0 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
155 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
156 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
157 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
159 jq0 = _mm256_set1_pd(charge[inr+0]);
160 jq1 = _mm256_set1_pd(charge[inr+1]);
161 jq2 = _mm256_set1_pd(charge[inr+2]);
162 vdwjidx0A = 2*vdwtype[inr+0];
163 qq00 = _mm256_mul_pd(iq0,jq0);
164 c6_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
165 c12_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
166 qq01 = _mm256_mul_pd(iq0,jq1);
167 qq02 = _mm256_mul_pd(iq0,jq2);
168 qq10 = _mm256_mul_pd(iq1,jq0);
169 qq11 = _mm256_mul_pd(iq1,jq1);
170 qq12 = _mm256_mul_pd(iq1,jq2);
171 qq20 = _mm256_mul_pd(iq2,jq0);
172 qq21 = _mm256_mul_pd(iq2,jq1);
173 qq22 = _mm256_mul_pd(iq2,jq2);
175 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
176 rcutoff_scalar = fr->rcoulomb;
177 rcutoff = _mm256_set1_pd(rcutoff_scalar);
178 rcutoff2 = _mm256_mul_pd(rcutoff,rcutoff);
180 rswitch_scalar = fr->rcoulomb_switch;
181 rswitch = _mm256_set1_pd(rswitch_scalar);
182 /* Setup switch parameters */
183 d_scalar = rcutoff_scalar-rswitch_scalar;
184 d = _mm256_set1_pd(d_scalar);
185 swV3 = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
186 swV4 = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
187 swV5 = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
188 swF2 = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
189 swF3 = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
190 swF4 = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
192 /* Avoid stupid compiler warnings */
193 jnrA = jnrB = jnrC = jnrD = 0;
202 for(iidx=0;iidx<4*DIM;iidx++)
207 /* Start outer loop over neighborlists */
208 for(iidx=0; iidx<nri; iidx++)
210 /* Load shift vector for this list */
211 i_shift_offset = DIM*shiftidx[iidx];
213 /* Load limits for loop over neighbors */
214 j_index_start = jindex[iidx];
215 j_index_end = jindex[iidx+1];
217 /* Get outer coordinate index */
219 i_coord_offset = DIM*inr;
221 /* Load i particle coords and add shift vector */
222 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
223 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
225 fix0 = _mm256_setzero_pd();
226 fiy0 = _mm256_setzero_pd();
227 fiz0 = _mm256_setzero_pd();
228 fix1 = _mm256_setzero_pd();
229 fiy1 = _mm256_setzero_pd();
230 fiz1 = _mm256_setzero_pd();
231 fix2 = _mm256_setzero_pd();
232 fiy2 = _mm256_setzero_pd();
233 fiz2 = _mm256_setzero_pd();
235 /* Reset potential sums */
236 velecsum = _mm256_setzero_pd();
237 vvdwsum = _mm256_setzero_pd();
239 /* Start inner kernel loop */
240 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
243 /* Get j neighbor index, and coordinate index */
248 j_coord_offsetA = DIM*jnrA;
249 j_coord_offsetB = DIM*jnrB;
250 j_coord_offsetC = DIM*jnrC;
251 j_coord_offsetD = DIM*jnrD;
253 /* load j atom coordinates */
254 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
255 x+j_coord_offsetC,x+j_coord_offsetD,
256 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
258 /* Calculate displacement vector */
259 dx00 = _mm256_sub_pd(ix0,jx0);
260 dy00 = _mm256_sub_pd(iy0,jy0);
261 dz00 = _mm256_sub_pd(iz0,jz0);
262 dx01 = _mm256_sub_pd(ix0,jx1);
263 dy01 = _mm256_sub_pd(iy0,jy1);
264 dz01 = _mm256_sub_pd(iz0,jz1);
265 dx02 = _mm256_sub_pd(ix0,jx2);
266 dy02 = _mm256_sub_pd(iy0,jy2);
267 dz02 = _mm256_sub_pd(iz0,jz2);
268 dx10 = _mm256_sub_pd(ix1,jx0);
269 dy10 = _mm256_sub_pd(iy1,jy0);
270 dz10 = _mm256_sub_pd(iz1,jz0);
271 dx11 = _mm256_sub_pd(ix1,jx1);
272 dy11 = _mm256_sub_pd(iy1,jy1);
273 dz11 = _mm256_sub_pd(iz1,jz1);
274 dx12 = _mm256_sub_pd(ix1,jx2);
275 dy12 = _mm256_sub_pd(iy1,jy2);
276 dz12 = _mm256_sub_pd(iz1,jz2);
277 dx20 = _mm256_sub_pd(ix2,jx0);
278 dy20 = _mm256_sub_pd(iy2,jy0);
279 dz20 = _mm256_sub_pd(iz2,jz0);
280 dx21 = _mm256_sub_pd(ix2,jx1);
281 dy21 = _mm256_sub_pd(iy2,jy1);
282 dz21 = _mm256_sub_pd(iz2,jz1);
283 dx22 = _mm256_sub_pd(ix2,jx2);
284 dy22 = _mm256_sub_pd(iy2,jy2);
285 dz22 = _mm256_sub_pd(iz2,jz2);
287 /* Calculate squared distance and things based on it */
288 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
289 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
290 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
291 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
292 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
293 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
294 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
295 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
296 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
298 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
299 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
300 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
301 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
302 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
303 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
304 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
305 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
306 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
308 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
309 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
310 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
311 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
312 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
313 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
314 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
315 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
316 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
318 fjx0 = _mm256_setzero_pd();
319 fjy0 = _mm256_setzero_pd();
320 fjz0 = _mm256_setzero_pd();
321 fjx1 = _mm256_setzero_pd();
322 fjy1 = _mm256_setzero_pd();
323 fjz1 = _mm256_setzero_pd();
324 fjx2 = _mm256_setzero_pd();
325 fjy2 = _mm256_setzero_pd();
326 fjz2 = _mm256_setzero_pd();
328 /**************************
329 * CALCULATE INTERACTIONS *
330 **************************/
332 if (gmx_mm256_any_lt(rsq00,rcutoff2))
335 r00 = _mm256_mul_pd(rsq00,rinv00);
337 /* EWALD ELECTROSTATICS */
339 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
340 ewrt = _mm256_mul_pd(r00,ewtabscale);
341 ewitab = _mm256_cvttpd_epi32(ewrt);
342 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
343 ewitab = _mm_slli_epi32(ewitab,2);
344 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
345 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
346 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
347 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
348 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
349 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
350 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
351 velec = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
352 felec = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
354 /* LENNARD-JONES DISPERSION/REPULSION */
356 rinvsix = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
357 vvdw6 = _mm256_mul_pd(c6_00,rinvsix);
358 vvdw12 = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
359 vvdw = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
360 fvdw = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
362 d = _mm256_sub_pd(r00,rswitch);
363 d = _mm256_max_pd(d,_mm256_setzero_pd());
364 d2 = _mm256_mul_pd(d,d);
365 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
367 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
369 /* Evaluate switch function */
370 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
371 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
372 fvdw = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
373 velec = _mm256_mul_pd(velec,sw);
374 vvdw = _mm256_mul_pd(vvdw,sw);
375 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
377 /* Update potential sum for this i atom from the interaction with this j atom. */
378 velec = _mm256_and_pd(velec,cutoff_mask);
379 velecsum = _mm256_add_pd(velecsum,velec);
380 vvdw = _mm256_and_pd(vvdw,cutoff_mask);
381 vvdwsum = _mm256_add_pd(vvdwsum,vvdw);
383 fscal = _mm256_add_pd(felec,fvdw);
385 fscal = _mm256_and_pd(fscal,cutoff_mask);
387 /* Calculate temporary vectorial force */
388 tx = _mm256_mul_pd(fscal,dx00);
389 ty = _mm256_mul_pd(fscal,dy00);
390 tz = _mm256_mul_pd(fscal,dz00);
392 /* Update vectorial force */
393 fix0 = _mm256_add_pd(fix0,tx);
394 fiy0 = _mm256_add_pd(fiy0,ty);
395 fiz0 = _mm256_add_pd(fiz0,tz);
397 fjx0 = _mm256_add_pd(fjx0,tx);
398 fjy0 = _mm256_add_pd(fjy0,ty);
399 fjz0 = _mm256_add_pd(fjz0,tz);
403 /**************************
404 * CALCULATE INTERACTIONS *
405 **************************/
407 if (gmx_mm256_any_lt(rsq01,rcutoff2))
410 r01 = _mm256_mul_pd(rsq01,rinv01);
412 /* EWALD ELECTROSTATICS */
414 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
415 ewrt = _mm256_mul_pd(r01,ewtabscale);
416 ewitab = _mm256_cvttpd_epi32(ewrt);
417 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
418 ewitab = _mm_slli_epi32(ewitab,2);
419 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
420 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
421 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
422 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
423 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
424 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
425 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
426 velec = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
427 felec = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
429 d = _mm256_sub_pd(r01,rswitch);
430 d = _mm256_max_pd(d,_mm256_setzero_pd());
431 d2 = _mm256_mul_pd(d,d);
432 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
434 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
436 /* Evaluate switch function */
437 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
438 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
439 velec = _mm256_mul_pd(velec,sw);
440 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
442 /* Update potential sum for this i atom from the interaction with this j atom. */
443 velec = _mm256_and_pd(velec,cutoff_mask);
444 velecsum = _mm256_add_pd(velecsum,velec);
448 fscal = _mm256_and_pd(fscal,cutoff_mask);
450 /* Calculate temporary vectorial force */
451 tx = _mm256_mul_pd(fscal,dx01);
452 ty = _mm256_mul_pd(fscal,dy01);
453 tz = _mm256_mul_pd(fscal,dz01);
455 /* Update vectorial force */
456 fix0 = _mm256_add_pd(fix0,tx);
457 fiy0 = _mm256_add_pd(fiy0,ty);
458 fiz0 = _mm256_add_pd(fiz0,tz);
460 fjx1 = _mm256_add_pd(fjx1,tx);
461 fjy1 = _mm256_add_pd(fjy1,ty);
462 fjz1 = _mm256_add_pd(fjz1,tz);
466 /**************************
467 * CALCULATE INTERACTIONS *
468 **************************/
470 if (gmx_mm256_any_lt(rsq02,rcutoff2))
473 r02 = _mm256_mul_pd(rsq02,rinv02);
475 /* EWALD ELECTROSTATICS */
477 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
478 ewrt = _mm256_mul_pd(r02,ewtabscale);
479 ewitab = _mm256_cvttpd_epi32(ewrt);
480 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
481 ewitab = _mm_slli_epi32(ewitab,2);
482 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
483 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
484 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
485 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
486 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
487 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
488 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
489 velec = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
490 felec = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
492 d = _mm256_sub_pd(r02,rswitch);
493 d = _mm256_max_pd(d,_mm256_setzero_pd());
494 d2 = _mm256_mul_pd(d,d);
495 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
497 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
499 /* Evaluate switch function */
500 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
501 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
502 velec = _mm256_mul_pd(velec,sw);
503 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
505 /* Update potential sum for this i atom from the interaction with this j atom. */
506 velec = _mm256_and_pd(velec,cutoff_mask);
507 velecsum = _mm256_add_pd(velecsum,velec);
511 fscal = _mm256_and_pd(fscal,cutoff_mask);
513 /* Calculate temporary vectorial force */
514 tx = _mm256_mul_pd(fscal,dx02);
515 ty = _mm256_mul_pd(fscal,dy02);
516 tz = _mm256_mul_pd(fscal,dz02);
518 /* Update vectorial force */
519 fix0 = _mm256_add_pd(fix0,tx);
520 fiy0 = _mm256_add_pd(fiy0,ty);
521 fiz0 = _mm256_add_pd(fiz0,tz);
523 fjx2 = _mm256_add_pd(fjx2,tx);
524 fjy2 = _mm256_add_pd(fjy2,ty);
525 fjz2 = _mm256_add_pd(fjz2,tz);
529 /**************************
530 * CALCULATE INTERACTIONS *
531 **************************/
533 if (gmx_mm256_any_lt(rsq10,rcutoff2))
536 r10 = _mm256_mul_pd(rsq10,rinv10);
538 /* EWALD ELECTROSTATICS */
540 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
541 ewrt = _mm256_mul_pd(r10,ewtabscale);
542 ewitab = _mm256_cvttpd_epi32(ewrt);
543 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
544 ewitab = _mm_slli_epi32(ewitab,2);
545 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
546 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
547 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
548 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
549 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
550 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
551 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
552 velec = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
553 felec = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
555 d = _mm256_sub_pd(r10,rswitch);
556 d = _mm256_max_pd(d,_mm256_setzero_pd());
557 d2 = _mm256_mul_pd(d,d);
558 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
560 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
562 /* Evaluate switch function */
563 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
564 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
565 velec = _mm256_mul_pd(velec,sw);
566 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
568 /* Update potential sum for this i atom from the interaction with this j atom. */
569 velec = _mm256_and_pd(velec,cutoff_mask);
570 velecsum = _mm256_add_pd(velecsum,velec);
574 fscal = _mm256_and_pd(fscal,cutoff_mask);
576 /* Calculate temporary vectorial force */
577 tx = _mm256_mul_pd(fscal,dx10);
578 ty = _mm256_mul_pd(fscal,dy10);
579 tz = _mm256_mul_pd(fscal,dz10);
581 /* Update vectorial force */
582 fix1 = _mm256_add_pd(fix1,tx);
583 fiy1 = _mm256_add_pd(fiy1,ty);
584 fiz1 = _mm256_add_pd(fiz1,tz);
586 fjx0 = _mm256_add_pd(fjx0,tx);
587 fjy0 = _mm256_add_pd(fjy0,ty);
588 fjz0 = _mm256_add_pd(fjz0,tz);
592 /**************************
593 * CALCULATE INTERACTIONS *
594 **************************/
596 if (gmx_mm256_any_lt(rsq11,rcutoff2))
599 r11 = _mm256_mul_pd(rsq11,rinv11);
601 /* EWALD ELECTROSTATICS */
603 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
604 ewrt = _mm256_mul_pd(r11,ewtabscale);
605 ewitab = _mm256_cvttpd_epi32(ewrt);
606 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
607 ewitab = _mm_slli_epi32(ewitab,2);
608 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
609 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
610 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
611 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
612 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
613 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
614 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
615 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
616 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
618 d = _mm256_sub_pd(r11,rswitch);
619 d = _mm256_max_pd(d,_mm256_setzero_pd());
620 d2 = _mm256_mul_pd(d,d);
621 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
623 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
625 /* Evaluate switch function */
626 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
627 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
628 velec = _mm256_mul_pd(velec,sw);
629 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
631 /* Update potential sum for this i atom from the interaction with this j atom. */
632 velec = _mm256_and_pd(velec,cutoff_mask);
633 velecsum = _mm256_add_pd(velecsum,velec);
637 fscal = _mm256_and_pd(fscal,cutoff_mask);
639 /* Calculate temporary vectorial force */
640 tx = _mm256_mul_pd(fscal,dx11);
641 ty = _mm256_mul_pd(fscal,dy11);
642 tz = _mm256_mul_pd(fscal,dz11);
644 /* Update vectorial force */
645 fix1 = _mm256_add_pd(fix1,tx);
646 fiy1 = _mm256_add_pd(fiy1,ty);
647 fiz1 = _mm256_add_pd(fiz1,tz);
649 fjx1 = _mm256_add_pd(fjx1,tx);
650 fjy1 = _mm256_add_pd(fjy1,ty);
651 fjz1 = _mm256_add_pd(fjz1,tz);
655 /**************************
656 * CALCULATE INTERACTIONS *
657 **************************/
659 if (gmx_mm256_any_lt(rsq12,rcutoff2))
662 r12 = _mm256_mul_pd(rsq12,rinv12);
664 /* EWALD ELECTROSTATICS */
666 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
667 ewrt = _mm256_mul_pd(r12,ewtabscale);
668 ewitab = _mm256_cvttpd_epi32(ewrt);
669 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
670 ewitab = _mm_slli_epi32(ewitab,2);
671 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
672 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
673 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
674 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
675 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
676 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
677 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
678 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
679 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
681 d = _mm256_sub_pd(r12,rswitch);
682 d = _mm256_max_pd(d,_mm256_setzero_pd());
683 d2 = _mm256_mul_pd(d,d);
684 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
686 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
688 /* Evaluate switch function */
689 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
690 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
691 velec = _mm256_mul_pd(velec,sw);
692 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
694 /* Update potential sum for this i atom from the interaction with this j atom. */
695 velec = _mm256_and_pd(velec,cutoff_mask);
696 velecsum = _mm256_add_pd(velecsum,velec);
700 fscal = _mm256_and_pd(fscal,cutoff_mask);
702 /* Calculate temporary vectorial force */
703 tx = _mm256_mul_pd(fscal,dx12);
704 ty = _mm256_mul_pd(fscal,dy12);
705 tz = _mm256_mul_pd(fscal,dz12);
707 /* Update vectorial force */
708 fix1 = _mm256_add_pd(fix1,tx);
709 fiy1 = _mm256_add_pd(fiy1,ty);
710 fiz1 = _mm256_add_pd(fiz1,tz);
712 fjx2 = _mm256_add_pd(fjx2,tx);
713 fjy2 = _mm256_add_pd(fjy2,ty);
714 fjz2 = _mm256_add_pd(fjz2,tz);
718 /**************************
719 * CALCULATE INTERACTIONS *
720 **************************/
722 if (gmx_mm256_any_lt(rsq20,rcutoff2))
725 r20 = _mm256_mul_pd(rsq20,rinv20);
727 /* EWALD ELECTROSTATICS */
729 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
730 ewrt = _mm256_mul_pd(r20,ewtabscale);
731 ewitab = _mm256_cvttpd_epi32(ewrt);
732 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
733 ewitab = _mm_slli_epi32(ewitab,2);
734 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
735 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
736 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
737 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
738 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
739 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
740 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
741 velec = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
742 felec = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
744 d = _mm256_sub_pd(r20,rswitch);
745 d = _mm256_max_pd(d,_mm256_setzero_pd());
746 d2 = _mm256_mul_pd(d,d);
747 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
749 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
751 /* Evaluate switch function */
752 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
753 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
754 velec = _mm256_mul_pd(velec,sw);
755 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
757 /* Update potential sum for this i atom from the interaction with this j atom. */
758 velec = _mm256_and_pd(velec,cutoff_mask);
759 velecsum = _mm256_add_pd(velecsum,velec);
763 fscal = _mm256_and_pd(fscal,cutoff_mask);
765 /* Calculate temporary vectorial force */
766 tx = _mm256_mul_pd(fscal,dx20);
767 ty = _mm256_mul_pd(fscal,dy20);
768 tz = _mm256_mul_pd(fscal,dz20);
770 /* Update vectorial force */
771 fix2 = _mm256_add_pd(fix2,tx);
772 fiy2 = _mm256_add_pd(fiy2,ty);
773 fiz2 = _mm256_add_pd(fiz2,tz);
775 fjx0 = _mm256_add_pd(fjx0,tx);
776 fjy0 = _mm256_add_pd(fjy0,ty);
777 fjz0 = _mm256_add_pd(fjz0,tz);
781 /**************************
782 * CALCULATE INTERACTIONS *
783 **************************/
785 if (gmx_mm256_any_lt(rsq21,rcutoff2))
788 r21 = _mm256_mul_pd(rsq21,rinv21);
790 /* EWALD ELECTROSTATICS */
792 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
793 ewrt = _mm256_mul_pd(r21,ewtabscale);
794 ewitab = _mm256_cvttpd_epi32(ewrt);
795 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
796 ewitab = _mm_slli_epi32(ewitab,2);
797 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
798 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
799 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
800 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
801 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
802 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
803 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
804 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
805 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
807 d = _mm256_sub_pd(r21,rswitch);
808 d = _mm256_max_pd(d,_mm256_setzero_pd());
809 d2 = _mm256_mul_pd(d,d);
810 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
812 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
814 /* Evaluate switch function */
815 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
816 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
817 velec = _mm256_mul_pd(velec,sw);
818 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
820 /* Update potential sum for this i atom from the interaction with this j atom. */
821 velec = _mm256_and_pd(velec,cutoff_mask);
822 velecsum = _mm256_add_pd(velecsum,velec);
826 fscal = _mm256_and_pd(fscal,cutoff_mask);
828 /* Calculate temporary vectorial force */
829 tx = _mm256_mul_pd(fscal,dx21);
830 ty = _mm256_mul_pd(fscal,dy21);
831 tz = _mm256_mul_pd(fscal,dz21);
833 /* Update vectorial force */
834 fix2 = _mm256_add_pd(fix2,tx);
835 fiy2 = _mm256_add_pd(fiy2,ty);
836 fiz2 = _mm256_add_pd(fiz2,tz);
838 fjx1 = _mm256_add_pd(fjx1,tx);
839 fjy1 = _mm256_add_pd(fjy1,ty);
840 fjz1 = _mm256_add_pd(fjz1,tz);
844 /**************************
845 * CALCULATE INTERACTIONS *
846 **************************/
848 if (gmx_mm256_any_lt(rsq22,rcutoff2))
851 r22 = _mm256_mul_pd(rsq22,rinv22);
853 /* EWALD ELECTROSTATICS */
855 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
856 ewrt = _mm256_mul_pd(r22,ewtabscale);
857 ewitab = _mm256_cvttpd_epi32(ewrt);
858 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
859 ewitab = _mm_slli_epi32(ewitab,2);
860 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
861 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
862 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
863 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
864 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
865 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
866 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
867 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
868 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
870 d = _mm256_sub_pd(r22,rswitch);
871 d = _mm256_max_pd(d,_mm256_setzero_pd());
872 d2 = _mm256_mul_pd(d,d);
873 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
875 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
877 /* Evaluate switch function */
878 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
879 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
880 velec = _mm256_mul_pd(velec,sw);
881 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
883 /* Update potential sum for this i atom from the interaction with this j atom. */
884 velec = _mm256_and_pd(velec,cutoff_mask);
885 velecsum = _mm256_add_pd(velecsum,velec);
889 fscal = _mm256_and_pd(fscal,cutoff_mask);
891 /* Calculate temporary vectorial force */
892 tx = _mm256_mul_pd(fscal,dx22);
893 ty = _mm256_mul_pd(fscal,dy22);
894 tz = _mm256_mul_pd(fscal,dz22);
896 /* Update vectorial force */
897 fix2 = _mm256_add_pd(fix2,tx);
898 fiy2 = _mm256_add_pd(fiy2,ty);
899 fiz2 = _mm256_add_pd(fiz2,tz);
901 fjx2 = _mm256_add_pd(fjx2,tx);
902 fjy2 = _mm256_add_pd(fjy2,ty);
903 fjz2 = _mm256_add_pd(fjz2,tz);
907 fjptrA = f+j_coord_offsetA;
908 fjptrB = f+j_coord_offsetB;
909 fjptrC = f+j_coord_offsetC;
910 fjptrD = f+j_coord_offsetD;
912 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
913 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
915 /* Inner loop uses 603 flops */
921 /* Get j neighbor index, and coordinate index */
922 jnrlistA = jjnr[jidx];
923 jnrlistB = jjnr[jidx+1];
924 jnrlistC = jjnr[jidx+2];
925 jnrlistD = jjnr[jidx+3];
926 /* Sign of each element will be negative for non-real atoms.
927 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
928 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
930 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
932 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
933 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
934 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
936 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
937 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
938 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
939 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
940 j_coord_offsetA = DIM*jnrA;
941 j_coord_offsetB = DIM*jnrB;
942 j_coord_offsetC = DIM*jnrC;
943 j_coord_offsetD = DIM*jnrD;
945 /* load j atom coordinates */
946 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
947 x+j_coord_offsetC,x+j_coord_offsetD,
948 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
950 /* Calculate displacement vector */
951 dx00 = _mm256_sub_pd(ix0,jx0);
952 dy00 = _mm256_sub_pd(iy0,jy0);
953 dz00 = _mm256_sub_pd(iz0,jz0);
954 dx01 = _mm256_sub_pd(ix0,jx1);
955 dy01 = _mm256_sub_pd(iy0,jy1);
956 dz01 = _mm256_sub_pd(iz0,jz1);
957 dx02 = _mm256_sub_pd(ix0,jx2);
958 dy02 = _mm256_sub_pd(iy0,jy2);
959 dz02 = _mm256_sub_pd(iz0,jz2);
960 dx10 = _mm256_sub_pd(ix1,jx0);
961 dy10 = _mm256_sub_pd(iy1,jy0);
962 dz10 = _mm256_sub_pd(iz1,jz0);
963 dx11 = _mm256_sub_pd(ix1,jx1);
964 dy11 = _mm256_sub_pd(iy1,jy1);
965 dz11 = _mm256_sub_pd(iz1,jz1);
966 dx12 = _mm256_sub_pd(ix1,jx2);
967 dy12 = _mm256_sub_pd(iy1,jy2);
968 dz12 = _mm256_sub_pd(iz1,jz2);
969 dx20 = _mm256_sub_pd(ix2,jx0);
970 dy20 = _mm256_sub_pd(iy2,jy0);
971 dz20 = _mm256_sub_pd(iz2,jz0);
972 dx21 = _mm256_sub_pd(ix2,jx1);
973 dy21 = _mm256_sub_pd(iy2,jy1);
974 dz21 = _mm256_sub_pd(iz2,jz1);
975 dx22 = _mm256_sub_pd(ix2,jx2);
976 dy22 = _mm256_sub_pd(iy2,jy2);
977 dz22 = _mm256_sub_pd(iz2,jz2);
979 /* Calculate squared distance and things based on it */
980 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
981 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
982 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
983 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
984 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
985 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
986 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
987 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
988 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
990 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
991 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
992 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
993 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
994 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
995 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
996 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
997 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
998 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
1000 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
1001 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
1002 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
1003 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
1004 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1005 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1006 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
1007 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1008 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1010 fjx0 = _mm256_setzero_pd();
1011 fjy0 = _mm256_setzero_pd();
1012 fjz0 = _mm256_setzero_pd();
1013 fjx1 = _mm256_setzero_pd();
1014 fjy1 = _mm256_setzero_pd();
1015 fjz1 = _mm256_setzero_pd();
1016 fjx2 = _mm256_setzero_pd();
1017 fjy2 = _mm256_setzero_pd();
1018 fjz2 = _mm256_setzero_pd();
1020 /**************************
1021 * CALCULATE INTERACTIONS *
1022 **************************/
1024 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1027 r00 = _mm256_mul_pd(rsq00,rinv00);
1028 r00 = _mm256_andnot_pd(dummy_mask,r00);
1030 /* EWALD ELECTROSTATICS */
1032 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1033 ewrt = _mm256_mul_pd(r00,ewtabscale);
1034 ewitab = _mm256_cvttpd_epi32(ewrt);
1035 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1036 ewitab = _mm_slli_epi32(ewitab,2);
1037 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1038 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1039 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1040 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1041 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1042 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1043 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1044 velec = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
1045 felec = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
1047 /* LENNARD-JONES DISPERSION/REPULSION */
1049 rinvsix = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1050 vvdw6 = _mm256_mul_pd(c6_00,rinvsix);
1051 vvdw12 = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
1052 vvdw = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
1053 fvdw = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
1055 d = _mm256_sub_pd(r00,rswitch);
1056 d = _mm256_max_pd(d,_mm256_setzero_pd());
1057 d2 = _mm256_mul_pd(d,d);
1058 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1060 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1062 /* Evaluate switch function */
1063 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1064 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
1065 fvdw = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
1066 velec = _mm256_mul_pd(velec,sw);
1067 vvdw = _mm256_mul_pd(vvdw,sw);
1068 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1070 /* Update potential sum for this i atom from the interaction with this j atom. */
1071 velec = _mm256_and_pd(velec,cutoff_mask);
1072 velec = _mm256_andnot_pd(dummy_mask,velec);
1073 velecsum = _mm256_add_pd(velecsum,velec);
1074 vvdw = _mm256_and_pd(vvdw,cutoff_mask);
1075 vvdw = _mm256_andnot_pd(dummy_mask,vvdw);
1076 vvdwsum = _mm256_add_pd(vvdwsum,vvdw);
1078 fscal = _mm256_add_pd(felec,fvdw);
1080 fscal = _mm256_and_pd(fscal,cutoff_mask);
1082 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1084 /* Calculate temporary vectorial force */
1085 tx = _mm256_mul_pd(fscal,dx00);
1086 ty = _mm256_mul_pd(fscal,dy00);
1087 tz = _mm256_mul_pd(fscal,dz00);
1089 /* Update vectorial force */
1090 fix0 = _mm256_add_pd(fix0,tx);
1091 fiy0 = _mm256_add_pd(fiy0,ty);
1092 fiz0 = _mm256_add_pd(fiz0,tz);
1094 fjx0 = _mm256_add_pd(fjx0,tx);
1095 fjy0 = _mm256_add_pd(fjy0,ty);
1096 fjz0 = _mm256_add_pd(fjz0,tz);
1100 /**************************
1101 * CALCULATE INTERACTIONS *
1102 **************************/
1104 if (gmx_mm256_any_lt(rsq01,rcutoff2))
1107 r01 = _mm256_mul_pd(rsq01,rinv01);
1108 r01 = _mm256_andnot_pd(dummy_mask,r01);
1110 /* EWALD ELECTROSTATICS */
1112 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1113 ewrt = _mm256_mul_pd(r01,ewtabscale);
1114 ewitab = _mm256_cvttpd_epi32(ewrt);
1115 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1116 ewitab = _mm_slli_epi32(ewitab,2);
1117 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1118 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1119 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1120 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1121 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1122 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1123 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1124 velec = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
1125 felec = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
1127 d = _mm256_sub_pd(r01,rswitch);
1128 d = _mm256_max_pd(d,_mm256_setzero_pd());
1129 d2 = _mm256_mul_pd(d,d);
1130 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1132 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1134 /* Evaluate switch function */
1135 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1136 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
1137 velec = _mm256_mul_pd(velec,sw);
1138 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1140 /* Update potential sum for this i atom from the interaction with this j atom. */
1141 velec = _mm256_and_pd(velec,cutoff_mask);
1142 velec = _mm256_andnot_pd(dummy_mask,velec);
1143 velecsum = _mm256_add_pd(velecsum,velec);
1147 fscal = _mm256_and_pd(fscal,cutoff_mask);
1149 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1151 /* Calculate temporary vectorial force */
1152 tx = _mm256_mul_pd(fscal,dx01);
1153 ty = _mm256_mul_pd(fscal,dy01);
1154 tz = _mm256_mul_pd(fscal,dz01);
1156 /* Update vectorial force */
1157 fix0 = _mm256_add_pd(fix0,tx);
1158 fiy0 = _mm256_add_pd(fiy0,ty);
1159 fiz0 = _mm256_add_pd(fiz0,tz);
1161 fjx1 = _mm256_add_pd(fjx1,tx);
1162 fjy1 = _mm256_add_pd(fjy1,ty);
1163 fjz1 = _mm256_add_pd(fjz1,tz);
1167 /**************************
1168 * CALCULATE INTERACTIONS *
1169 **************************/
1171 if (gmx_mm256_any_lt(rsq02,rcutoff2))
1174 r02 = _mm256_mul_pd(rsq02,rinv02);
1175 r02 = _mm256_andnot_pd(dummy_mask,r02);
1177 /* EWALD ELECTROSTATICS */
1179 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1180 ewrt = _mm256_mul_pd(r02,ewtabscale);
1181 ewitab = _mm256_cvttpd_epi32(ewrt);
1182 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1183 ewitab = _mm_slli_epi32(ewitab,2);
1184 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1185 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1186 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1187 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1188 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1189 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1190 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1191 velec = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
1192 felec = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
1194 d = _mm256_sub_pd(r02,rswitch);
1195 d = _mm256_max_pd(d,_mm256_setzero_pd());
1196 d2 = _mm256_mul_pd(d,d);
1197 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1199 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1201 /* Evaluate switch function */
1202 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1203 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
1204 velec = _mm256_mul_pd(velec,sw);
1205 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
1207 /* Update potential sum for this i atom from the interaction with this j atom. */
1208 velec = _mm256_and_pd(velec,cutoff_mask);
1209 velec = _mm256_andnot_pd(dummy_mask,velec);
1210 velecsum = _mm256_add_pd(velecsum,velec);
1214 fscal = _mm256_and_pd(fscal,cutoff_mask);
1216 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1218 /* Calculate temporary vectorial force */
1219 tx = _mm256_mul_pd(fscal,dx02);
1220 ty = _mm256_mul_pd(fscal,dy02);
1221 tz = _mm256_mul_pd(fscal,dz02);
1223 /* Update vectorial force */
1224 fix0 = _mm256_add_pd(fix0,tx);
1225 fiy0 = _mm256_add_pd(fiy0,ty);
1226 fiz0 = _mm256_add_pd(fiz0,tz);
1228 fjx2 = _mm256_add_pd(fjx2,tx);
1229 fjy2 = _mm256_add_pd(fjy2,ty);
1230 fjz2 = _mm256_add_pd(fjz2,tz);
1234 /**************************
1235 * CALCULATE INTERACTIONS *
1236 **************************/
1238 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1241 r10 = _mm256_mul_pd(rsq10,rinv10);
1242 r10 = _mm256_andnot_pd(dummy_mask,r10);
1244 /* EWALD ELECTROSTATICS */
1246 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1247 ewrt = _mm256_mul_pd(r10,ewtabscale);
1248 ewitab = _mm256_cvttpd_epi32(ewrt);
1249 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1250 ewitab = _mm_slli_epi32(ewitab,2);
1251 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1252 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1253 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1254 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1255 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1256 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1257 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1258 velec = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
1259 felec = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1261 d = _mm256_sub_pd(r10,rswitch);
1262 d = _mm256_max_pd(d,_mm256_setzero_pd());
1263 d2 = _mm256_mul_pd(d,d);
1264 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1266 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1268 /* Evaluate switch function */
1269 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1270 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
1271 velec = _mm256_mul_pd(velec,sw);
1272 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1274 /* Update potential sum for this i atom from the interaction with this j atom. */
1275 velec = _mm256_and_pd(velec,cutoff_mask);
1276 velec = _mm256_andnot_pd(dummy_mask,velec);
1277 velecsum = _mm256_add_pd(velecsum,velec);
1281 fscal = _mm256_and_pd(fscal,cutoff_mask);
1283 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1285 /* Calculate temporary vectorial force */
1286 tx = _mm256_mul_pd(fscal,dx10);
1287 ty = _mm256_mul_pd(fscal,dy10);
1288 tz = _mm256_mul_pd(fscal,dz10);
1290 /* Update vectorial force */
1291 fix1 = _mm256_add_pd(fix1,tx);
1292 fiy1 = _mm256_add_pd(fiy1,ty);
1293 fiz1 = _mm256_add_pd(fiz1,tz);
1295 fjx0 = _mm256_add_pd(fjx0,tx);
1296 fjy0 = _mm256_add_pd(fjy0,ty);
1297 fjz0 = _mm256_add_pd(fjz0,tz);
1301 /**************************
1302 * CALCULATE INTERACTIONS *
1303 **************************/
1305 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1308 r11 = _mm256_mul_pd(rsq11,rinv11);
1309 r11 = _mm256_andnot_pd(dummy_mask,r11);
1311 /* EWALD ELECTROSTATICS */
1313 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1314 ewrt = _mm256_mul_pd(r11,ewtabscale);
1315 ewitab = _mm256_cvttpd_epi32(ewrt);
1316 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1317 ewitab = _mm_slli_epi32(ewitab,2);
1318 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1319 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1320 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1321 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1322 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1323 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1324 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1325 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
1326 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
1328 d = _mm256_sub_pd(r11,rswitch);
1329 d = _mm256_max_pd(d,_mm256_setzero_pd());
1330 d2 = _mm256_mul_pd(d,d);
1331 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1333 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1335 /* Evaluate switch function */
1336 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1337 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
1338 velec = _mm256_mul_pd(velec,sw);
1339 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1341 /* Update potential sum for this i atom from the interaction with this j atom. */
1342 velec = _mm256_and_pd(velec,cutoff_mask);
1343 velec = _mm256_andnot_pd(dummy_mask,velec);
1344 velecsum = _mm256_add_pd(velecsum,velec);
1348 fscal = _mm256_and_pd(fscal,cutoff_mask);
1350 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1352 /* Calculate temporary vectorial force */
1353 tx = _mm256_mul_pd(fscal,dx11);
1354 ty = _mm256_mul_pd(fscal,dy11);
1355 tz = _mm256_mul_pd(fscal,dz11);
1357 /* Update vectorial force */
1358 fix1 = _mm256_add_pd(fix1,tx);
1359 fiy1 = _mm256_add_pd(fiy1,ty);
1360 fiz1 = _mm256_add_pd(fiz1,tz);
1362 fjx1 = _mm256_add_pd(fjx1,tx);
1363 fjy1 = _mm256_add_pd(fjy1,ty);
1364 fjz1 = _mm256_add_pd(fjz1,tz);
1368 /**************************
1369 * CALCULATE INTERACTIONS *
1370 **************************/
1372 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1375 r12 = _mm256_mul_pd(rsq12,rinv12);
1376 r12 = _mm256_andnot_pd(dummy_mask,r12);
1378 /* EWALD ELECTROSTATICS */
1380 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1381 ewrt = _mm256_mul_pd(r12,ewtabscale);
1382 ewitab = _mm256_cvttpd_epi32(ewrt);
1383 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1384 ewitab = _mm_slli_epi32(ewitab,2);
1385 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1386 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1387 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1388 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1389 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1390 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1391 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1392 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
1393 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
1395 d = _mm256_sub_pd(r12,rswitch);
1396 d = _mm256_max_pd(d,_mm256_setzero_pd());
1397 d2 = _mm256_mul_pd(d,d);
1398 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1400 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1402 /* Evaluate switch function */
1403 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1404 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
1405 velec = _mm256_mul_pd(velec,sw);
1406 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1408 /* Update potential sum for this i atom from the interaction with this j atom. */
1409 velec = _mm256_and_pd(velec,cutoff_mask);
1410 velec = _mm256_andnot_pd(dummy_mask,velec);
1411 velecsum = _mm256_add_pd(velecsum,velec);
1415 fscal = _mm256_and_pd(fscal,cutoff_mask);
1417 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1419 /* Calculate temporary vectorial force */
1420 tx = _mm256_mul_pd(fscal,dx12);
1421 ty = _mm256_mul_pd(fscal,dy12);
1422 tz = _mm256_mul_pd(fscal,dz12);
1424 /* Update vectorial force */
1425 fix1 = _mm256_add_pd(fix1,tx);
1426 fiy1 = _mm256_add_pd(fiy1,ty);
1427 fiz1 = _mm256_add_pd(fiz1,tz);
1429 fjx2 = _mm256_add_pd(fjx2,tx);
1430 fjy2 = _mm256_add_pd(fjy2,ty);
1431 fjz2 = _mm256_add_pd(fjz2,tz);
1435 /**************************
1436 * CALCULATE INTERACTIONS *
1437 **************************/
1439 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1442 r20 = _mm256_mul_pd(rsq20,rinv20);
1443 r20 = _mm256_andnot_pd(dummy_mask,r20);
1445 /* EWALD ELECTROSTATICS */
1447 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1448 ewrt = _mm256_mul_pd(r20,ewtabscale);
1449 ewitab = _mm256_cvttpd_epi32(ewrt);
1450 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1451 ewitab = _mm_slli_epi32(ewitab,2);
1452 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1453 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1454 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1455 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1456 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1457 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1458 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1459 velec = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
1460 felec = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1462 d = _mm256_sub_pd(r20,rswitch);
1463 d = _mm256_max_pd(d,_mm256_setzero_pd());
1464 d2 = _mm256_mul_pd(d,d);
1465 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1467 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1469 /* Evaluate switch function */
1470 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1471 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
1472 velec = _mm256_mul_pd(velec,sw);
1473 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1475 /* Update potential sum for this i atom from the interaction with this j atom. */
1476 velec = _mm256_and_pd(velec,cutoff_mask);
1477 velec = _mm256_andnot_pd(dummy_mask,velec);
1478 velecsum = _mm256_add_pd(velecsum,velec);
1482 fscal = _mm256_and_pd(fscal,cutoff_mask);
1484 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1486 /* Calculate temporary vectorial force */
1487 tx = _mm256_mul_pd(fscal,dx20);
1488 ty = _mm256_mul_pd(fscal,dy20);
1489 tz = _mm256_mul_pd(fscal,dz20);
1491 /* Update vectorial force */
1492 fix2 = _mm256_add_pd(fix2,tx);
1493 fiy2 = _mm256_add_pd(fiy2,ty);
1494 fiz2 = _mm256_add_pd(fiz2,tz);
1496 fjx0 = _mm256_add_pd(fjx0,tx);
1497 fjy0 = _mm256_add_pd(fjy0,ty);
1498 fjz0 = _mm256_add_pd(fjz0,tz);
1502 /**************************
1503 * CALCULATE INTERACTIONS *
1504 **************************/
1506 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1509 r21 = _mm256_mul_pd(rsq21,rinv21);
1510 r21 = _mm256_andnot_pd(dummy_mask,r21);
1512 /* EWALD ELECTROSTATICS */
1514 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1515 ewrt = _mm256_mul_pd(r21,ewtabscale);
1516 ewitab = _mm256_cvttpd_epi32(ewrt);
1517 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1518 ewitab = _mm_slli_epi32(ewitab,2);
1519 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1520 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1521 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1522 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1523 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1524 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1525 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1526 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
1527 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
1529 d = _mm256_sub_pd(r21,rswitch);
1530 d = _mm256_max_pd(d,_mm256_setzero_pd());
1531 d2 = _mm256_mul_pd(d,d);
1532 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1534 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1536 /* Evaluate switch function */
1537 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1538 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
1539 velec = _mm256_mul_pd(velec,sw);
1540 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1542 /* Update potential sum for this i atom from the interaction with this j atom. */
1543 velec = _mm256_and_pd(velec,cutoff_mask);
1544 velec = _mm256_andnot_pd(dummy_mask,velec);
1545 velecsum = _mm256_add_pd(velecsum,velec);
1549 fscal = _mm256_and_pd(fscal,cutoff_mask);
1551 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1553 /* Calculate temporary vectorial force */
1554 tx = _mm256_mul_pd(fscal,dx21);
1555 ty = _mm256_mul_pd(fscal,dy21);
1556 tz = _mm256_mul_pd(fscal,dz21);
1558 /* Update vectorial force */
1559 fix2 = _mm256_add_pd(fix2,tx);
1560 fiy2 = _mm256_add_pd(fiy2,ty);
1561 fiz2 = _mm256_add_pd(fiz2,tz);
1563 fjx1 = _mm256_add_pd(fjx1,tx);
1564 fjy1 = _mm256_add_pd(fjy1,ty);
1565 fjz1 = _mm256_add_pd(fjz1,tz);
1569 /**************************
1570 * CALCULATE INTERACTIONS *
1571 **************************/
1573 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1576 r22 = _mm256_mul_pd(rsq22,rinv22);
1577 r22 = _mm256_andnot_pd(dummy_mask,r22);
1579 /* EWALD ELECTROSTATICS */
1581 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1582 ewrt = _mm256_mul_pd(r22,ewtabscale);
1583 ewitab = _mm256_cvttpd_epi32(ewrt);
1584 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1585 ewitab = _mm_slli_epi32(ewitab,2);
1586 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1587 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1588 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1589 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1590 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1591 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1592 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1593 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
1594 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
1596 d = _mm256_sub_pd(r22,rswitch);
1597 d = _mm256_max_pd(d,_mm256_setzero_pd());
1598 d2 = _mm256_mul_pd(d,d);
1599 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1601 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1603 /* Evaluate switch function */
1604 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1605 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
1606 velec = _mm256_mul_pd(velec,sw);
1607 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1609 /* Update potential sum for this i atom from the interaction with this j atom. */
1610 velec = _mm256_and_pd(velec,cutoff_mask);
1611 velec = _mm256_andnot_pd(dummy_mask,velec);
1612 velecsum = _mm256_add_pd(velecsum,velec);
1616 fscal = _mm256_and_pd(fscal,cutoff_mask);
1618 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1620 /* Calculate temporary vectorial force */
1621 tx = _mm256_mul_pd(fscal,dx22);
1622 ty = _mm256_mul_pd(fscal,dy22);
1623 tz = _mm256_mul_pd(fscal,dz22);
1625 /* Update vectorial force */
1626 fix2 = _mm256_add_pd(fix2,tx);
1627 fiy2 = _mm256_add_pd(fiy2,ty);
1628 fiz2 = _mm256_add_pd(fiz2,tz);
1630 fjx2 = _mm256_add_pd(fjx2,tx);
1631 fjy2 = _mm256_add_pd(fjy2,ty);
1632 fjz2 = _mm256_add_pd(fjz2,tz);
1636 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1637 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1638 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1639 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1641 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1642 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1644 /* Inner loop uses 612 flops */
1647 /* End of innermost loop */
1649 gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1650 f+i_coord_offset,fshift+i_shift_offset);
1653 /* Update potential energies */
1654 gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1655 gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
1657 /* Increment number of inner iterations */
1658 inneriter += j_index_end - j_index_start;
1660 /* Outer loop uses 20 flops */
1663 /* Increment number of outer iterations */
1666 /* Update outer/inner flops */
1668 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*612);
1671 * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_double
1672 * Electrostatics interaction: Ewald
1673 * VdW interaction: LennardJones
1674 * Geometry: Water3-Water3
1675 * Calculate force/pot: Force
1678 nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_double
1679 (t_nblist * gmx_restrict nlist,
1680 rvec * gmx_restrict xx,
1681 rvec * gmx_restrict ff,
1682 t_forcerec * gmx_restrict fr,
1683 t_mdatoms * gmx_restrict mdatoms,
1684 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1685 t_nrnb * gmx_restrict nrnb)
1687 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1688 * just 0 for non-waters.
1689 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1690 * jnr indices corresponding to data put in the four positions in the SIMD register.
1692 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1693 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1694 int jnrA,jnrB,jnrC,jnrD;
1695 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1696 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1697 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1698 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1699 real rcutoff_scalar;
1700 real *shiftvec,*fshift,*x,*f;
1701 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1702 real scratch[4*DIM];
1703 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1704 real * vdwioffsetptr0;
1705 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1706 real * vdwioffsetptr1;
1707 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1708 real * vdwioffsetptr2;
1709 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1710 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1711 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1712 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1713 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1714 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1715 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1716 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1717 __m256d dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1718 __m256d dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1719 __m256d dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1720 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1721 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1722 __m256d dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1723 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1724 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1725 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
1728 __m256d rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1731 __m256d one_sixth = _mm256_set1_pd(1.0/6.0);
1732 __m256d one_twelfth = _mm256_set1_pd(1.0/12.0);
1734 __m256d ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1735 __m256d beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1737 __m256d rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
1738 real rswitch_scalar,d_scalar;
1739 __m256d dummy_mask,cutoff_mask;
1740 __m128 tmpmask0,tmpmask1;
1741 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1742 __m256d one = _mm256_set1_pd(1.0);
1743 __m256d two = _mm256_set1_pd(2.0);
1749 jindex = nlist->jindex;
1751 shiftidx = nlist->shift;
1753 shiftvec = fr->shift_vec[0];
1754 fshift = fr->fshift[0];
1755 facel = _mm256_set1_pd(fr->epsfac);
1756 charge = mdatoms->chargeA;
1757 nvdwtype = fr->ntype;
1758 vdwparam = fr->nbfp;
1759 vdwtype = mdatoms->typeA;
1761 sh_ewald = _mm256_set1_pd(fr->ic->sh_ewald);
1762 beta = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
1763 beta2 = _mm256_mul_pd(beta,beta);
1764 beta3 = _mm256_mul_pd(beta,beta2);
1766 ewtab = fr->ic->tabq_coul_FDV0;
1767 ewtabscale = _mm256_set1_pd(fr->ic->tabq_scale);
1768 ewtabhalfspace = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
1770 /* Setup water-specific parameters */
1771 inr = nlist->iinr[0];
1772 iq0 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1773 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1774 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1775 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1777 jq0 = _mm256_set1_pd(charge[inr+0]);
1778 jq1 = _mm256_set1_pd(charge[inr+1]);
1779 jq2 = _mm256_set1_pd(charge[inr+2]);
1780 vdwjidx0A = 2*vdwtype[inr+0];
1781 qq00 = _mm256_mul_pd(iq0,jq0);
1782 c6_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1783 c12_00 = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1784 qq01 = _mm256_mul_pd(iq0,jq1);
1785 qq02 = _mm256_mul_pd(iq0,jq2);
1786 qq10 = _mm256_mul_pd(iq1,jq0);
1787 qq11 = _mm256_mul_pd(iq1,jq1);
1788 qq12 = _mm256_mul_pd(iq1,jq2);
1789 qq20 = _mm256_mul_pd(iq2,jq0);
1790 qq21 = _mm256_mul_pd(iq2,jq1);
1791 qq22 = _mm256_mul_pd(iq2,jq2);
1793 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1794 rcutoff_scalar = fr->rcoulomb;
1795 rcutoff = _mm256_set1_pd(rcutoff_scalar);
1796 rcutoff2 = _mm256_mul_pd(rcutoff,rcutoff);
1798 rswitch_scalar = fr->rcoulomb_switch;
1799 rswitch = _mm256_set1_pd(rswitch_scalar);
1800 /* Setup switch parameters */
1801 d_scalar = rcutoff_scalar-rswitch_scalar;
1802 d = _mm256_set1_pd(d_scalar);
1803 swV3 = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
1804 swV4 = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1805 swV5 = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1806 swF2 = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
1807 swF3 = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1808 swF4 = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1810 /* Avoid stupid compiler warnings */
1811 jnrA = jnrB = jnrC = jnrD = 0;
1812 j_coord_offsetA = 0;
1813 j_coord_offsetB = 0;
1814 j_coord_offsetC = 0;
1815 j_coord_offsetD = 0;
1820 for(iidx=0;iidx<4*DIM;iidx++)
1822 scratch[iidx] = 0.0;
1825 /* Start outer loop over neighborlists */
1826 for(iidx=0; iidx<nri; iidx++)
1828 /* Load shift vector for this list */
1829 i_shift_offset = DIM*shiftidx[iidx];
1831 /* Load limits for loop over neighbors */
1832 j_index_start = jindex[iidx];
1833 j_index_end = jindex[iidx+1];
1835 /* Get outer coordinate index */
1837 i_coord_offset = DIM*inr;
1839 /* Load i particle coords and add shift vector */
1840 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1841 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1843 fix0 = _mm256_setzero_pd();
1844 fiy0 = _mm256_setzero_pd();
1845 fiz0 = _mm256_setzero_pd();
1846 fix1 = _mm256_setzero_pd();
1847 fiy1 = _mm256_setzero_pd();
1848 fiz1 = _mm256_setzero_pd();
1849 fix2 = _mm256_setzero_pd();
1850 fiy2 = _mm256_setzero_pd();
1851 fiz2 = _mm256_setzero_pd();
1853 /* Start inner kernel loop */
1854 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1857 /* Get j neighbor index, and coordinate index */
1859 jnrB = jjnr[jidx+1];
1860 jnrC = jjnr[jidx+2];
1861 jnrD = jjnr[jidx+3];
1862 j_coord_offsetA = DIM*jnrA;
1863 j_coord_offsetB = DIM*jnrB;
1864 j_coord_offsetC = DIM*jnrC;
1865 j_coord_offsetD = DIM*jnrD;
1867 /* load j atom coordinates */
1868 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1869 x+j_coord_offsetC,x+j_coord_offsetD,
1870 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1872 /* Calculate displacement vector */
1873 dx00 = _mm256_sub_pd(ix0,jx0);
1874 dy00 = _mm256_sub_pd(iy0,jy0);
1875 dz00 = _mm256_sub_pd(iz0,jz0);
1876 dx01 = _mm256_sub_pd(ix0,jx1);
1877 dy01 = _mm256_sub_pd(iy0,jy1);
1878 dz01 = _mm256_sub_pd(iz0,jz1);
1879 dx02 = _mm256_sub_pd(ix0,jx2);
1880 dy02 = _mm256_sub_pd(iy0,jy2);
1881 dz02 = _mm256_sub_pd(iz0,jz2);
1882 dx10 = _mm256_sub_pd(ix1,jx0);
1883 dy10 = _mm256_sub_pd(iy1,jy0);
1884 dz10 = _mm256_sub_pd(iz1,jz0);
1885 dx11 = _mm256_sub_pd(ix1,jx1);
1886 dy11 = _mm256_sub_pd(iy1,jy1);
1887 dz11 = _mm256_sub_pd(iz1,jz1);
1888 dx12 = _mm256_sub_pd(ix1,jx2);
1889 dy12 = _mm256_sub_pd(iy1,jy2);
1890 dz12 = _mm256_sub_pd(iz1,jz2);
1891 dx20 = _mm256_sub_pd(ix2,jx0);
1892 dy20 = _mm256_sub_pd(iy2,jy0);
1893 dz20 = _mm256_sub_pd(iz2,jz0);
1894 dx21 = _mm256_sub_pd(ix2,jx1);
1895 dy21 = _mm256_sub_pd(iy2,jy1);
1896 dz21 = _mm256_sub_pd(iz2,jz1);
1897 dx22 = _mm256_sub_pd(ix2,jx2);
1898 dy22 = _mm256_sub_pd(iy2,jy2);
1899 dz22 = _mm256_sub_pd(iz2,jz2);
1901 /* Calculate squared distance and things based on it */
1902 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1903 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1904 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1905 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1906 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1907 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1908 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1909 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1910 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1912 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
1913 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
1914 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
1915 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
1916 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
1917 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
1918 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
1919 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
1920 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
1922 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
1923 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
1924 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
1925 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
1926 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1927 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1928 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
1929 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1930 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1932 fjx0 = _mm256_setzero_pd();
1933 fjy0 = _mm256_setzero_pd();
1934 fjz0 = _mm256_setzero_pd();
1935 fjx1 = _mm256_setzero_pd();
1936 fjy1 = _mm256_setzero_pd();
1937 fjz1 = _mm256_setzero_pd();
1938 fjx2 = _mm256_setzero_pd();
1939 fjy2 = _mm256_setzero_pd();
1940 fjz2 = _mm256_setzero_pd();
1942 /**************************
1943 * CALCULATE INTERACTIONS *
1944 **************************/
1946 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1949 r00 = _mm256_mul_pd(rsq00,rinv00);
1951 /* EWALD ELECTROSTATICS */
1953 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1954 ewrt = _mm256_mul_pd(r00,ewtabscale);
1955 ewitab = _mm256_cvttpd_epi32(ewrt);
1956 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1957 ewitab = _mm_slli_epi32(ewitab,2);
1958 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1959 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1960 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1961 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1962 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1963 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1964 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1965 velec = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
1966 felec = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
1968 /* LENNARD-JONES DISPERSION/REPULSION */
1970 rinvsix = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1971 vvdw6 = _mm256_mul_pd(c6_00,rinvsix);
1972 vvdw12 = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
1973 vvdw = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
1974 fvdw = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
1976 d = _mm256_sub_pd(r00,rswitch);
1977 d = _mm256_max_pd(d,_mm256_setzero_pd());
1978 d2 = _mm256_mul_pd(d,d);
1979 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1981 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1983 /* Evaluate switch function */
1984 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1985 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
1986 fvdw = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
1987 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1989 fscal = _mm256_add_pd(felec,fvdw);
1991 fscal = _mm256_and_pd(fscal,cutoff_mask);
1993 /* Calculate temporary vectorial force */
1994 tx = _mm256_mul_pd(fscal,dx00);
1995 ty = _mm256_mul_pd(fscal,dy00);
1996 tz = _mm256_mul_pd(fscal,dz00);
1998 /* Update vectorial force */
1999 fix0 = _mm256_add_pd(fix0,tx);
2000 fiy0 = _mm256_add_pd(fiy0,ty);
2001 fiz0 = _mm256_add_pd(fiz0,tz);
2003 fjx0 = _mm256_add_pd(fjx0,tx);
2004 fjy0 = _mm256_add_pd(fjy0,ty);
2005 fjz0 = _mm256_add_pd(fjz0,tz);
2009 /**************************
2010 * CALCULATE INTERACTIONS *
2011 **************************/
2013 if (gmx_mm256_any_lt(rsq01,rcutoff2))
2016 r01 = _mm256_mul_pd(rsq01,rinv01);
2018 /* EWALD ELECTROSTATICS */
2020 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2021 ewrt = _mm256_mul_pd(r01,ewtabscale);
2022 ewitab = _mm256_cvttpd_epi32(ewrt);
2023 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2024 ewitab = _mm_slli_epi32(ewitab,2);
2025 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2026 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2027 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2028 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2029 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2030 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2031 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2032 velec = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
2033 felec = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
2035 d = _mm256_sub_pd(r01,rswitch);
2036 d = _mm256_max_pd(d,_mm256_setzero_pd());
2037 d2 = _mm256_mul_pd(d,d);
2038 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2040 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2042 /* Evaluate switch function */
2043 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2044 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
2045 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
2049 fscal = _mm256_and_pd(fscal,cutoff_mask);
2051 /* Calculate temporary vectorial force */
2052 tx = _mm256_mul_pd(fscal,dx01);
2053 ty = _mm256_mul_pd(fscal,dy01);
2054 tz = _mm256_mul_pd(fscal,dz01);
2056 /* Update vectorial force */
2057 fix0 = _mm256_add_pd(fix0,tx);
2058 fiy0 = _mm256_add_pd(fiy0,ty);
2059 fiz0 = _mm256_add_pd(fiz0,tz);
2061 fjx1 = _mm256_add_pd(fjx1,tx);
2062 fjy1 = _mm256_add_pd(fjy1,ty);
2063 fjz1 = _mm256_add_pd(fjz1,tz);
2067 /**************************
2068 * CALCULATE INTERACTIONS *
2069 **************************/
2071 if (gmx_mm256_any_lt(rsq02,rcutoff2))
2074 r02 = _mm256_mul_pd(rsq02,rinv02);
2076 /* EWALD ELECTROSTATICS */
2078 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2079 ewrt = _mm256_mul_pd(r02,ewtabscale);
2080 ewitab = _mm256_cvttpd_epi32(ewrt);
2081 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2082 ewitab = _mm_slli_epi32(ewitab,2);
2083 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2084 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2085 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2086 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2087 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2088 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2089 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2090 velec = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
2091 felec = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
2093 d = _mm256_sub_pd(r02,rswitch);
2094 d = _mm256_max_pd(d,_mm256_setzero_pd());
2095 d2 = _mm256_mul_pd(d,d);
2096 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2098 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2100 /* Evaluate switch function */
2101 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2102 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
2103 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2107 fscal = _mm256_and_pd(fscal,cutoff_mask);
2109 /* Calculate temporary vectorial force */
2110 tx = _mm256_mul_pd(fscal,dx02);
2111 ty = _mm256_mul_pd(fscal,dy02);
2112 tz = _mm256_mul_pd(fscal,dz02);
2114 /* Update vectorial force */
2115 fix0 = _mm256_add_pd(fix0,tx);
2116 fiy0 = _mm256_add_pd(fiy0,ty);
2117 fiz0 = _mm256_add_pd(fiz0,tz);
2119 fjx2 = _mm256_add_pd(fjx2,tx);
2120 fjy2 = _mm256_add_pd(fjy2,ty);
2121 fjz2 = _mm256_add_pd(fjz2,tz);
2125 /**************************
2126 * CALCULATE INTERACTIONS *
2127 **************************/
2129 if (gmx_mm256_any_lt(rsq10,rcutoff2))
2132 r10 = _mm256_mul_pd(rsq10,rinv10);
2134 /* EWALD ELECTROSTATICS */
2136 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2137 ewrt = _mm256_mul_pd(r10,ewtabscale);
2138 ewitab = _mm256_cvttpd_epi32(ewrt);
2139 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2140 ewitab = _mm_slli_epi32(ewitab,2);
2141 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2142 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2143 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2144 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2145 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2146 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2147 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2148 velec = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
2149 felec = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
2151 d = _mm256_sub_pd(r10,rswitch);
2152 d = _mm256_max_pd(d,_mm256_setzero_pd());
2153 d2 = _mm256_mul_pd(d,d);
2154 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2156 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2158 /* Evaluate switch function */
2159 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2160 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
2161 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2165 fscal = _mm256_and_pd(fscal,cutoff_mask);
2167 /* Calculate temporary vectorial force */
2168 tx = _mm256_mul_pd(fscal,dx10);
2169 ty = _mm256_mul_pd(fscal,dy10);
2170 tz = _mm256_mul_pd(fscal,dz10);
2172 /* Update vectorial force */
2173 fix1 = _mm256_add_pd(fix1,tx);
2174 fiy1 = _mm256_add_pd(fiy1,ty);
2175 fiz1 = _mm256_add_pd(fiz1,tz);
2177 fjx0 = _mm256_add_pd(fjx0,tx);
2178 fjy0 = _mm256_add_pd(fjy0,ty);
2179 fjz0 = _mm256_add_pd(fjz0,tz);
2183 /**************************
2184 * CALCULATE INTERACTIONS *
2185 **************************/
2187 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2190 r11 = _mm256_mul_pd(rsq11,rinv11);
2192 /* EWALD ELECTROSTATICS */
2194 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2195 ewrt = _mm256_mul_pd(r11,ewtabscale);
2196 ewitab = _mm256_cvttpd_epi32(ewrt);
2197 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2198 ewitab = _mm_slli_epi32(ewitab,2);
2199 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2200 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2201 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2202 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2203 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2204 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2205 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2206 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
2207 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
2209 d = _mm256_sub_pd(r11,rswitch);
2210 d = _mm256_max_pd(d,_mm256_setzero_pd());
2211 d2 = _mm256_mul_pd(d,d);
2212 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2214 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2216 /* Evaluate switch function */
2217 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2218 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
2219 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2223 fscal = _mm256_and_pd(fscal,cutoff_mask);
2225 /* Calculate temporary vectorial force */
2226 tx = _mm256_mul_pd(fscal,dx11);
2227 ty = _mm256_mul_pd(fscal,dy11);
2228 tz = _mm256_mul_pd(fscal,dz11);
2230 /* Update vectorial force */
2231 fix1 = _mm256_add_pd(fix1,tx);
2232 fiy1 = _mm256_add_pd(fiy1,ty);
2233 fiz1 = _mm256_add_pd(fiz1,tz);
2235 fjx1 = _mm256_add_pd(fjx1,tx);
2236 fjy1 = _mm256_add_pd(fjy1,ty);
2237 fjz1 = _mm256_add_pd(fjz1,tz);
2241 /**************************
2242 * CALCULATE INTERACTIONS *
2243 **************************/
2245 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2248 r12 = _mm256_mul_pd(rsq12,rinv12);
2250 /* EWALD ELECTROSTATICS */
2252 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2253 ewrt = _mm256_mul_pd(r12,ewtabscale);
2254 ewitab = _mm256_cvttpd_epi32(ewrt);
2255 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2256 ewitab = _mm_slli_epi32(ewitab,2);
2257 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2258 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2259 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2260 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2261 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2262 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2263 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2264 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
2265 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2267 d = _mm256_sub_pd(r12,rswitch);
2268 d = _mm256_max_pd(d,_mm256_setzero_pd());
2269 d2 = _mm256_mul_pd(d,d);
2270 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2272 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2274 /* Evaluate switch function */
2275 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2276 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
2277 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2281 fscal = _mm256_and_pd(fscal,cutoff_mask);
2283 /* Calculate temporary vectorial force */
2284 tx = _mm256_mul_pd(fscal,dx12);
2285 ty = _mm256_mul_pd(fscal,dy12);
2286 tz = _mm256_mul_pd(fscal,dz12);
2288 /* Update vectorial force */
2289 fix1 = _mm256_add_pd(fix1,tx);
2290 fiy1 = _mm256_add_pd(fiy1,ty);
2291 fiz1 = _mm256_add_pd(fiz1,tz);
2293 fjx2 = _mm256_add_pd(fjx2,tx);
2294 fjy2 = _mm256_add_pd(fjy2,ty);
2295 fjz2 = _mm256_add_pd(fjz2,tz);
2299 /**************************
2300 * CALCULATE INTERACTIONS *
2301 **************************/
2303 if (gmx_mm256_any_lt(rsq20,rcutoff2))
2306 r20 = _mm256_mul_pd(rsq20,rinv20);
2308 /* EWALD ELECTROSTATICS */
2310 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2311 ewrt = _mm256_mul_pd(r20,ewtabscale);
2312 ewitab = _mm256_cvttpd_epi32(ewrt);
2313 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2314 ewitab = _mm_slli_epi32(ewitab,2);
2315 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2316 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2317 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2318 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2319 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2320 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2321 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2322 velec = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
2323 felec = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
2325 d = _mm256_sub_pd(r20,rswitch);
2326 d = _mm256_max_pd(d,_mm256_setzero_pd());
2327 d2 = _mm256_mul_pd(d,d);
2328 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2330 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2332 /* Evaluate switch function */
2333 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2334 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
2335 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2339 fscal = _mm256_and_pd(fscal,cutoff_mask);
2341 /* Calculate temporary vectorial force */
2342 tx = _mm256_mul_pd(fscal,dx20);
2343 ty = _mm256_mul_pd(fscal,dy20);
2344 tz = _mm256_mul_pd(fscal,dz20);
2346 /* Update vectorial force */
2347 fix2 = _mm256_add_pd(fix2,tx);
2348 fiy2 = _mm256_add_pd(fiy2,ty);
2349 fiz2 = _mm256_add_pd(fiz2,tz);
2351 fjx0 = _mm256_add_pd(fjx0,tx);
2352 fjy0 = _mm256_add_pd(fjy0,ty);
2353 fjz0 = _mm256_add_pd(fjz0,tz);
2357 /**************************
2358 * CALCULATE INTERACTIONS *
2359 **************************/
2361 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2364 r21 = _mm256_mul_pd(rsq21,rinv21);
2366 /* EWALD ELECTROSTATICS */
2368 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2369 ewrt = _mm256_mul_pd(r21,ewtabscale);
2370 ewitab = _mm256_cvttpd_epi32(ewrt);
2371 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2372 ewitab = _mm_slli_epi32(ewitab,2);
2373 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2374 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2375 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2376 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2377 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2378 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2379 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2380 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
2381 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2383 d = _mm256_sub_pd(r21,rswitch);
2384 d = _mm256_max_pd(d,_mm256_setzero_pd());
2385 d2 = _mm256_mul_pd(d,d);
2386 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2388 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2390 /* Evaluate switch function */
2391 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2392 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
2393 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2397 fscal = _mm256_and_pd(fscal,cutoff_mask);
2399 /* Calculate temporary vectorial force */
2400 tx = _mm256_mul_pd(fscal,dx21);
2401 ty = _mm256_mul_pd(fscal,dy21);
2402 tz = _mm256_mul_pd(fscal,dz21);
2404 /* Update vectorial force */
2405 fix2 = _mm256_add_pd(fix2,tx);
2406 fiy2 = _mm256_add_pd(fiy2,ty);
2407 fiz2 = _mm256_add_pd(fiz2,tz);
2409 fjx1 = _mm256_add_pd(fjx1,tx);
2410 fjy1 = _mm256_add_pd(fjy1,ty);
2411 fjz1 = _mm256_add_pd(fjz1,tz);
2415 /**************************
2416 * CALCULATE INTERACTIONS *
2417 **************************/
2419 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2422 r22 = _mm256_mul_pd(rsq22,rinv22);
2424 /* EWALD ELECTROSTATICS */
2426 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2427 ewrt = _mm256_mul_pd(r22,ewtabscale);
2428 ewitab = _mm256_cvttpd_epi32(ewrt);
2429 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2430 ewitab = _mm_slli_epi32(ewitab,2);
2431 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2432 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2433 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2434 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2435 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2436 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2437 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2438 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
2439 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
2441 d = _mm256_sub_pd(r22,rswitch);
2442 d = _mm256_max_pd(d,_mm256_setzero_pd());
2443 d2 = _mm256_mul_pd(d,d);
2444 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2446 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2448 /* Evaluate switch function */
2449 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2450 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
2451 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2455 fscal = _mm256_and_pd(fscal,cutoff_mask);
2457 /* Calculate temporary vectorial force */
2458 tx = _mm256_mul_pd(fscal,dx22);
2459 ty = _mm256_mul_pd(fscal,dy22);
2460 tz = _mm256_mul_pd(fscal,dz22);
2462 /* Update vectorial force */
2463 fix2 = _mm256_add_pd(fix2,tx);
2464 fiy2 = _mm256_add_pd(fiy2,ty);
2465 fiz2 = _mm256_add_pd(fiz2,tz);
2467 fjx2 = _mm256_add_pd(fjx2,tx);
2468 fjy2 = _mm256_add_pd(fjy2,ty);
2469 fjz2 = _mm256_add_pd(fjz2,tz);
2473 fjptrA = f+j_coord_offsetA;
2474 fjptrB = f+j_coord_offsetB;
2475 fjptrC = f+j_coord_offsetC;
2476 fjptrD = f+j_coord_offsetD;
2478 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2479 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2481 /* Inner loop uses 573 flops */
/* Epilogue: handle the final, possibly partially-filled group of up to 4 j atoms.
 * Dummy (padding) entries in jjnr carry a negative sign and are masked out. */
if(jidx<j_index_end)
/* Get j neighbor index, and coordinate index */
jnrlistA = jjnr[jidx];
jnrlistB = jjnr[jidx+1];
jnrlistC = jjnr[jidx+2];
jnrlistD = jjnr[jidx+3];
/* Sign of each element will be negative for non-real atoms.
 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
 */
tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
/* Widen the 4x32-bit lane mask to a 4x64-bit double-precision mask by
 * duplicating each 32-bit lane into both halves of a 64-bit lane. */
tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
/* Clamp dummy indices to atom 0 so the coordinate gathers stay in bounds;
 * their contributions are zeroed later through dummy_mask. */
jnrA = (jnrlistA>=0) ? jnrlistA : 0;
jnrB = (jnrlistB>=0) ? jnrlistB : 0;
jnrC = (jnrlistC>=0) ? jnrlistC : 0;
jnrD = (jnrlistD>=0) ? jnrlistD : 0;
j_coord_offsetA = DIM*jnrA;
j_coord_offsetB = DIM*jnrB;
j_coord_offsetC = DIM*jnrC;
j_coord_offsetD = DIM*jnrD;
/* load j atom coordinates */
/* Gather x/y/z of all 3 atoms of the 4 j waters into transposed SIMD registers */
gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                     x+j_coord_offsetC,x+j_coord_offsetD,
                                     &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
/* Calculate displacement vector */
/* dxIJ = iI - jJ for all 9 i-atom/j-atom pairs of the two waters */
dx00 = _mm256_sub_pd(ix0,jx0);
dy00 = _mm256_sub_pd(iy0,jy0);
dz00 = _mm256_sub_pd(iz0,jz0);
dx01 = _mm256_sub_pd(ix0,jx1);
dy01 = _mm256_sub_pd(iy0,jy1);
dz01 = _mm256_sub_pd(iz0,jz1);
dx02 = _mm256_sub_pd(ix0,jx2);
dy02 = _mm256_sub_pd(iy0,jy2);
dz02 = _mm256_sub_pd(iz0,jz2);
dx10 = _mm256_sub_pd(ix1,jx0);
dy10 = _mm256_sub_pd(iy1,jy0);
dz10 = _mm256_sub_pd(iz1,jz0);
dx11 = _mm256_sub_pd(ix1,jx1);
dy11 = _mm256_sub_pd(iy1,jy1);
dz11 = _mm256_sub_pd(iz1,jz1);
dx12 = _mm256_sub_pd(ix1,jx2);
dy12 = _mm256_sub_pd(iy1,jy2);
dz12 = _mm256_sub_pd(iz1,jz2);
dx20 = _mm256_sub_pd(ix2,jx0);
dy20 = _mm256_sub_pd(iy2,jy0);
dz20 = _mm256_sub_pd(iz2,jz0);
dx21 = _mm256_sub_pd(ix2,jx1);
dy21 = _mm256_sub_pd(iy2,jy1);
dz21 = _mm256_sub_pd(iz2,jz1);
dx22 = _mm256_sub_pd(ix2,jx2);
dy22 = _mm256_sub_pd(iy2,jy2);
dz22 = _mm256_sub_pd(iz2,jz2);
/* Calculate squared distance and things based on it */
rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
/* 1/r for each pair via SIMD inverse square root */
rinv00 = gmx_mm256_invsqrt_pd(rsq00);
rinv01 = gmx_mm256_invsqrt_pd(rsq01);
rinv02 = gmx_mm256_invsqrt_pd(rsq02);
rinv10 = gmx_mm256_invsqrt_pd(rsq10);
rinv11 = gmx_mm256_invsqrt_pd(rsq11);
rinv12 = gmx_mm256_invsqrt_pd(rsq12);
rinv20 = gmx_mm256_invsqrt_pd(rsq20);
rinv21 = gmx_mm256_invsqrt_pd(rsq21);
rinv22 = gmx_mm256_invsqrt_pd(rsq22);
/* 1/r^2 */
rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
/* Zero the per-iteration j-force accumulators */
fjx0 = _mm256_setzero_pd();
fjy0 = _mm256_setzero_pd();
fjz0 = _mm256_setzero_pd();
fjx1 = _mm256_setzero_pd();
fjy1 = _mm256_setzero_pd();
fjz1 = _mm256_setzero_pd();
fjx2 = _mm256_setzero_pd();
fjy2 = _mm256_setzero_pd();
fjz2 = _mm256_setzero_pd();
/**************************
 * CALCULATE INTERACTIONS *
 **************************/
/* i-atom 0 / j-atom 0 pair: tabulated-Ewald Coulomb plus Lennard-Jones,
 * both modified by the potential-switch function. */
if (gmx_mm256_any_lt(rsq00,rcutoff2))
r00 = _mm256_mul_pd(rsq00,rinv00);
/* Zero r for dummy lanes so the table lookup index is safe */
r00 = _mm256_andnot_pd(dummy_mask,r00);
/* EWALD ELECTROSTATICS */
/* Calculate Ewald table index by multiplying r with scale and truncate to integer */
ewrt = _mm256_mul_pd(r00,ewtabscale);
ewitab = _mm256_cvttpd_epi32(ewrt);
eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
/* Each table entry is 4 doubles (F,D,V,Fn), so scale index by 4 */
ewitab = _mm_slli_epi32(ewitab,2);
ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
/* Linear interpolation of force, quadratic-consistent potential */
felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
velec = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
felec = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
/* LENNARD-JONES DISPERSION/REPULSION */
rinvsix = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
vvdw6 = _mm256_mul_pd(c6_00,rinvsix);
vvdw12 = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
vvdw = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
fvdw = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
/* Potential switch: d = max(r - rswitch, 0) */
d = _mm256_sub_pd(r00,rswitch);
d = _mm256_max_pd(d,_mm256_setzero_pd());
d2 = _mm256_mul_pd(d,d);
sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
/* Evaluate switch function */
/* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
fvdw = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
fscal = _mm256_add_pd(felec,fvdw);
/* Zero forces beyond the cutoff and on dummy lanes */
fscal = _mm256_and_pd(fscal,cutoff_mask);
fscal = _mm256_andnot_pd(dummy_mask,fscal);
/* Calculate temporary vectorial force */
tx = _mm256_mul_pd(fscal,dx00);
ty = _mm256_mul_pd(fscal,dy00);
tz = _mm256_mul_pd(fscal,dz00);
/* Update vectorial force */
fix0 = _mm256_add_pd(fix0,tx);
fiy0 = _mm256_add_pd(fiy0,ty);
fiz0 = _mm256_add_pd(fiz0,tz);
fjx0 = _mm256_add_pd(fjx0,tx);
fjy0 = _mm256_add_pd(fjy0,ty);
fjz0 = _mm256_add_pd(fjz0,tz);
/**************************
 * CALCULATE INTERACTIONS *
 **************************/
/* i-atom 0 / j-atom 1 pair: Coulomb-only (tabulated Ewald + potential switch) */
if (gmx_mm256_any_lt(rsq01,rcutoff2))
r01 = _mm256_mul_pd(rsq01,rinv01);
r01 = _mm256_andnot_pd(dummy_mask,r01);
/* EWALD ELECTROSTATICS */
/* Calculate Ewald table index by multiplying r with scale and truncate to integer */
ewrt = _mm256_mul_pd(r01,ewtabscale);
ewitab = _mm256_cvttpd_epi32(ewrt);
eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
ewitab = _mm_slli_epi32(ewitab,2);
ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
velec = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
felec = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
/* Potential switch polynomial and derivative (see pair 0-0 for the form) */
d = _mm256_sub_pd(r01,rswitch);
d = _mm256_max_pd(d,_mm256_setzero_pd());
d2 = _mm256_mul_pd(d,d);
sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
/* Evaluate switch function */
/* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
/* NOTE(review): "fscal = felec;" from the generator output is not visible in
 * this extract (line numbering jumps here) — confirm against the generated file. */
fscal = _mm256_and_pd(fscal,cutoff_mask);
fscal = _mm256_andnot_pd(dummy_mask,fscal);
/* Calculate temporary vectorial force */
tx = _mm256_mul_pd(fscal,dx01);
ty = _mm256_mul_pd(fscal,dy01);
tz = _mm256_mul_pd(fscal,dz01);
/* Update vectorial force */
fix0 = _mm256_add_pd(fix0,tx);
fiy0 = _mm256_add_pd(fiy0,ty);
fiz0 = _mm256_add_pd(fiz0,tz);
fjx1 = _mm256_add_pd(fjx1,tx);
fjy1 = _mm256_add_pd(fjy1,ty);
fjz1 = _mm256_add_pd(fjz1,tz);
/**************************
 * CALCULATE INTERACTIONS *
 **************************/
/* i-atom 0 / j-atom 2 pair: Coulomb-only (tabulated Ewald + potential switch) */
if (gmx_mm256_any_lt(rsq02,rcutoff2))
r02 = _mm256_mul_pd(rsq02,rinv02);
r02 = _mm256_andnot_pd(dummy_mask,r02);
/* EWALD ELECTROSTATICS */
/* Calculate Ewald table index by multiplying r with scale and truncate to integer */
ewrt = _mm256_mul_pd(r02,ewtabscale);
ewitab = _mm256_cvttpd_epi32(ewrt);
eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
ewitab = _mm_slli_epi32(ewitab,2);
ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
velec = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
felec = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
/* Potential switch polynomial and derivative (see pair 0-0 for the form) */
d = _mm256_sub_pd(r02,rswitch);
d = _mm256_max_pd(d,_mm256_setzero_pd());
d2 = _mm256_mul_pd(d,d);
sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
/* Evaluate switch function */
/* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
/* NOTE(review): "fscal = felec;" from the generator output is not visible in
 * this extract (line numbering jumps here) — confirm against the generated file. */
fscal = _mm256_and_pd(fscal,cutoff_mask);
fscal = _mm256_andnot_pd(dummy_mask,fscal);
/* Calculate temporary vectorial force */
tx = _mm256_mul_pd(fscal,dx02);
ty = _mm256_mul_pd(fscal,dy02);
tz = _mm256_mul_pd(fscal,dz02);
/* Update vectorial force */
fix0 = _mm256_add_pd(fix0,tx);
fiy0 = _mm256_add_pd(fiy0,ty);
fiz0 = _mm256_add_pd(fiz0,tz);
fjx2 = _mm256_add_pd(fjx2,tx);
fjy2 = _mm256_add_pd(fjy2,ty);
fjz2 = _mm256_add_pd(fjz2,tz);
/**************************
 * CALCULATE INTERACTIONS *
 **************************/
/* i-atom 1 / j-atom 0 pair: Coulomb-only (tabulated Ewald + potential switch) */
if (gmx_mm256_any_lt(rsq10,rcutoff2))
r10 = _mm256_mul_pd(rsq10,rinv10);
r10 = _mm256_andnot_pd(dummy_mask,r10);
/* EWALD ELECTROSTATICS */
/* Calculate Ewald table index by multiplying r with scale and truncate to integer */
ewrt = _mm256_mul_pd(r10,ewtabscale);
ewitab = _mm256_cvttpd_epi32(ewrt);
eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
ewitab = _mm_slli_epi32(ewitab,2);
ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
velec = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
felec = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
/* Potential switch polynomial and derivative (see pair 0-0 for the form) */
d = _mm256_sub_pd(r10,rswitch);
d = _mm256_max_pd(d,_mm256_setzero_pd());
d2 = _mm256_mul_pd(d,d);
sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
/* Evaluate switch function */
/* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
/* NOTE(review): "fscal = felec;" from the generator output is not visible in
 * this extract (line numbering jumps here) — confirm against the generated file. */
fscal = _mm256_and_pd(fscal,cutoff_mask);
fscal = _mm256_andnot_pd(dummy_mask,fscal);
/* Calculate temporary vectorial force */
tx = _mm256_mul_pd(fscal,dx10);
ty = _mm256_mul_pd(fscal,dy10);
tz = _mm256_mul_pd(fscal,dz10);
/* Update vectorial force */
fix1 = _mm256_add_pd(fix1,tx);
fiy1 = _mm256_add_pd(fiy1,ty);
fiz1 = _mm256_add_pd(fiz1,tz);
fjx0 = _mm256_add_pd(fjx0,tx);
fjy0 = _mm256_add_pd(fjy0,ty);
fjz0 = _mm256_add_pd(fjz0,tz);
/**************************
 * CALCULATE INTERACTIONS *
 **************************/
/* i-atom 1 / j-atom 1 pair: Coulomb-only (tabulated Ewald + potential switch) */
if (gmx_mm256_any_lt(rsq11,rcutoff2))
r11 = _mm256_mul_pd(rsq11,rinv11);
r11 = _mm256_andnot_pd(dummy_mask,r11);
/* EWALD ELECTROSTATICS */
/* Calculate Ewald table index by multiplying r with scale and truncate to integer */
ewrt = _mm256_mul_pd(r11,ewtabscale);
ewitab = _mm256_cvttpd_epi32(ewrt);
eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
ewitab = _mm_slli_epi32(ewitab,2);
ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
velec = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
/* Potential switch polynomial and derivative (see pair 0-0 for the form) */
d = _mm256_sub_pd(r11,rswitch);
d = _mm256_max_pd(d,_mm256_setzero_pd());
d2 = _mm256_mul_pd(d,d);
sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
/* Evaluate switch function */
/* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
/* NOTE(review): "fscal = felec;" from the generator output is not visible in
 * this extract (line numbering jumps here) — confirm against the generated file. */
fscal = _mm256_and_pd(fscal,cutoff_mask);
fscal = _mm256_andnot_pd(dummy_mask,fscal);
/* Calculate temporary vectorial force */
tx = _mm256_mul_pd(fscal,dx11);
ty = _mm256_mul_pd(fscal,dy11);
tz = _mm256_mul_pd(fscal,dz11);
/* Update vectorial force */
fix1 = _mm256_add_pd(fix1,tx);
fiy1 = _mm256_add_pd(fiy1,ty);
fiz1 = _mm256_add_pd(fiz1,tz);
fjx1 = _mm256_add_pd(fjx1,tx);
fjy1 = _mm256_add_pd(fjy1,ty);
fjz1 = _mm256_add_pd(fjz1,tz);
/**************************
 * CALCULATE INTERACTIONS *
 **************************/
/* i-atom 1 / j-atom 2 pair: Coulomb-only (tabulated Ewald + potential switch) */
if (gmx_mm256_any_lt(rsq12,rcutoff2))
r12 = _mm256_mul_pd(rsq12,rinv12);
r12 = _mm256_andnot_pd(dummy_mask,r12);
/* EWALD ELECTROSTATICS */
/* Calculate Ewald table index by multiplying r with scale and truncate to integer */
ewrt = _mm256_mul_pd(r12,ewtabscale);
ewitab = _mm256_cvttpd_epi32(ewrt);
eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
ewitab = _mm_slli_epi32(ewitab,2);
ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
velec = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
/* Potential switch polynomial and derivative (see pair 0-0 for the form) */
d = _mm256_sub_pd(r12,rswitch);
d = _mm256_max_pd(d,_mm256_setzero_pd());
d2 = _mm256_mul_pd(d,d);
sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
/* Evaluate switch function */
/* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
/* NOTE(review): "fscal = felec;" from the generator output is not visible in
 * this extract (line numbering jumps here) — confirm against the generated file. */
fscal = _mm256_and_pd(fscal,cutoff_mask);
fscal = _mm256_andnot_pd(dummy_mask,fscal);
/* Calculate temporary vectorial force */
tx = _mm256_mul_pd(fscal,dx12);
ty = _mm256_mul_pd(fscal,dy12);
tz = _mm256_mul_pd(fscal,dz12);
/* Update vectorial force */
fix1 = _mm256_add_pd(fix1,tx);
fiy1 = _mm256_add_pd(fiy1,ty);
fiz1 = _mm256_add_pd(fiz1,tz);
fjx2 = _mm256_add_pd(fjx2,tx);
fjy2 = _mm256_add_pd(fjy2,ty);
fjz2 = _mm256_add_pd(fjz2,tz);
/**************************
 * CALCULATE INTERACTIONS *
 **************************/
/* i-atom 2 / j-atom 0 pair: Coulomb-only (tabulated Ewald + potential switch) */
if (gmx_mm256_any_lt(rsq20,rcutoff2))
r20 = _mm256_mul_pd(rsq20,rinv20);
r20 = _mm256_andnot_pd(dummy_mask,r20);
/* EWALD ELECTROSTATICS */
/* Calculate Ewald table index by multiplying r with scale and truncate to integer */
ewrt = _mm256_mul_pd(r20,ewtabscale);
ewitab = _mm256_cvttpd_epi32(ewrt);
eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
ewitab = _mm_slli_epi32(ewitab,2);
ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
velec = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
felec = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
/* Potential switch polynomial and derivative (see pair 0-0 for the form) */
d = _mm256_sub_pd(r20,rswitch);
d = _mm256_max_pd(d,_mm256_setzero_pd());
d2 = _mm256_mul_pd(d,d);
sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
/* Evaluate switch function */
/* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
/* NOTE(review): "fscal = felec;" from the generator output is not visible in
 * this extract (line numbering jumps here) — confirm against the generated file. */
fscal = _mm256_and_pd(fscal,cutoff_mask);
fscal = _mm256_andnot_pd(dummy_mask,fscal);
/* Calculate temporary vectorial force */
tx = _mm256_mul_pd(fscal,dx20);
ty = _mm256_mul_pd(fscal,dy20);
tz = _mm256_mul_pd(fscal,dz20);
/* Update vectorial force */
fix2 = _mm256_add_pd(fix2,tx);
fiy2 = _mm256_add_pd(fiy2,ty);
fiz2 = _mm256_add_pd(fiz2,tz);
fjx0 = _mm256_add_pd(fjx0,tx);
fjy0 = _mm256_add_pd(fjy0,ty);
fjz0 = _mm256_add_pd(fjz0,tz);
/**************************
 * CALCULATE INTERACTIONS *
 **************************/
/* i-atom 2 / j-atom 1 pair: Coulomb-only (tabulated Ewald + potential switch) */
if (gmx_mm256_any_lt(rsq21,rcutoff2))
r21 = _mm256_mul_pd(rsq21,rinv21);
r21 = _mm256_andnot_pd(dummy_mask,r21);
/* EWALD ELECTROSTATICS */
/* Calculate Ewald table index by multiplying r with scale and truncate to integer */
ewrt = _mm256_mul_pd(r21,ewtabscale);
ewitab = _mm256_cvttpd_epi32(ewrt);
eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
ewitab = _mm_slli_epi32(ewitab,2);
ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
velec = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
/* Potential switch polynomial and derivative (see pair 0-0 for the form) */
d = _mm256_sub_pd(r21,rswitch);
d = _mm256_max_pd(d,_mm256_setzero_pd());
d2 = _mm256_mul_pd(d,d);
sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
/* Evaluate switch function */
/* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
/* NOTE(review): "fscal = felec;" from the generator output is not visible in
 * this extract (line numbering jumps here) — confirm against the generated file. */
fscal = _mm256_and_pd(fscal,cutoff_mask);
fscal = _mm256_andnot_pd(dummy_mask,fscal);
/* Calculate temporary vectorial force */
tx = _mm256_mul_pd(fscal,dx21);
ty = _mm256_mul_pd(fscal,dy21);
tz = _mm256_mul_pd(fscal,dz21);
/* Update vectorial force */
fix2 = _mm256_add_pd(fix2,tx);
fiy2 = _mm256_add_pd(fiy2,ty);
fiz2 = _mm256_add_pd(fiz2,tz);
fjx1 = _mm256_add_pd(fjx1,tx);
fjy1 = _mm256_add_pd(fjy1,ty);
fjz1 = _mm256_add_pd(fjz1,tz);
/**************************
 * CALCULATE INTERACTIONS *
 **************************/
/* i-atom 2 / j-atom 2 pair: Coulomb-only (tabulated Ewald + potential switch) */
if (gmx_mm256_any_lt(rsq22,rcutoff2))
r22 = _mm256_mul_pd(rsq22,rinv22);
r22 = _mm256_andnot_pd(dummy_mask,r22);
/* EWALD ELECTROSTATICS */
/* Calculate Ewald table index by multiplying r with scale and truncate to integer */
ewrt = _mm256_mul_pd(r22,ewtabscale);
ewitab = _mm256_cvttpd_epi32(ewrt);
eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
ewitab = _mm_slli_epi32(ewitab,2);
ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
velec = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
/* Potential switch polynomial and derivative (see pair 0-0 for the form) */
d = _mm256_sub_pd(r22,rswitch);
d = _mm256_max_pd(d,_mm256_setzero_pd());
d2 = _mm256_mul_pd(d,d);
sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
/* Evaluate switch function */
/* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
/* NOTE(review): "fscal = felec;" from the generator output is not visible in
 * this extract (line numbering jumps here) — confirm against the generated file. */
fscal = _mm256_and_pd(fscal,cutoff_mask);
fscal = _mm256_andnot_pd(dummy_mask,fscal);
/* Calculate temporary vectorial force */
tx = _mm256_mul_pd(fscal,dx22);
ty = _mm256_mul_pd(fscal,dy22);
tz = _mm256_mul_pd(fscal,dz22);
/* Update vectorial force */
fix2 = _mm256_add_pd(fix2,tx);
fiy2 = _mm256_add_pd(fiy2,ty);
fiz2 = _mm256_add_pd(fiz2,tz);
fjx2 = _mm256_add_pd(fjx2,tx);
fjy2 = _mm256_add_pd(fjy2,ty);
fjz2 = _mm256_add_pd(fjz2,tz);
/* Scatter j forces; dummy lanes are redirected to the scratch buffer so the
 * masked (zeroed) contributions never touch real force memory. */
fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
                                          fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
/* Inner loop uses 582 flops */
/* End of innermost loop */
/* Reduce and add the accumulated i forces (3 water atoms) to f and fshift */
gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                         f+i_coord_offset,fshift+i_shift_offset);
/* Increment number of inner iterations */
inneriter += j_index_end - j_index_start;
/* Outer loop uses 18 flops */
/* Increment number of outer iterations */
/* NOTE(review): the "outeriter++" statement from the generator output is not
 * visible in this extract (line numbering jumps here). */
/* Update outer/inner flops */
/* Book the flop counts with the nrnb accounting for this kernel type */
inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*582);