2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/legacyheaders/types/simple.h"
46 #include "gromacs/math/vec.h"
47 #include "gromacs/legacyheaders/nrnb.h"
49 #include "gromacs/simd/math_x86_avx_256_double.h"
50 #include "kernelutil_x86_avx_256_double.h"
53 * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_double
54 * Electrostatics interaction: Ewald
55 * VdW interaction: None
56 * Geometry: Water3-Water3
57 * Calculate force/pot: PotentialAndForce
60 nb_kernel_ElecEwSw_VdwNone_GeomW3W3_VF_avx_256_double
61 (t_nblist * gmx_restrict nlist,
62 rvec * gmx_restrict xx,
63 rvec * gmx_restrict ff,
64 t_forcerec * gmx_restrict fr,
65 t_mdatoms * gmx_restrict mdatoms,
66 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67 t_nrnb * gmx_restrict nrnb)
69 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
70 * just 0 for non-waters.
71 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
72 * jnr indices corresponding to data put in the four positions in the SIMD register.
74 int i_shift_offset,i_coord_offset,outeriter,inneriter;
75 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76 int jnrA,jnrB,jnrC,jnrD;
77 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
79 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
80 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
82 real *shiftvec,*fshift,*x,*f;
83 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
85 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86 real * vdwioffsetptr0;
87 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88 real * vdwioffsetptr1;
89 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90 real * vdwioffsetptr2;
91 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
93 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
94 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
95 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
96 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
97 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
98 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
99 __m256d dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
100 __m256d dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
101 __m256d dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
102 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
103 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
104 __m256d dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
105 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
106 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
107 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
110 __m256d ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
111 __m256d beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
113 __m256d rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
114 real rswitch_scalar,d_scalar;
115 __m256d dummy_mask,cutoff_mask;
116 __m128 tmpmask0,tmpmask1;
117 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
118 __m256d one = _mm256_set1_pd(1.0);
119 __m256d two = _mm256_set1_pd(2.0);
125 jindex = nlist->jindex;
127 shiftidx = nlist->shift;
129 shiftvec = fr->shift_vec[0];
130 fshift = fr->fshift[0];
131 facel = _mm256_set1_pd(fr->epsfac);
132 charge = mdatoms->chargeA;
134 sh_ewald = _mm256_set1_pd(fr->ic->sh_ewald);
135 beta = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
136 beta2 = _mm256_mul_pd(beta,beta);
137 beta3 = _mm256_mul_pd(beta,beta2);
139 ewtab = fr->ic->tabq_coul_FDV0;
140 ewtabscale = _mm256_set1_pd(fr->ic->tabq_scale);
141 ewtabhalfspace = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
143 /* Setup water-specific parameters */
144 inr = nlist->iinr[0];
145 iq0 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
146 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
147 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
149 jq0 = _mm256_set1_pd(charge[inr+0]);
150 jq1 = _mm256_set1_pd(charge[inr+1]);
151 jq2 = _mm256_set1_pd(charge[inr+2]);
152 qq00 = _mm256_mul_pd(iq0,jq0);
153 qq01 = _mm256_mul_pd(iq0,jq1);
154 qq02 = _mm256_mul_pd(iq0,jq2);
155 qq10 = _mm256_mul_pd(iq1,jq0);
156 qq11 = _mm256_mul_pd(iq1,jq1);
157 qq12 = _mm256_mul_pd(iq1,jq2);
158 qq20 = _mm256_mul_pd(iq2,jq0);
159 qq21 = _mm256_mul_pd(iq2,jq1);
160 qq22 = _mm256_mul_pd(iq2,jq2);
162 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
163 rcutoff_scalar = fr->rcoulomb;
164 rcutoff = _mm256_set1_pd(rcutoff_scalar);
165 rcutoff2 = _mm256_mul_pd(rcutoff,rcutoff);
167 rswitch_scalar = fr->rcoulomb_switch;
168 rswitch = _mm256_set1_pd(rswitch_scalar);
169 /* Setup switch parameters */
170 d_scalar = rcutoff_scalar-rswitch_scalar;
171 d = _mm256_set1_pd(d_scalar);
172 swV3 = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
173 swV4 = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
174 swV5 = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
175 swF2 = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
176 swF3 = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
177 swF4 = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
179 /* Avoid stupid compiler warnings */
180 jnrA = jnrB = jnrC = jnrD = 0;
189 for(iidx=0;iidx<4*DIM;iidx++)
194 /* Start outer loop over neighborlists */
195 for(iidx=0; iidx<nri; iidx++)
197 /* Load shift vector for this list */
198 i_shift_offset = DIM*shiftidx[iidx];
200 /* Load limits for loop over neighbors */
201 j_index_start = jindex[iidx];
202 j_index_end = jindex[iidx+1];
204 /* Get outer coordinate index */
206 i_coord_offset = DIM*inr;
208 /* Load i particle coords and add shift vector */
209 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
210 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
212 fix0 = _mm256_setzero_pd();
213 fiy0 = _mm256_setzero_pd();
214 fiz0 = _mm256_setzero_pd();
215 fix1 = _mm256_setzero_pd();
216 fiy1 = _mm256_setzero_pd();
217 fiz1 = _mm256_setzero_pd();
218 fix2 = _mm256_setzero_pd();
219 fiy2 = _mm256_setzero_pd();
220 fiz2 = _mm256_setzero_pd();
222 /* Reset potential sums */
223 velecsum = _mm256_setzero_pd();
225 /* Start inner kernel loop */
226 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
229 /* Get j neighbor index, and coordinate index */
234 j_coord_offsetA = DIM*jnrA;
235 j_coord_offsetB = DIM*jnrB;
236 j_coord_offsetC = DIM*jnrC;
237 j_coord_offsetD = DIM*jnrD;
239 /* load j atom coordinates */
240 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
241 x+j_coord_offsetC,x+j_coord_offsetD,
242 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
244 /* Calculate displacement vector */
245 dx00 = _mm256_sub_pd(ix0,jx0);
246 dy00 = _mm256_sub_pd(iy0,jy0);
247 dz00 = _mm256_sub_pd(iz0,jz0);
248 dx01 = _mm256_sub_pd(ix0,jx1);
249 dy01 = _mm256_sub_pd(iy0,jy1);
250 dz01 = _mm256_sub_pd(iz0,jz1);
251 dx02 = _mm256_sub_pd(ix0,jx2);
252 dy02 = _mm256_sub_pd(iy0,jy2);
253 dz02 = _mm256_sub_pd(iz0,jz2);
254 dx10 = _mm256_sub_pd(ix1,jx0);
255 dy10 = _mm256_sub_pd(iy1,jy0);
256 dz10 = _mm256_sub_pd(iz1,jz0);
257 dx11 = _mm256_sub_pd(ix1,jx1);
258 dy11 = _mm256_sub_pd(iy1,jy1);
259 dz11 = _mm256_sub_pd(iz1,jz1);
260 dx12 = _mm256_sub_pd(ix1,jx2);
261 dy12 = _mm256_sub_pd(iy1,jy2);
262 dz12 = _mm256_sub_pd(iz1,jz2);
263 dx20 = _mm256_sub_pd(ix2,jx0);
264 dy20 = _mm256_sub_pd(iy2,jy0);
265 dz20 = _mm256_sub_pd(iz2,jz0);
266 dx21 = _mm256_sub_pd(ix2,jx1);
267 dy21 = _mm256_sub_pd(iy2,jy1);
268 dz21 = _mm256_sub_pd(iz2,jz1);
269 dx22 = _mm256_sub_pd(ix2,jx2);
270 dy22 = _mm256_sub_pd(iy2,jy2);
271 dz22 = _mm256_sub_pd(iz2,jz2);
273 /* Calculate squared distance and things based on it */
274 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
275 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
276 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
277 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
278 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
279 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
280 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
281 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
282 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
284 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
285 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
286 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
287 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
288 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
289 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
290 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
291 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
292 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
294 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
295 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
296 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
297 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
298 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
299 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
300 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
301 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
302 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
304 fjx0 = _mm256_setzero_pd();
305 fjy0 = _mm256_setzero_pd();
306 fjz0 = _mm256_setzero_pd();
307 fjx1 = _mm256_setzero_pd();
308 fjy1 = _mm256_setzero_pd();
309 fjz1 = _mm256_setzero_pd();
310 fjx2 = _mm256_setzero_pd();
311 fjy2 = _mm256_setzero_pd();
312 fjz2 = _mm256_setzero_pd();
314 /**************************
315 * CALCULATE INTERACTIONS *
316 **************************/
318 if (gmx_mm256_any_lt(rsq00,rcutoff2))
321 r00 = _mm256_mul_pd(rsq00,rinv00);
323 /* EWALD ELECTROSTATICS */
325 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
326 ewrt = _mm256_mul_pd(r00,ewtabscale);
327 ewitab = _mm256_cvttpd_epi32(ewrt);
328 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
329 ewitab = _mm_slli_epi32(ewitab,2);
330 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
331 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
332 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
333 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
334 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
335 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
336 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
337 velec = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
338 felec = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
340 d = _mm256_sub_pd(r00,rswitch);
341 d = _mm256_max_pd(d,_mm256_setzero_pd());
342 d2 = _mm256_mul_pd(d,d);
343 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
345 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
347 /* Evaluate switch function */
348 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
349 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
350 velec = _mm256_mul_pd(velec,sw);
351 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
353 /* Update potential sum for this i atom from the interaction with this j atom. */
354 velec = _mm256_and_pd(velec,cutoff_mask);
355 velecsum = _mm256_add_pd(velecsum,velec);
359 fscal = _mm256_and_pd(fscal,cutoff_mask);
361 /* Calculate temporary vectorial force */
362 tx = _mm256_mul_pd(fscal,dx00);
363 ty = _mm256_mul_pd(fscal,dy00);
364 tz = _mm256_mul_pd(fscal,dz00);
366 /* Update vectorial force */
367 fix0 = _mm256_add_pd(fix0,tx);
368 fiy0 = _mm256_add_pd(fiy0,ty);
369 fiz0 = _mm256_add_pd(fiz0,tz);
371 fjx0 = _mm256_add_pd(fjx0,tx);
372 fjy0 = _mm256_add_pd(fjy0,ty);
373 fjz0 = _mm256_add_pd(fjz0,tz);
377 /**************************
378 * CALCULATE INTERACTIONS *
379 **************************/
381 if (gmx_mm256_any_lt(rsq01,rcutoff2))
384 r01 = _mm256_mul_pd(rsq01,rinv01);
386 /* EWALD ELECTROSTATICS */
388 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
389 ewrt = _mm256_mul_pd(r01,ewtabscale);
390 ewitab = _mm256_cvttpd_epi32(ewrt);
391 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
392 ewitab = _mm_slli_epi32(ewitab,2);
393 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
394 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
395 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
396 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
397 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
398 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
399 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
400 velec = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
401 felec = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
403 d = _mm256_sub_pd(r01,rswitch);
404 d = _mm256_max_pd(d,_mm256_setzero_pd());
405 d2 = _mm256_mul_pd(d,d);
406 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
408 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
410 /* Evaluate switch function */
411 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
412 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
413 velec = _mm256_mul_pd(velec,sw);
414 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
416 /* Update potential sum for this i atom from the interaction with this j atom. */
417 velec = _mm256_and_pd(velec,cutoff_mask);
418 velecsum = _mm256_add_pd(velecsum,velec);
422 fscal = _mm256_and_pd(fscal,cutoff_mask);
424 /* Calculate temporary vectorial force */
425 tx = _mm256_mul_pd(fscal,dx01);
426 ty = _mm256_mul_pd(fscal,dy01);
427 tz = _mm256_mul_pd(fscal,dz01);
429 /* Update vectorial force */
430 fix0 = _mm256_add_pd(fix0,tx);
431 fiy0 = _mm256_add_pd(fiy0,ty);
432 fiz0 = _mm256_add_pd(fiz0,tz);
434 fjx1 = _mm256_add_pd(fjx1,tx);
435 fjy1 = _mm256_add_pd(fjy1,ty);
436 fjz1 = _mm256_add_pd(fjz1,tz);
440 /**************************
441 * CALCULATE INTERACTIONS *
442 **************************/
444 if (gmx_mm256_any_lt(rsq02,rcutoff2))
447 r02 = _mm256_mul_pd(rsq02,rinv02);
449 /* EWALD ELECTROSTATICS */
451 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
452 ewrt = _mm256_mul_pd(r02,ewtabscale);
453 ewitab = _mm256_cvttpd_epi32(ewrt);
454 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
455 ewitab = _mm_slli_epi32(ewitab,2);
456 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
457 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
458 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
459 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
460 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
461 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
462 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
463 velec = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
464 felec = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
466 d = _mm256_sub_pd(r02,rswitch);
467 d = _mm256_max_pd(d,_mm256_setzero_pd());
468 d2 = _mm256_mul_pd(d,d);
469 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
471 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
473 /* Evaluate switch function */
474 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
475 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
476 velec = _mm256_mul_pd(velec,sw);
477 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
479 /* Update potential sum for this i atom from the interaction with this j atom. */
480 velec = _mm256_and_pd(velec,cutoff_mask);
481 velecsum = _mm256_add_pd(velecsum,velec);
485 fscal = _mm256_and_pd(fscal,cutoff_mask);
487 /* Calculate temporary vectorial force */
488 tx = _mm256_mul_pd(fscal,dx02);
489 ty = _mm256_mul_pd(fscal,dy02);
490 tz = _mm256_mul_pd(fscal,dz02);
492 /* Update vectorial force */
493 fix0 = _mm256_add_pd(fix0,tx);
494 fiy0 = _mm256_add_pd(fiy0,ty);
495 fiz0 = _mm256_add_pd(fiz0,tz);
497 fjx2 = _mm256_add_pd(fjx2,tx);
498 fjy2 = _mm256_add_pd(fjy2,ty);
499 fjz2 = _mm256_add_pd(fjz2,tz);
503 /**************************
504 * CALCULATE INTERACTIONS *
505 **************************/
507 if (gmx_mm256_any_lt(rsq10,rcutoff2))
510 r10 = _mm256_mul_pd(rsq10,rinv10);
512 /* EWALD ELECTROSTATICS */
514 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
515 ewrt = _mm256_mul_pd(r10,ewtabscale);
516 ewitab = _mm256_cvttpd_epi32(ewrt);
517 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
518 ewitab = _mm_slli_epi32(ewitab,2);
519 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
520 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
521 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
522 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
523 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
524 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
525 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
526 velec = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
527 felec = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
529 d = _mm256_sub_pd(r10,rswitch);
530 d = _mm256_max_pd(d,_mm256_setzero_pd());
531 d2 = _mm256_mul_pd(d,d);
532 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
534 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
536 /* Evaluate switch function */
537 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
538 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
539 velec = _mm256_mul_pd(velec,sw);
540 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
542 /* Update potential sum for this i atom from the interaction with this j atom. */
543 velec = _mm256_and_pd(velec,cutoff_mask);
544 velecsum = _mm256_add_pd(velecsum,velec);
548 fscal = _mm256_and_pd(fscal,cutoff_mask);
550 /* Calculate temporary vectorial force */
551 tx = _mm256_mul_pd(fscal,dx10);
552 ty = _mm256_mul_pd(fscal,dy10);
553 tz = _mm256_mul_pd(fscal,dz10);
555 /* Update vectorial force */
556 fix1 = _mm256_add_pd(fix1,tx);
557 fiy1 = _mm256_add_pd(fiy1,ty);
558 fiz1 = _mm256_add_pd(fiz1,tz);
560 fjx0 = _mm256_add_pd(fjx0,tx);
561 fjy0 = _mm256_add_pd(fjy0,ty);
562 fjz0 = _mm256_add_pd(fjz0,tz);
566 /**************************
567 * CALCULATE INTERACTIONS *
568 **************************/
570 if (gmx_mm256_any_lt(rsq11,rcutoff2))
573 r11 = _mm256_mul_pd(rsq11,rinv11);
575 /* EWALD ELECTROSTATICS */
577 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
578 ewrt = _mm256_mul_pd(r11,ewtabscale);
579 ewitab = _mm256_cvttpd_epi32(ewrt);
580 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
581 ewitab = _mm_slli_epi32(ewitab,2);
582 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
583 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
584 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
585 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
586 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
587 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
588 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
589 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
590 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
592 d = _mm256_sub_pd(r11,rswitch);
593 d = _mm256_max_pd(d,_mm256_setzero_pd());
594 d2 = _mm256_mul_pd(d,d);
595 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
597 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
599 /* Evaluate switch function */
600 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
601 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
602 velec = _mm256_mul_pd(velec,sw);
603 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
605 /* Update potential sum for this i atom from the interaction with this j atom. */
606 velec = _mm256_and_pd(velec,cutoff_mask);
607 velecsum = _mm256_add_pd(velecsum,velec);
611 fscal = _mm256_and_pd(fscal,cutoff_mask);
613 /* Calculate temporary vectorial force */
614 tx = _mm256_mul_pd(fscal,dx11);
615 ty = _mm256_mul_pd(fscal,dy11);
616 tz = _mm256_mul_pd(fscal,dz11);
618 /* Update vectorial force */
619 fix1 = _mm256_add_pd(fix1,tx);
620 fiy1 = _mm256_add_pd(fiy1,ty);
621 fiz1 = _mm256_add_pd(fiz1,tz);
623 fjx1 = _mm256_add_pd(fjx1,tx);
624 fjy1 = _mm256_add_pd(fjy1,ty);
625 fjz1 = _mm256_add_pd(fjz1,tz);
629 /**************************
630 * CALCULATE INTERACTIONS *
631 **************************/
633 if (gmx_mm256_any_lt(rsq12,rcutoff2))
636 r12 = _mm256_mul_pd(rsq12,rinv12);
638 /* EWALD ELECTROSTATICS */
640 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
641 ewrt = _mm256_mul_pd(r12,ewtabscale);
642 ewitab = _mm256_cvttpd_epi32(ewrt);
643 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
644 ewitab = _mm_slli_epi32(ewitab,2);
645 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
646 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
647 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
648 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
649 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
650 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
651 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
652 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
653 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
655 d = _mm256_sub_pd(r12,rswitch);
656 d = _mm256_max_pd(d,_mm256_setzero_pd());
657 d2 = _mm256_mul_pd(d,d);
658 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
660 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
662 /* Evaluate switch function */
663 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
664 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
665 velec = _mm256_mul_pd(velec,sw);
666 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
668 /* Update potential sum for this i atom from the interaction with this j atom. */
669 velec = _mm256_and_pd(velec,cutoff_mask);
670 velecsum = _mm256_add_pd(velecsum,velec);
674 fscal = _mm256_and_pd(fscal,cutoff_mask);
676 /* Calculate temporary vectorial force */
677 tx = _mm256_mul_pd(fscal,dx12);
678 ty = _mm256_mul_pd(fscal,dy12);
679 tz = _mm256_mul_pd(fscal,dz12);
681 /* Update vectorial force */
682 fix1 = _mm256_add_pd(fix1,tx);
683 fiy1 = _mm256_add_pd(fiy1,ty);
684 fiz1 = _mm256_add_pd(fiz1,tz);
686 fjx2 = _mm256_add_pd(fjx2,tx);
687 fjy2 = _mm256_add_pd(fjy2,ty);
688 fjz2 = _mm256_add_pd(fjz2,tz);
692 /**************************
693 * CALCULATE INTERACTIONS *
694 **************************/
696 if (gmx_mm256_any_lt(rsq20,rcutoff2))
699 r20 = _mm256_mul_pd(rsq20,rinv20);
701 /* EWALD ELECTROSTATICS */
703 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
704 ewrt = _mm256_mul_pd(r20,ewtabscale);
705 ewitab = _mm256_cvttpd_epi32(ewrt);
706 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
707 ewitab = _mm_slli_epi32(ewitab,2);
708 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
709 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
710 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
711 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
712 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
713 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
714 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
715 velec = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
716 felec = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
718 d = _mm256_sub_pd(r20,rswitch);
719 d = _mm256_max_pd(d,_mm256_setzero_pd());
720 d2 = _mm256_mul_pd(d,d);
721 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
723 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
725 /* Evaluate switch function */
726 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
727 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
728 velec = _mm256_mul_pd(velec,sw);
729 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
731 /* Update potential sum for this i atom from the interaction with this j atom. */
732 velec = _mm256_and_pd(velec,cutoff_mask);
733 velecsum = _mm256_add_pd(velecsum,velec);
737 fscal = _mm256_and_pd(fscal,cutoff_mask);
739 /* Calculate temporary vectorial force */
740 tx = _mm256_mul_pd(fscal,dx20);
741 ty = _mm256_mul_pd(fscal,dy20);
742 tz = _mm256_mul_pd(fscal,dz20);
744 /* Update vectorial force */
745 fix2 = _mm256_add_pd(fix2,tx);
746 fiy2 = _mm256_add_pd(fiy2,ty);
747 fiz2 = _mm256_add_pd(fiz2,tz);
749 fjx0 = _mm256_add_pd(fjx0,tx);
750 fjy0 = _mm256_add_pd(fjy0,ty);
751 fjz0 = _mm256_add_pd(fjz0,tz);
755 /**************************
756 * CALCULATE INTERACTIONS *
757 **************************/
759 if (gmx_mm256_any_lt(rsq21,rcutoff2))
762 r21 = _mm256_mul_pd(rsq21,rinv21);
764 /* EWALD ELECTROSTATICS */
766 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
767 ewrt = _mm256_mul_pd(r21,ewtabscale);
768 ewitab = _mm256_cvttpd_epi32(ewrt);
769 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
770 ewitab = _mm_slli_epi32(ewitab,2);
771 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
772 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
773 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
774 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
775 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
776 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
777 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
778 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
779 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
781 d = _mm256_sub_pd(r21,rswitch);
782 d = _mm256_max_pd(d,_mm256_setzero_pd());
783 d2 = _mm256_mul_pd(d,d);
784 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
786 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
788 /* Evaluate switch function */
789 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
790 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
791 velec = _mm256_mul_pd(velec,sw);
792 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
794 /* Update potential sum for this i atom from the interaction with this j atom. */
795 velec = _mm256_and_pd(velec,cutoff_mask);
796 velecsum = _mm256_add_pd(velecsum,velec);
800 fscal = _mm256_and_pd(fscal,cutoff_mask);
802 /* Calculate temporary vectorial force */
803 tx = _mm256_mul_pd(fscal,dx21);
804 ty = _mm256_mul_pd(fscal,dy21);
805 tz = _mm256_mul_pd(fscal,dz21);
807 /* Update vectorial force */
808 fix2 = _mm256_add_pd(fix2,tx);
809 fiy2 = _mm256_add_pd(fiy2,ty);
810 fiz2 = _mm256_add_pd(fiz2,tz);
812 fjx1 = _mm256_add_pd(fjx1,tx);
813 fjy1 = _mm256_add_pd(fjy1,ty);
814 fjz1 = _mm256_add_pd(fjz1,tz);
818 /**************************
819 * CALCULATE INTERACTIONS *
820 **************************/
822 if (gmx_mm256_any_lt(rsq22,rcutoff2))
825 r22 = _mm256_mul_pd(rsq22,rinv22);
827 /* EWALD ELECTROSTATICS */
829 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
830 ewrt = _mm256_mul_pd(r22,ewtabscale);
831 ewitab = _mm256_cvttpd_epi32(ewrt);
832 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
833 ewitab = _mm_slli_epi32(ewitab,2);
834 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
835 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
836 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
837 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
838 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
839 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
840 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
841 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
842 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
844 d = _mm256_sub_pd(r22,rswitch);
845 d = _mm256_max_pd(d,_mm256_setzero_pd());
846 d2 = _mm256_mul_pd(d,d);
847 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
849 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
851 /* Evaluate switch function */
852 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
853 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
854 velec = _mm256_mul_pd(velec,sw);
855 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
857 /* Update potential sum for this i atom from the interaction with this j atom. */
858 velec = _mm256_and_pd(velec,cutoff_mask);
859 velecsum = _mm256_add_pd(velecsum,velec);
863 fscal = _mm256_and_pd(fscal,cutoff_mask);
865 /* Calculate temporary vectorial force */
866 tx = _mm256_mul_pd(fscal,dx22);
867 ty = _mm256_mul_pd(fscal,dy22);
868 tz = _mm256_mul_pd(fscal,dz22);
870 /* Update vectorial force */
871 fix2 = _mm256_add_pd(fix2,tx);
872 fiy2 = _mm256_add_pd(fiy2,ty);
873 fiz2 = _mm256_add_pd(fiz2,tz);
875 fjx2 = _mm256_add_pd(fjx2,tx);
876 fjy2 = _mm256_add_pd(fjy2,ty);
877 fjz2 = _mm256_add_pd(fjz2,tz);
881 fjptrA = f+j_coord_offsetA;
882 fjptrB = f+j_coord_offsetB;
883 fjptrC = f+j_coord_offsetC;
884 fjptrD = f+j_coord_offsetD;
886 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
887 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
889 /* Inner loop uses 585 flops */
895 /* Get j neighbor index, and coordinate index */
896 jnrlistA = jjnr[jidx];
897 jnrlistB = jjnr[jidx+1];
898 jnrlistC = jjnr[jidx+2];
899 jnrlistD = jjnr[jidx+3];
900 /* Sign of each element will be negative for non-real atoms.
901 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
902 * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
904 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
906 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
907 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
908 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
910 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
911 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
912 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
913 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
914 j_coord_offsetA = DIM*jnrA;
915 j_coord_offsetB = DIM*jnrB;
916 j_coord_offsetC = DIM*jnrC;
917 j_coord_offsetD = DIM*jnrD;
919 /* load j atom coordinates */
920 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
921 x+j_coord_offsetC,x+j_coord_offsetD,
922 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
924 /* Calculate displacement vector */
925 dx00 = _mm256_sub_pd(ix0,jx0);
926 dy00 = _mm256_sub_pd(iy0,jy0);
927 dz00 = _mm256_sub_pd(iz0,jz0);
928 dx01 = _mm256_sub_pd(ix0,jx1);
929 dy01 = _mm256_sub_pd(iy0,jy1);
930 dz01 = _mm256_sub_pd(iz0,jz1);
931 dx02 = _mm256_sub_pd(ix0,jx2);
932 dy02 = _mm256_sub_pd(iy0,jy2);
933 dz02 = _mm256_sub_pd(iz0,jz2);
934 dx10 = _mm256_sub_pd(ix1,jx0);
935 dy10 = _mm256_sub_pd(iy1,jy0);
936 dz10 = _mm256_sub_pd(iz1,jz0);
937 dx11 = _mm256_sub_pd(ix1,jx1);
938 dy11 = _mm256_sub_pd(iy1,jy1);
939 dz11 = _mm256_sub_pd(iz1,jz1);
940 dx12 = _mm256_sub_pd(ix1,jx2);
941 dy12 = _mm256_sub_pd(iy1,jy2);
942 dz12 = _mm256_sub_pd(iz1,jz2);
943 dx20 = _mm256_sub_pd(ix2,jx0);
944 dy20 = _mm256_sub_pd(iy2,jy0);
945 dz20 = _mm256_sub_pd(iz2,jz0);
946 dx21 = _mm256_sub_pd(ix2,jx1);
947 dy21 = _mm256_sub_pd(iy2,jy1);
948 dz21 = _mm256_sub_pd(iz2,jz1);
949 dx22 = _mm256_sub_pd(ix2,jx2);
950 dy22 = _mm256_sub_pd(iy2,jy2);
951 dz22 = _mm256_sub_pd(iz2,jz2);
953 /* Calculate squared distance and things based on it */
954 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
955 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
956 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
957 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
958 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
959 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
960 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
961 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
962 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
964 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
965 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
966 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
967 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
968 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
969 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
970 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
971 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
972 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
974 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
975 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
976 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
977 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
978 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
979 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
980 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
981 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
982 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
984 fjx0 = _mm256_setzero_pd();
985 fjy0 = _mm256_setzero_pd();
986 fjz0 = _mm256_setzero_pd();
987 fjx1 = _mm256_setzero_pd();
988 fjy1 = _mm256_setzero_pd();
989 fjz1 = _mm256_setzero_pd();
990 fjx2 = _mm256_setzero_pd();
991 fjy2 = _mm256_setzero_pd();
992 fjz2 = _mm256_setzero_pd();
994 /**************************
995 * CALCULATE INTERACTIONS *
996 **************************/
998 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1001 r00 = _mm256_mul_pd(rsq00,rinv00);
1002 r00 = _mm256_andnot_pd(dummy_mask,r00);
1004 /* EWALD ELECTROSTATICS */
1006 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1007 ewrt = _mm256_mul_pd(r00,ewtabscale);
1008 ewitab = _mm256_cvttpd_epi32(ewrt);
1009 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1010 ewitab = _mm_slli_epi32(ewitab,2);
1011 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1012 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1013 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1014 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1015 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1016 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1017 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1018 velec = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
1019 felec = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
1021 d = _mm256_sub_pd(r00,rswitch);
1022 d = _mm256_max_pd(d,_mm256_setzero_pd());
1023 d2 = _mm256_mul_pd(d,d);
1024 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1026 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1028 /* Evaluate switch function */
1029 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1030 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
1031 velec = _mm256_mul_pd(velec,sw);
1032 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1034 /* Update potential sum for this i atom from the interaction with this j atom. */
1035 velec = _mm256_and_pd(velec,cutoff_mask);
1036 velec = _mm256_andnot_pd(dummy_mask,velec);
1037 velecsum = _mm256_add_pd(velecsum,velec);
1041 fscal = _mm256_and_pd(fscal,cutoff_mask);
1043 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1045 /* Calculate temporary vectorial force */
1046 tx = _mm256_mul_pd(fscal,dx00);
1047 ty = _mm256_mul_pd(fscal,dy00);
1048 tz = _mm256_mul_pd(fscal,dz00);
1050 /* Update vectorial force */
1051 fix0 = _mm256_add_pd(fix0,tx);
1052 fiy0 = _mm256_add_pd(fiy0,ty);
1053 fiz0 = _mm256_add_pd(fiz0,tz);
1055 fjx0 = _mm256_add_pd(fjx0,tx);
1056 fjy0 = _mm256_add_pd(fjy0,ty);
1057 fjz0 = _mm256_add_pd(fjz0,tz);
1061 /**************************
1062 * CALCULATE INTERACTIONS *
1063 **************************/
1065 if (gmx_mm256_any_lt(rsq01,rcutoff2))
1068 r01 = _mm256_mul_pd(rsq01,rinv01);
1069 r01 = _mm256_andnot_pd(dummy_mask,r01);
1071 /* EWALD ELECTROSTATICS */
1073 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1074 ewrt = _mm256_mul_pd(r01,ewtabscale);
1075 ewitab = _mm256_cvttpd_epi32(ewrt);
1076 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1077 ewitab = _mm_slli_epi32(ewitab,2);
1078 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1079 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1080 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1081 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1082 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1083 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1084 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1085 velec = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
1086 felec = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
1088 d = _mm256_sub_pd(r01,rswitch);
1089 d = _mm256_max_pd(d,_mm256_setzero_pd());
1090 d2 = _mm256_mul_pd(d,d);
1091 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1093 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1095 /* Evaluate switch function */
1096 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1097 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
1098 velec = _mm256_mul_pd(velec,sw);
1099 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1101 /* Update potential sum for this i atom from the interaction with this j atom. */
1102 velec = _mm256_and_pd(velec,cutoff_mask);
1103 velec = _mm256_andnot_pd(dummy_mask,velec);
1104 velecsum = _mm256_add_pd(velecsum,velec);
1108 fscal = _mm256_and_pd(fscal,cutoff_mask);
1110 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1112 /* Calculate temporary vectorial force */
1113 tx = _mm256_mul_pd(fscal,dx01);
1114 ty = _mm256_mul_pd(fscal,dy01);
1115 tz = _mm256_mul_pd(fscal,dz01);
1117 /* Update vectorial force */
1118 fix0 = _mm256_add_pd(fix0,tx);
1119 fiy0 = _mm256_add_pd(fiy0,ty);
1120 fiz0 = _mm256_add_pd(fiz0,tz);
1122 fjx1 = _mm256_add_pd(fjx1,tx);
1123 fjy1 = _mm256_add_pd(fjy1,ty);
1124 fjz1 = _mm256_add_pd(fjz1,tz);
1128 /**************************
1129 * CALCULATE INTERACTIONS *
1130 **************************/
1132 if (gmx_mm256_any_lt(rsq02,rcutoff2))
1135 r02 = _mm256_mul_pd(rsq02,rinv02);
1136 r02 = _mm256_andnot_pd(dummy_mask,r02);
1138 /* EWALD ELECTROSTATICS */
1140 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1141 ewrt = _mm256_mul_pd(r02,ewtabscale);
1142 ewitab = _mm256_cvttpd_epi32(ewrt);
1143 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1144 ewitab = _mm_slli_epi32(ewitab,2);
1145 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1146 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1147 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1148 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1149 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1150 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1151 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1152 velec = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
1153 felec = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
1155 d = _mm256_sub_pd(r02,rswitch);
1156 d = _mm256_max_pd(d,_mm256_setzero_pd());
1157 d2 = _mm256_mul_pd(d,d);
1158 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1160 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1162 /* Evaluate switch function */
1163 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1164 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
1165 velec = _mm256_mul_pd(velec,sw);
1166 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
1168 /* Update potential sum for this i atom from the interaction with this j atom. */
1169 velec = _mm256_and_pd(velec,cutoff_mask);
1170 velec = _mm256_andnot_pd(dummy_mask,velec);
1171 velecsum = _mm256_add_pd(velecsum,velec);
1175 fscal = _mm256_and_pd(fscal,cutoff_mask);
1177 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1179 /* Calculate temporary vectorial force */
1180 tx = _mm256_mul_pd(fscal,dx02);
1181 ty = _mm256_mul_pd(fscal,dy02);
1182 tz = _mm256_mul_pd(fscal,dz02);
1184 /* Update vectorial force */
1185 fix0 = _mm256_add_pd(fix0,tx);
1186 fiy0 = _mm256_add_pd(fiy0,ty);
1187 fiz0 = _mm256_add_pd(fiz0,tz);
1189 fjx2 = _mm256_add_pd(fjx2,tx);
1190 fjy2 = _mm256_add_pd(fjy2,ty);
1191 fjz2 = _mm256_add_pd(fjz2,tz);
1195 /**************************
1196 * CALCULATE INTERACTIONS *
1197 **************************/
1199 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1202 r10 = _mm256_mul_pd(rsq10,rinv10);
1203 r10 = _mm256_andnot_pd(dummy_mask,r10);
1205 /* EWALD ELECTROSTATICS */
1207 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1208 ewrt = _mm256_mul_pd(r10,ewtabscale);
1209 ewitab = _mm256_cvttpd_epi32(ewrt);
1210 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1211 ewitab = _mm_slli_epi32(ewitab,2);
1212 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1213 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1214 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1215 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1216 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1217 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1218 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1219 velec = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
1220 felec = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1222 d = _mm256_sub_pd(r10,rswitch);
1223 d = _mm256_max_pd(d,_mm256_setzero_pd());
1224 d2 = _mm256_mul_pd(d,d);
1225 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1227 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1229 /* Evaluate switch function */
1230 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1231 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
1232 velec = _mm256_mul_pd(velec,sw);
1233 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1235 /* Update potential sum for this i atom from the interaction with this j atom. */
1236 velec = _mm256_and_pd(velec,cutoff_mask);
1237 velec = _mm256_andnot_pd(dummy_mask,velec);
1238 velecsum = _mm256_add_pd(velecsum,velec);
1242 fscal = _mm256_and_pd(fscal,cutoff_mask);
1244 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1246 /* Calculate temporary vectorial force */
1247 tx = _mm256_mul_pd(fscal,dx10);
1248 ty = _mm256_mul_pd(fscal,dy10);
1249 tz = _mm256_mul_pd(fscal,dz10);
1251 /* Update vectorial force */
1252 fix1 = _mm256_add_pd(fix1,tx);
1253 fiy1 = _mm256_add_pd(fiy1,ty);
1254 fiz1 = _mm256_add_pd(fiz1,tz);
1256 fjx0 = _mm256_add_pd(fjx0,tx);
1257 fjy0 = _mm256_add_pd(fjy0,ty);
1258 fjz0 = _mm256_add_pd(fjz0,tz);
1262 /**************************
1263 * CALCULATE INTERACTIONS *
1264 **************************/
1266 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1269 r11 = _mm256_mul_pd(rsq11,rinv11);
1270 r11 = _mm256_andnot_pd(dummy_mask,r11);
1272 /* EWALD ELECTROSTATICS */
1274 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1275 ewrt = _mm256_mul_pd(r11,ewtabscale);
1276 ewitab = _mm256_cvttpd_epi32(ewrt);
1277 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1278 ewitab = _mm_slli_epi32(ewitab,2);
1279 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1280 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1281 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1282 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1283 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1284 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1285 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1286 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
1287 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
1289 d = _mm256_sub_pd(r11,rswitch);
1290 d = _mm256_max_pd(d,_mm256_setzero_pd());
1291 d2 = _mm256_mul_pd(d,d);
1292 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1294 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1296 /* Evaluate switch function */
1297 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1298 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
1299 velec = _mm256_mul_pd(velec,sw);
1300 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1302 /* Update potential sum for this i atom from the interaction with this j atom. */
1303 velec = _mm256_and_pd(velec,cutoff_mask);
1304 velec = _mm256_andnot_pd(dummy_mask,velec);
1305 velecsum = _mm256_add_pd(velecsum,velec);
1309 fscal = _mm256_and_pd(fscal,cutoff_mask);
1311 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1313 /* Calculate temporary vectorial force */
1314 tx = _mm256_mul_pd(fscal,dx11);
1315 ty = _mm256_mul_pd(fscal,dy11);
1316 tz = _mm256_mul_pd(fscal,dz11);
1318 /* Update vectorial force */
1319 fix1 = _mm256_add_pd(fix1,tx);
1320 fiy1 = _mm256_add_pd(fiy1,ty);
1321 fiz1 = _mm256_add_pd(fiz1,tz);
1323 fjx1 = _mm256_add_pd(fjx1,tx);
1324 fjy1 = _mm256_add_pd(fjy1,ty);
1325 fjz1 = _mm256_add_pd(fjz1,tz);
1329 /**************************
1330 * CALCULATE INTERACTIONS *
1331 **************************/
1333 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1336 r12 = _mm256_mul_pd(rsq12,rinv12);
1337 r12 = _mm256_andnot_pd(dummy_mask,r12);
1339 /* EWALD ELECTROSTATICS */
1341 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1342 ewrt = _mm256_mul_pd(r12,ewtabscale);
1343 ewitab = _mm256_cvttpd_epi32(ewrt);
1344 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1345 ewitab = _mm_slli_epi32(ewitab,2);
1346 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1347 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1348 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1349 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1350 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1351 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1352 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1353 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
1354 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
1356 d = _mm256_sub_pd(r12,rswitch);
1357 d = _mm256_max_pd(d,_mm256_setzero_pd());
1358 d2 = _mm256_mul_pd(d,d);
1359 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1361 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1363 /* Evaluate switch function */
1364 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1365 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
1366 velec = _mm256_mul_pd(velec,sw);
1367 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1369 /* Update potential sum for this i atom from the interaction with this j atom. */
1370 velec = _mm256_and_pd(velec,cutoff_mask);
1371 velec = _mm256_andnot_pd(dummy_mask,velec);
1372 velecsum = _mm256_add_pd(velecsum,velec);
1376 fscal = _mm256_and_pd(fscal,cutoff_mask);
1378 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1380 /* Calculate temporary vectorial force */
1381 tx = _mm256_mul_pd(fscal,dx12);
1382 ty = _mm256_mul_pd(fscal,dy12);
1383 tz = _mm256_mul_pd(fscal,dz12);
1385 /* Update vectorial force */
1386 fix1 = _mm256_add_pd(fix1,tx);
1387 fiy1 = _mm256_add_pd(fiy1,ty);
1388 fiz1 = _mm256_add_pd(fiz1,tz);
1390 fjx2 = _mm256_add_pd(fjx2,tx);
1391 fjy2 = _mm256_add_pd(fjy2,ty);
1392 fjz2 = _mm256_add_pd(fjz2,tz);
1396 /**************************
1397 * CALCULATE INTERACTIONS *
1398 **************************/
1400 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1403 r20 = _mm256_mul_pd(rsq20,rinv20);
1404 r20 = _mm256_andnot_pd(dummy_mask,r20);
1406 /* EWALD ELECTROSTATICS */
1408 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1409 ewrt = _mm256_mul_pd(r20,ewtabscale);
1410 ewitab = _mm256_cvttpd_epi32(ewrt);
1411 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1412 ewitab = _mm_slli_epi32(ewitab,2);
1413 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1414 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1415 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1416 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1417 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1418 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1419 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1420 velec = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
1421 felec = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1423 d = _mm256_sub_pd(r20,rswitch);
1424 d = _mm256_max_pd(d,_mm256_setzero_pd());
1425 d2 = _mm256_mul_pd(d,d);
1426 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1428 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1430 /* Evaluate switch function */
1431 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1432 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
1433 velec = _mm256_mul_pd(velec,sw);
1434 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1436 /* Update potential sum for this i atom from the interaction with this j atom. */
1437 velec = _mm256_and_pd(velec,cutoff_mask);
1438 velec = _mm256_andnot_pd(dummy_mask,velec);
1439 velecsum = _mm256_add_pd(velecsum,velec);
1443 fscal = _mm256_and_pd(fscal,cutoff_mask);
1445 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1447 /* Calculate temporary vectorial force */
1448 tx = _mm256_mul_pd(fscal,dx20);
1449 ty = _mm256_mul_pd(fscal,dy20);
1450 tz = _mm256_mul_pd(fscal,dz20);
1452 /* Update vectorial force */
1453 fix2 = _mm256_add_pd(fix2,tx);
1454 fiy2 = _mm256_add_pd(fiy2,ty);
1455 fiz2 = _mm256_add_pd(fiz2,tz);
1457 fjx0 = _mm256_add_pd(fjx0,tx);
1458 fjy0 = _mm256_add_pd(fjy0,ty);
1459 fjz0 = _mm256_add_pd(fjz0,tz);
1463 /**************************
1464 * CALCULATE INTERACTIONS *
1465 **************************/
1467 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1470 r21 = _mm256_mul_pd(rsq21,rinv21);
1471 r21 = _mm256_andnot_pd(dummy_mask,r21);
1473 /* EWALD ELECTROSTATICS */
1475 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1476 ewrt = _mm256_mul_pd(r21,ewtabscale);
1477 ewitab = _mm256_cvttpd_epi32(ewrt);
1478 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1479 ewitab = _mm_slli_epi32(ewitab,2);
1480 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1481 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1482 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1483 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1484 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1485 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1486 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1487 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
1488 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
1490 d = _mm256_sub_pd(r21,rswitch);
1491 d = _mm256_max_pd(d,_mm256_setzero_pd());
1492 d2 = _mm256_mul_pd(d,d);
1493 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1495 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1497 /* Evaluate switch function */
1498 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1499 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
1500 velec = _mm256_mul_pd(velec,sw);
1501 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1503 /* Update potential sum for this i atom from the interaction with this j atom. */
1504 velec = _mm256_and_pd(velec,cutoff_mask);
1505 velec = _mm256_andnot_pd(dummy_mask,velec);
1506 velecsum = _mm256_add_pd(velecsum,velec);
1510 fscal = _mm256_and_pd(fscal,cutoff_mask);
1512 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1514 /* Calculate temporary vectorial force */
1515 tx = _mm256_mul_pd(fscal,dx21);
1516 ty = _mm256_mul_pd(fscal,dy21);
1517 tz = _mm256_mul_pd(fscal,dz21);
1519 /* Update vectorial force */
1520 fix2 = _mm256_add_pd(fix2,tx);
1521 fiy2 = _mm256_add_pd(fiy2,ty);
1522 fiz2 = _mm256_add_pd(fiz2,tz);
1524 fjx1 = _mm256_add_pd(fjx1,tx);
1525 fjy1 = _mm256_add_pd(fjy1,ty);
1526 fjz1 = _mm256_add_pd(fjz1,tz);
1530 /**************************
1531 * CALCULATE INTERACTIONS *
1532 **************************/
1534 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1537 r22 = _mm256_mul_pd(rsq22,rinv22);
1538 r22 = _mm256_andnot_pd(dummy_mask,r22);
1540 /* EWALD ELECTROSTATICS */
1542 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1543 ewrt = _mm256_mul_pd(r22,ewtabscale);
1544 ewitab = _mm256_cvttpd_epi32(ewrt);
1545 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1546 ewitab = _mm_slli_epi32(ewitab,2);
1547 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1548 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1549 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1550 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1551 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1552 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1553 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1554 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
1555 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
1557 d = _mm256_sub_pd(r22,rswitch);
1558 d = _mm256_max_pd(d,_mm256_setzero_pd());
1559 d2 = _mm256_mul_pd(d,d);
1560 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1562 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1564 /* Evaluate switch function */
1565 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1566 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
1567 velec = _mm256_mul_pd(velec,sw);
1568 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1570 /* Update potential sum for this i atom from the interaction with this j atom. */
1571 velec = _mm256_and_pd(velec,cutoff_mask);
1572 velec = _mm256_andnot_pd(dummy_mask,velec);
1573 velecsum = _mm256_add_pd(velecsum,velec);
1577 fscal = _mm256_and_pd(fscal,cutoff_mask);
1579 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1581 /* Calculate temporary vectorial force */
1582 tx = _mm256_mul_pd(fscal,dx22);
1583 ty = _mm256_mul_pd(fscal,dy22);
1584 tz = _mm256_mul_pd(fscal,dz22);
1586 /* Update vectorial force */
1587 fix2 = _mm256_add_pd(fix2,tx);
1588 fiy2 = _mm256_add_pd(fiy2,ty);
1589 fiz2 = _mm256_add_pd(fiz2,tz);
1591 fjx2 = _mm256_add_pd(fjx2,tx);
1592 fjy2 = _mm256_add_pd(fjy2,ty);
1593 fjz2 = _mm256_add_pd(fjz2,tz);
1597 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1598 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1599 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1600 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1602 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1603 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1605 /* Inner loop uses 594 flops */
1608 /* End of innermost loop */
1610 gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1611 f+i_coord_offset,fshift+i_shift_offset);
1614 /* Update potential energies */
1615 gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1617 /* Increment number of inner iterations */
1618 inneriter += j_index_end - j_index_start;
1620 /* Outer loop uses 19 flops */
1623 /* Increment number of outer iterations */
1626 /* Update outer/inner flops */
1628 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_VF,outeriter*19 + inneriter*594);
1631 * Gromacs nonbonded kernel: nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_double
1632 * Electrostatics interaction: Ewald
1633 * VdW interaction: None
1634 * Geometry: Water3-Water3
1635 * Calculate force/pot: Force
1638 nb_kernel_ElecEwSw_VdwNone_GeomW3W3_F_avx_256_double
1639 (t_nblist * gmx_restrict nlist,
1640 rvec * gmx_restrict xx,
1641 rvec * gmx_restrict ff,
1642 t_forcerec * gmx_restrict fr,
1643 t_mdatoms * gmx_restrict mdatoms,
1644 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1645 t_nrnb * gmx_restrict nrnb)
1647 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1648 * just 0 for non-waters.
1649 * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1650 * jnr indices corresponding to data put in the four positions in the SIMD register.
1652 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1653 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1654 int jnrA,jnrB,jnrC,jnrD;
1655 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1656 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1657 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1658 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1659 real rcutoff_scalar;
1660 real *shiftvec,*fshift,*x,*f;
1661 real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1662 real scratch[4*DIM];
1663 __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1664 real * vdwioffsetptr0;
1665 __m256d ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1666 real * vdwioffsetptr1;
1667 __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1668 real * vdwioffsetptr2;
1669 __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1670 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1671 __m256d jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1672 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1673 __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1674 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1675 __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1676 __m256d dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1677 __m256d dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1678 __m256d dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1679 __m256d dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1680 __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1681 __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1682 __m256d dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1683 __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1684 __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1685 __m256d velec,felec,velecsum,facel,crf,krf,krf2;
1688 __m256d ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1689 __m256d beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1691 __m256d rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
1692 real rswitch_scalar,d_scalar;
1693 __m256d dummy_mask,cutoff_mask;
1694 __m128 tmpmask0,tmpmask1;
1695 __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1696 __m256d one = _mm256_set1_pd(1.0);
1697 __m256d two = _mm256_set1_pd(2.0);
1703 jindex = nlist->jindex;
1705 shiftidx = nlist->shift;
1707 shiftvec = fr->shift_vec[0];
1708 fshift = fr->fshift[0];
1709 facel = _mm256_set1_pd(fr->epsfac);
1710 charge = mdatoms->chargeA;
1712 sh_ewald = _mm256_set1_pd(fr->ic->sh_ewald);
1713 beta = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
1714 beta2 = _mm256_mul_pd(beta,beta);
1715 beta3 = _mm256_mul_pd(beta,beta2);
1717 ewtab = fr->ic->tabq_coul_FDV0;
1718 ewtabscale = _mm256_set1_pd(fr->ic->tabq_scale);
1719 ewtabhalfspace = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
1721 /* Setup water-specific parameters */
1722 inr = nlist->iinr[0];
1723 iq0 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1724 iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1725 iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1727 jq0 = _mm256_set1_pd(charge[inr+0]);
1728 jq1 = _mm256_set1_pd(charge[inr+1]);
1729 jq2 = _mm256_set1_pd(charge[inr+2]);
1730 qq00 = _mm256_mul_pd(iq0,jq0);
1731 qq01 = _mm256_mul_pd(iq0,jq1);
1732 qq02 = _mm256_mul_pd(iq0,jq2);
1733 qq10 = _mm256_mul_pd(iq1,jq0);
1734 qq11 = _mm256_mul_pd(iq1,jq1);
1735 qq12 = _mm256_mul_pd(iq1,jq2);
1736 qq20 = _mm256_mul_pd(iq2,jq0);
1737 qq21 = _mm256_mul_pd(iq2,jq1);
1738 qq22 = _mm256_mul_pd(iq2,jq2);
1740 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1741 rcutoff_scalar = fr->rcoulomb;
1742 rcutoff = _mm256_set1_pd(rcutoff_scalar);
1743 rcutoff2 = _mm256_mul_pd(rcutoff,rcutoff);
1745 rswitch_scalar = fr->rcoulomb_switch;
1746 rswitch = _mm256_set1_pd(rswitch_scalar);
1747 /* Setup switch parameters */
1748 d_scalar = rcutoff_scalar-rswitch_scalar;
1749 d = _mm256_set1_pd(d_scalar);
1750 swV3 = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
1751 swV4 = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1752 swV5 = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1753 swF2 = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
1754 swF3 = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1755 swF4 = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1757 /* Avoid stupid compiler warnings */
1758 jnrA = jnrB = jnrC = jnrD = 0;
1759 j_coord_offsetA = 0;
1760 j_coord_offsetB = 0;
1761 j_coord_offsetC = 0;
1762 j_coord_offsetD = 0;
1767 for(iidx=0;iidx<4*DIM;iidx++)
1769 scratch[iidx] = 0.0;
1772 /* Start outer loop over neighborlists */
1773 for(iidx=0; iidx<nri; iidx++)
1775 /* Load shift vector for this list */
1776 i_shift_offset = DIM*shiftidx[iidx];
1778 /* Load limits for loop over neighbors */
1779 j_index_start = jindex[iidx];
1780 j_index_end = jindex[iidx+1];
1782 /* Get outer coordinate index */
1784 i_coord_offset = DIM*inr;
1786 /* Load i particle coords and add shift vector */
1787 gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1788 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1790 fix0 = _mm256_setzero_pd();
1791 fiy0 = _mm256_setzero_pd();
1792 fiz0 = _mm256_setzero_pd();
1793 fix1 = _mm256_setzero_pd();
1794 fiy1 = _mm256_setzero_pd();
1795 fiz1 = _mm256_setzero_pd();
1796 fix2 = _mm256_setzero_pd();
1797 fiy2 = _mm256_setzero_pd();
1798 fiz2 = _mm256_setzero_pd();
1800 /* Start inner kernel loop */
1801 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1804 /* Get j neighbor index, and coordinate index */
1806 jnrB = jjnr[jidx+1];
1807 jnrC = jjnr[jidx+2];
1808 jnrD = jjnr[jidx+3];
1809 j_coord_offsetA = DIM*jnrA;
1810 j_coord_offsetB = DIM*jnrB;
1811 j_coord_offsetC = DIM*jnrC;
1812 j_coord_offsetD = DIM*jnrD;
1814 /* load j atom coordinates */
1815 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1816 x+j_coord_offsetC,x+j_coord_offsetD,
1817 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1819 /* Calculate displacement vector */
1820 dx00 = _mm256_sub_pd(ix0,jx0);
1821 dy00 = _mm256_sub_pd(iy0,jy0);
1822 dz00 = _mm256_sub_pd(iz0,jz0);
1823 dx01 = _mm256_sub_pd(ix0,jx1);
1824 dy01 = _mm256_sub_pd(iy0,jy1);
1825 dz01 = _mm256_sub_pd(iz0,jz1);
1826 dx02 = _mm256_sub_pd(ix0,jx2);
1827 dy02 = _mm256_sub_pd(iy0,jy2);
1828 dz02 = _mm256_sub_pd(iz0,jz2);
1829 dx10 = _mm256_sub_pd(ix1,jx0);
1830 dy10 = _mm256_sub_pd(iy1,jy0);
1831 dz10 = _mm256_sub_pd(iz1,jz0);
1832 dx11 = _mm256_sub_pd(ix1,jx1);
1833 dy11 = _mm256_sub_pd(iy1,jy1);
1834 dz11 = _mm256_sub_pd(iz1,jz1);
1835 dx12 = _mm256_sub_pd(ix1,jx2);
1836 dy12 = _mm256_sub_pd(iy1,jy2);
1837 dz12 = _mm256_sub_pd(iz1,jz2);
1838 dx20 = _mm256_sub_pd(ix2,jx0);
1839 dy20 = _mm256_sub_pd(iy2,jy0);
1840 dz20 = _mm256_sub_pd(iz2,jz0);
1841 dx21 = _mm256_sub_pd(ix2,jx1);
1842 dy21 = _mm256_sub_pd(iy2,jy1);
1843 dz21 = _mm256_sub_pd(iz2,jz1);
1844 dx22 = _mm256_sub_pd(ix2,jx2);
1845 dy22 = _mm256_sub_pd(iy2,jy2);
1846 dz22 = _mm256_sub_pd(iz2,jz2);
1848 /* Calculate squared distance and things based on it */
1849 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1850 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1851 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1852 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1853 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1854 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1855 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1856 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1857 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1859 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
1860 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
1861 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
1862 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
1863 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
1864 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
1865 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
1866 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
1867 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
1869 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
1870 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
1871 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
1872 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
1873 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1874 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1875 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
1876 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1877 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1879 fjx0 = _mm256_setzero_pd();
1880 fjy0 = _mm256_setzero_pd();
1881 fjz0 = _mm256_setzero_pd();
1882 fjx1 = _mm256_setzero_pd();
1883 fjy1 = _mm256_setzero_pd();
1884 fjz1 = _mm256_setzero_pd();
1885 fjx2 = _mm256_setzero_pd();
1886 fjy2 = _mm256_setzero_pd();
1887 fjz2 = _mm256_setzero_pd();
1889 /**************************
1890 * CALCULATE INTERACTIONS *
1891 **************************/
1893 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1896 r00 = _mm256_mul_pd(rsq00,rinv00);
1898 /* EWALD ELECTROSTATICS */
1900 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1901 ewrt = _mm256_mul_pd(r00,ewtabscale);
1902 ewitab = _mm256_cvttpd_epi32(ewrt);
1903 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1904 ewitab = _mm_slli_epi32(ewitab,2);
1905 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1906 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1907 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1908 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1909 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1910 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1911 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1912 velec = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
1913 felec = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
1915 d = _mm256_sub_pd(r00,rswitch);
1916 d = _mm256_max_pd(d,_mm256_setzero_pd());
1917 d2 = _mm256_mul_pd(d,d);
1918 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1920 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1922 /* Evaluate switch function */
1923 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1924 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
1925 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1929 fscal = _mm256_and_pd(fscal,cutoff_mask);
1931 /* Calculate temporary vectorial force */
1932 tx = _mm256_mul_pd(fscal,dx00);
1933 ty = _mm256_mul_pd(fscal,dy00);
1934 tz = _mm256_mul_pd(fscal,dz00);
1936 /* Update vectorial force */
1937 fix0 = _mm256_add_pd(fix0,tx);
1938 fiy0 = _mm256_add_pd(fiy0,ty);
1939 fiz0 = _mm256_add_pd(fiz0,tz);
1941 fjx0 = _mm256_add_pd(fjx0,tx);
1942 fjy0 = _mm256_add_pd(fjy0,ty);
1943 fjz0 = _mm256_add_pd(fjz0,tz);
1947 /**************************
1948 * CALCULATE INTERACTIONS *
1949 **************************/
1951 if (gmx_mm256_any_lt(rsq01,rcutoff2))
1954 r01 = _mm256_mul_pd(rsq01,rinv01);
1956 /* EWALD ELECTROSTATICS */
1958 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1959 ewrt = _mm256_mul_pd(r01,ewtabscale);
1960 ewitab = _mm256_cvttpd_epi32(ewrt);
1961 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1962 ewitab = _mm_slli_epi32(ewitab,2);
1963 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1964 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1965 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1966 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1967 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1968 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1969 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1970 velec = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
1971 felec = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
1973 d = _mm256_sub_pd(r01,rswitch);
1974 d = _mm256_max_pd(d,_mm256_setzero_pd());
1975 d2 = _mm256_mul_pd(d,d);
1976 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1978 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1980 /* Evaluate switch function */
1981 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1982 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
1983 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1987 fscal = _mm256_and_pd(fscal,cutoff_mask);
1989 /* Calculate temporary vectorial force */
1990 tx = _mm256_mul_pd(fscal,dx01);
1991 ty = _mm256_mul_pd(fscal,dy01);
1992 tz = _mm256_mul_pd(fscal,dz01);
1994 /* Update vectorial force */
1995 fix0 = _mm256_add_pd(fix0,tx);
1996 fiy0 = _mm256_add_pd(fiy0,ty);
1997 fiz0 = _mm256_add_pd(fiz0,tz);
1999 fjx1 = _mm256_add_pd(fjx1,tx);
2000 fjy1 = _mm256_add_pd(fjy1,ty);
2001 fjz1 = _mm256_add_pd(fjz1,tz);
2005 /**************************
2006 * CALCULATE INTERACTIONS *
2007 **************************/
2009 if (gmx_mm256_any_lt(rsq02,rcutoff2))
2012 r02 = _mm256_mul_pd(rsq02,rinv02);
2014 /* EWALD ELECTROSTATICS */
2016 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2017 ewrt = _mm256_mul_pd(r02,ewtabscale);
2018 ewitab = _mm256_cvttpd_epi32(ewrt);
2019 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2020 ewitab = _mm_slli_epi32(ewitab,2);
2021 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2022 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2023 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2024 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2025 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2026 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2027 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2028 velec = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
2029 felec = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
2031 d = _mm256_sub_pd(r02,rswitch);
2032 d = _mm256_max_pd(d,_mm256_setzero_pd());
2033 d2 = _mm256_mul_pd(d,d);
2034 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2036 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2038 /* Evaluate switch function */
2039 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2040 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
2041 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2045 fscal = _mm256_and_pd(fscal,cutoff_mask);
2047 /* Calculate temporary vectorial force */
2048 tx = _mm256_mul_pd(fscal,dx02);
2049 ty = _mm256_mul_pd(fscal,dy02);
2050 tz = _mm256_mul_pd(fscal,dz02);
2052 /* Update vectorial force */
2053 fix0 = _mm256_add_pd(fix0,tx);
2054 fiy0 = _mm256_add_pd(fiy0,ty);
2055 fiz0 = _mm256_add_pd(fiz0,tz);
2057 fjx2 = _mm256_add_pd(fjx2,tx);
2058 fjy2 = _mm256_add_pd(fjy2,ty);
2059 fjz2 = _mm256_add_pd(fjz2,tz);
2063 /**************************
2064 * CALCULATE INTERACTIONS *
2065 **************************/
2067 if (gmx_mm256_any_lt(rsq10,rcutoff2))
2070 r10 = _mm256_mul_pd(rsq10,rinv10);
2072 /* EWALD ELECTROSTATICS */
2074 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2075 ewrt = _mm256_mul_pd(r10,ewtabscale);
2076 ewitab = _mm256_cvttpd_epi32(ewrt);
2077 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2078 ewitab = _mm_slli_epi32(ewitab,2);
2079 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2080 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2081 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2082 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2083 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2084 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2085 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2086 velec = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
2087 felec = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
2089 d = _mm256_sub_pd(r10,rswitch);
2090 d = _mm256_max_pd(d,_mm256_setzero_pd());
2091 d2 = _mm256_mul_pd(d,d);
2092 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2094 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2096 /* Evaluate switch function */
2097 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2098 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
2099 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2103 fscal = _mm256_and_pd(fscal,cutoff_mask);
2105 /* Calculate temporary vectorial force */
2106 tx = _mm256_mul_pd(fscal,dx10);
2107 ty = _mm256_mul_pd(fscal,dy10);
2108 tz = _mm256_mul_pd(fscal,dz10);
2110 /* Update vectorial force */
2111 fix1 = _mm256_add_pd(fix1,tx);
2112 fiy1 = _mm256_add_pd(fiy1,ty);
2113 fiz1 = _mm256_add_pd(fiz1,tz);
2115 fjx0 = _mm256_add_pd(fjx0,tx);
2116 fjy0 = _mm256_add_pd(fjy0,ty);
2117 fjz0 = _mm256_add_pd(fjz0,tz);
2121 /**************************
2122 * CALCULATE INTERACTIONS *
2123 **************************/
2125 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2128 r11 = _mm256_mul_pd(rsq11,rinv11);
2130 /* EWALD ELECTROSTATICS */
2132 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2133 ewrt = _mm256_mul_pd(r11,ewtabscale);
2134 ewitab = _mm256_cvttpd_epi32(ewrt);
2135 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2136 ewitab = _mm_slli_epi32(ewitab,2);
2137 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2138 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2139 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2140 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2141 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2142 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2143 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2144 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
2145 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
2147 d = _mm256_sub_pd(r11,rswitch);
2148 d = _mm256_max_pd(d,_mm256_setzero_pd());
2149 d2 = _mm256_mul_pd(d,d);
2150 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2152 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2154 /* Evaluate switch function */
2155 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2156 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
2157 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2161 fscal = _mm256_and_pd(fscal,cutoff_mask);
2163 /* Calculate temporary vectorial force */
2164 tx = _mm256_mul_pd(fscal,dx11);
2165 ty = _mm256_mul_pd(fscal,dy11);
2166 tz = _mm256_mul_pd(fscal,dz11);
2168 /* Update vectorial force */
2169 fix1 = _mm256_add_pd(fix1,tx);
2170 fiy1 = _mm256_add_pd(fiy1,ty);
2171 fiz1 = _mm256_add_pd(fiz1,tz);
2173 fjx1 = _mm256_add_pd(fjx1,tx);
2174 fjy1 = _mm256_add_pd(fjy1,ty);
2175 fjz1 = _mm256_add_pd(fjz1,tz);
2179 /**************************
2180 * CALCULATE INTERACTIONS *
2181 **************************/
2183 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2186 r12 = _mm256_mul_pd(rsq12,rinv12);
2188 /* EWALD ELECTROSTATICS */
2190 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2191 ewrt = _mm256_mul_pd(r12,ewtabscale);
2192 ewitab = _mm256_cvttpd_epi32(ewrt);
2193 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2194 ewitab = _mm_slli_epi32(ewitab,2);
2195 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2196 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2197 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2198 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2199 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2200 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2201 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2202 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
2203 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2205 d = _mm256_sub_pd(r12,rswitch);
2206 d = _mm256_max_pd(d,_mm256_setzero_pd());
2207 d2 = _mm256_mul_pd(d,d);
2208 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2210 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2212 /* Evaluate switch function */
2213 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2214 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
2215 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2219 fscal = _mm256_and_pd(fscal,cutoff_mask);
2221 /* Calculate temporary vectorial force */
2222 tx = _mm256_mul_pd(fscal,dx12);
2223 ty = _mm256_mul_pd(fscal,dy12);
2224 tz = _mm256_mul_pd(fscal,dz12);
2226 /* Update vectorial force */
2227 fix1 = _mm256_add_pd(fix1,tx);
2228 fiy1 = _mm256_add_pd(fiy1,ty);
2229 fiz1 = _mm256_add_pd(fiz1,tz);
2231 fjx2 = _mm256_add_pd(fjx2,tx);
2232 fjy2 = _mm256_add_pd(fjy2,ty);
2233 fjz2 = _mm256_add_pd(fjz2,tz);
2237 /**************************
2238 * CALCULATE INTERACTIONS *
2239 **************************/
2241 if (gmx_mm256_any_lt(rsq20,rcutoff2))
2244 r20 = _mm256_mul_pd(rsq20,rinv20);
2246 /* EWALD ELECTROSTATICS */
2248 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2249 ewrt = _mm256_mul_pd(r20,ewtabscale);
2250 ewitab = _mm256_cvttpd_epi32(ewrt);
2251 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2252 ewitab = _mm_slli_epi32(ewitab,2);
2253 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2254 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2255 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2256 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2257 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2258 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2259 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2260 velec = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
2261 felec = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
2263 d = _mm256_sub_pd(r20,rswitch);
2264 d = _mm256_max_pd(d,_mm256_setzero_pd());
2265 d2 = _mm256_mul_pd(d,d);
2266 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2268 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2270 /* Evaluate switch function */
2271 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2272 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
2273 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2277 fscal = _mm256_and_pd(fscal,cutoff_mask);
2279 /* Calculate temporary vectorial force */
2280 tx = _mm256_mul_pd(fscal,dx20);
2281 ty = _mm256_mul_pd(fscal,dy20);
2282 tz = _mm256_mul_pd(fscal,dz20);
2284 /* Update vectorial force */
2285 fix2 = _mm256_add_pd(fix2,tx);
2286 fiy2 = _mm256_add_pd(fiy2,ty);
2287 fiz2 = _mm256_add_pd(fiz2,tz);
2289 fjx0 = _mm256_add_pd(fjx0,tx);
2290 fjy0 = _mm256_add_pd(fjy0,ty);
2291 fjz0 = _mm256_add_pd(fjz0,tz);
2295 /**************************
2296 * CALCULATE INTERACTIONS *
2297 **************************/
2299 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2302 r21 = _mm256_mul_pd(rsq21,rinv21);
2304 /* EWALD ELECTROSTATICS */
2306 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2307 ewrt = _mm256_mul_pd(r21,ewtabscale);
2308 ewitab = _mm256_cvttpd_epi32(ewrt);
2309 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2310 ewitab = _mm_slli_epi32(ewitab,2);
2311 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2312 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2313 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2314 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2315 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2316 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2317 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2318 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
2319 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2321 d = _mm256_sub_pd(r21,rswitch);
2322 d = _mm256_max_pd(d,_mm256_setzero_pd());
2323 d2 = _mm256_mul_pd(d,d);
2324 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2326 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2328 /* Evaluate switch function */
2329 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2330 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
2331 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2335 fscal = _mm256_and_pd(fscal,cutoff_mask);
2337 /* Calculate temporary vectorial force */
2338 tx = _mm256_mul_pd(fscal,dx21);
2339 ty = _mm256_mul_pd(fscal,dy21);
2340 tz = _mm256_mul_pd(fscal,dz21);
2342 /* Update vectorial force */
2343 fix2 = _mm256_add_pd(fix2,tx);
2344 fiy2 = _mm256_add_pd(fiy2,ty);
2345 fiz2 = _mm256_add_pd(fiz2,tz);
2347 fjx1 = _mm256_add_pd(fjx1,tx);
2348 fjy1 = _mm256_add_pd(fjy1,ty);
2349 fjz1 = _mm256_add_pd(fjz1,tz);
2353 /**************************
2354 * CALCULATE INTERACTIONS *
2355 **************************/
2357 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2360 r22 = _mm256_mul_pd(rsq22,rinv22);
2362 /* EWALD ELECTROSTATICS */
2364 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2365 ewrt = _mm256_mul_pd(r22,ewtabscale);
2366 ewitab = _mm256_cvttpd_epi32(ewrt);
2367 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2368 ewitab = _mm_slli_epi32(ewitab,2);
2369 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2370 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2371 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2372 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2373 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2374 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2375 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2376 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
2377 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
2379 d = _mm256_sub_pd(r22,rswitch);
2380 d = _mm256_max_pd(d,_mm256_setzero_pd());
2381 d2 = _mm256_mul_pd(d,d);
2382 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2384 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2386 /* Evaluate switch function */
2387 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2388 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
2389 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2393 fscal = _mm256_and_pd(fscal,cutoff_mask);
2395 /* Calculate temporary vectorial force */
2396 tx = _mm256_mul_pd(fscal,dx22);
2397 ty = _mm256_mul_pd(fscal,dy22);
2398 tz = _mm256_mul_pd(fscal,dz22);
2400 /* Update vectorial force */
2401 fix2 = _mm256_add_pd(fix2,tx);
2402 fiy2 = _mm256_add_pd(fiy2,ty);
2403 fiz2 = _mm256_add_pd(fiz2,tz);
2405 fjx2 = _mm256_add_pd(fjx2,tx);
2406 fjy2 = _mm256_add_pd(fjy2,ty);
2407 fjz2 = _mm256_add_pd(fjz2,tz);
2411 fjptrA = f+j_coord_offsetA;
2412 fjptrB = f+j_coord_offsetB;
2413 fjptrC = f+j_coord_offsetC;
2414 fjptrD = f+j_coord_offsetD;
2416 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2417 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2419 /* Inner loop uses 558 flops */
2422 if(jidx<j_index_end)
2425 /* Get j neighbor index, and coordinate index */
2426 jnrlistA = jjnr[jidx];
2427 jnrlistB = jjnr[jidx+1];
2428 jnrlistC = jjnr[jidx+2];
2429 jnrlistD = jjnr[jidx+3];
2430 /* Sign of each element will be negative for non-real atoms.
2431 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2432 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
2434 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
2436 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
2437 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
2438 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
2440 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
2441 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
2442 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
2443 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
2444 j_coord_offsetA = DIM*jnrA;
2445 j_coord_offsetB = DIM*jnrB;
2446 j_coord_offsetC = DIM*jnrC;
2447 j_coord_offsetD = DIM*jnrD;
2449 /* load j atom coordinates */
2450 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
2451 x+j_coord_offsetC,x+j_coord_offsetD,
2452 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
2454 /* Calculate displacement vector */
2455 dx00 = _mm256_sub_pd(ix0,jx0);
2456 dy00 = _mm256_sub_pd(iy0,jy0);
2457 dz00 = _mm256_sub_pd(iz0,jz0);
2458 dx01 = _mm256_sub_pd(ix0,jx1);
2459 dy01 = _mm256_sub_pd(iy0,jy1);
2460 dz01 = _mm256_sub_pd(iz0,jz1);
2461 dx02 = _mm256_sub_pd(ix0,jx2);
2462 dy02 = _mm256_sub_pd(iy0,jy2);
2463 dz02 = _mm256_sub_pd(iz0,jz2);
2464 dx10 = _mm256_sub_pd(ix1,jx0);
2465 dy10 = _mm256_sub_pd(iy1,jy0);
2466 dz10 = _mm256_sub_pd(iz1,jz0);
2467 dx11 = _mm256_sub_pd(ix1,jx1);
2468 dy11 = _mm256_sub_pd(iy1,jy1);
2469 dz11 = _mm256_sub_pd(iz1,jz1);
2470 dx12 = _mm256_sub_pd(ix1,jx2);
2471 dy12 = _mm256_sub_pd(iy1,jy2);
2472 dz12 = _mm256_sub_pd(iz1,jz2);
2473 dx20 = _mm256_sub_pd(ix2,jx0);
2474 dy20 = _mm256_sub_pd(iy2,jy0);
2475 dz20 = _mm256_sub_pd(iz2,jz0);
2476 dx21 = _mm256_sub_pd(ix2,jx1);
2477 dy21 = _mm256_sub_pd(iy2,jy1);
2478 dz21 = _mm256_sub_pd(iz2,jz1);
2479 dx22 = _mm256_sub_pd(ix2,jx2);
2480 dy22 = _mm256_sub_pd(iy2,jy2);
2481 dz22 = _mm256_sub_pd(iz2,jz2);
2483 /* Calculate squared distance and things based on it */
2484 rsq00 = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
2485 rsq01 = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
2486 rsq02 = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
2487 rsq10 = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
2488 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
2489 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
2490 rsq20 = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
2491 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
2492 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
2494 rinv00 = gmx_mm256_invsqrt_pd(rsq00);
2495 rinv01 = gmx_mm256_invsqrt_pd(rsq01);
2496 rinv02 = gmx_mm256_invsqrt_pd(rsq02);
2497 rinv10 = gmx_mm256_invsqrt_pd(rsq10);
2498 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
2499 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
2500 rinv20 = gmx_mm256_invsqrt_pd(rsq20);
2501 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
2502 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
2504 rinvsq00 = _mm256_mul_pd(rinv00,rinv00);
2505 rinvsq01 = _mm256_mul_pd(rinv01,rinv01);
2506 rinvsq02 = _mm256_mul_pd(rinv02,rinv02);
2507 rinvsq10 = _mm256_mul_pd(rinv10,rinv10);
2508 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
2509 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
2510 rinvsq20 = _mm256_mul_pd(rinv20,rinv20);
2511 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
2512 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
2514 fjx0 = _mm256_setzero_pd();
2515 fjy0 = _mm256_setzero_pd();
2516 fjz0 = _mm256_setzero_pd();
2517 fjx1 = _mm256_setzero_pd();
2518 fjy1 = _mm256_setzero_pd();
2519 fjz1 = _mm256_setzero_pd();
2520 fjx2 = _mm256_setzero_pd();
2521 fjy2 = _mm256_setzero_pd();
2522 fjz2 = _mm256_setzero_pd();
2524 /**************************
2525 * CALCULATE INTERACTIONS *
2526 **************************/
2528 if (gmx_mm256_any_lt(rsq00,rcutoff2))
2531 r00 = _mm256_mul_pd(rsq00,rinv00);
2532 r00 = _mm256_andnot_pd(dummy_mask,r00);
2534 /* EWALD ELECTROSTATICS */
2536 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2537 ewrt = _mm256_mul_pd(r00,ewtabscale);
2538 ewitab = _mm256_cvttpd_epi32(ewrt);
2539 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2540 ewitab = _mm_slli_epi32(ewitab,2);
2541 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2542 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2543 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2544 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2545 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2546 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2547 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2548 velec = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
2549 felec = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
2551 d = _mm256_sub_pd(r00,rswitch);
2552 d = _mm256_max_pd(d,_mm256_setzero_pd());
2553 d2 = _mm256_mul_pd(d,d);
2554 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2556 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2558 /* Evaluate switch function */
2559 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2560 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
2561 cutoff_mask = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
2565 fscal = _mm256_and_pd(fscal,cutoff_mask);
2567 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2569 /* Calculate temporary vectorial force */
2570 tx = _mm256_mul_pd(fscal,dx00);
2571 ty = _mm256_mul_pd(fscal,dy00);
2572 tz = _mm256_mul_pd(fscal,dz00);
2574 /* Update vectorial force */
2575 fix0 = _mm256_add_pd(fix0,tx);
2576 fiy0 = _mm256_add_pd(fiy0,ty);
2577 fiz0 = _mm256_add_pd(fiz0,tz);
2579 fjx0 = _mm256_add_pd(fjx0,tx);
2580 fjy0 = _mm256_add_pd(fjy0,ty);
2581 fjz0 = _mm256_add_pd(fjz0,tz);
2585 /**************************
2586 * CALCULATE INTERACTIONS *
2587 **************************/
2589 if (gmx_mm256_any_lt(rsq01,rcutoff2))
2592 r01 = _mm256_mul_pd(rsq01,rinv01);
2593 r01 = _mm256_andnot_pd(dummy_mask,r01);
2595 /* EWALD ELECTROSTATICS */
2597 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2598 ewrt = _mm256_mul_pd(r01,ewtabscale);
2599 ewitab = _mm256_cvttpd_epi32(ewrt);
2600 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2601 ewitab = _mm_slli_epi32(ewitab,2);
2602 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2603 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2604 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2605 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2606 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2607 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2608 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2609 velec = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
2610 felec = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
2612 d = _mm256_sub_pd(r01,rswitch);
2613 d = _mm256_max_pd(d,_mm256_setzero_pd());
2614 d2 = _mm256_mul_pd(d,d);
2615 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2617 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2619 /* Evaluate switch function */
2620 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2621 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
2622 cutoff_mask = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
2626 fscal = _mm256_and_pd(fscal,cutoff_mask);
2628 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2630 /* Calculate temporary vectorial force */
2631 tx = _mm256_mul_pd(fscal,dx01);
2632 ty = _mm256_mul_pd(fscal,dy01);
2633 tz = _mm256_mul_pd(fscal,dz01);
2635 /* Update vectorial force */
2636 fix0 = _mm256_add_pd(fix0,tx);
2637 fiy0 = _mm256_add_pd(fiy0,ty);
2638 fiz0 = _mm256_add_pd(fiz0,tz);
2640 fjx1 = _mm256_add_pd(fjx1,tx);
2641 fjy1 = _mm256_add_pd(fjy1,ty);
2642 fjz1 = _mm256_add_pd(fjz1,tz);
2646 /**************************
2647 * CALCULATE INTERACTIONS *
2648 **************************/
2650 if (gmx_mm256_any_lt(rsq02,rcutoff2))
2653 r02 = _mm256_mul_pd(rsq02,rinv02);
2654 r02 = _mm256_andnot_pd(dummy_mask,r02);
2656 /* EWALD ELECTROSTATICS */
2658 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2659 ewrt = _mm256_mul_pd(r02,ewtabscale);
2660 ewitab = _mm256_cvttpd_epi32(ewrt);
2661 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2662 ewitab = _mm_slli_epi32(ewitab,2);
2663 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2664 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2665 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2666 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2667 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2668 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2669 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2670 velec = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
2671 felec = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
2673 d = _mm256_sub_pd(r02,rswitch);
2674 d = _mm256_max_pd(d,_mm256_setzero_pd());
2675 d2 = _mm256_mul_pd(d,d);
2676 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2678 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2680 /* Evaluate switch function */
2681 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2682 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
2683 cutoff_mask = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2687 fscal = _mm256_and_pd(fscal,cutoff_mask);
2689 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2691 /* Calculate temporary vectorial force */
2692 tx = _mm256_mul_pd(fscal,dx02);
2693 ty = _mm256_mul_pd(fscal,dy02);
2694 tz = _mm256_mul_pd(fscal,dz02);
2696 /* Update vectorial force */
2697 fix0 = _mm256_add_pd(fix0,tx);
2698 fiy0 = _mm256_add_pd(fiy0,ty);
2699 fiz0 = _mm256_add_pd(fiz0,tz);
2701 fjx2 = _mm256_add_pd(fjx2,tx);
2702 fjy2 = _mm256_add_pd(fjy2,ty);
2703 fjz2 = _mm256_add_pd(fjz2,tz);
2707 /**************************
2708 * CALCULATE INTERACTIONS *
2709 **************************/
2711 if (gmx_mm256_any_lt(rsq10,rcutoff2))
2714 r10 = _mm256_mul_pd(rsq10,rinv10);
2715 r10 = _mm256_andnot_pd(dummy_mask,r10);
2717 /* EWALD ELECTROSTATICS */
2719 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2720 ewrt = _mm256_mul_pd(r10,ewtabscale);
2721 ewitab = _mm256_cvttpd_epi32(ewrt);
2722 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2723 ewitab = _mm_slli_epi32(ewitab,2);
2724 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2725 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2726 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2727 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2728 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2729 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2730 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2731 velec = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
2732 felec = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
2734 d = _mm256_sub_pd(r10,rswitch);
2735 d = _mm256_max_pd(d,_mm256_setzero_pd());
2736 d2 = _mm256_mul_pd(d,d);
2737 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2739 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2741 /* Evaluate switch function */
2742 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2743 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
2744 cutoff_mask = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2748 fscal = _mm256_and_pd(fscal,cutoff_mask);
2750 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2752 /* Calculate temporary vectorial force */
2753 tx = _mm256_mul_pd(fscal,dx10);
2754 ty = _mm256_mul_pd(fscal,dy10);
2755 tz = _mm256_mul_pd(fscal,dz10);
2757 /* Update vectorial force */
2758 fix1 = _mm256_add_pd(fix1,tx);
2759 fiy1 = _mm256_add_pd(fiy1,ty);
2760 fiz1 = _mm256_add_pd(fiz1,tz);
2762 fjx0 = _mm256_add_pd(fjx0,tx);
2763 fjy0 = _mm256_add_pd(fjy0,ty);
2764 fjz0 = _mm256_add_pd(fjz0,tz);
2768 /**************************
2769 * CALCULATE INTERACTIONS *
2770 **************************/
2772 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2775 r11 = _mm256_mul_pd(rsq11,rinv11);
2776 r11 = _mm256_andnot_pd(dummy_mask,r11);
2778 /* EWALD ELECTROSTATICS */
2780 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2781 ewrt = _mm256_mul_pd(r11,ewtabscale);
2782 ewitab = _mm256_cvttpd_epi32(ewrt);
2783 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2784 ewitab = _mm_slli_epi32(ewitab,2);
2785 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2786 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2787 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2788 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2789 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2790 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2791 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2792 velec = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
2793 felec = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
2795 d = _mm256_sub_pd(r11,rswitch);
2796 d = _mm256_max_pd(d,_mm256_setzero_pd());
2797 d2 = _mm256_mul_pd(d,d);
2798 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2800 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2802 /* Evaluate switch function */
2803 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2804 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
2805 cutoff_mask = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2809 fscal = _mm256_and_pd(fscal,cutoff_mask);
2811 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2813 /* Calculate temporary vectorial force */
2814 tx = _mm256_mul_pd(fscal,dx11);
2815 ty = _mm256_mul_pd(fscal,dy11);
2816 tz = _mm256_mul_pd(fscal,dz11);
2818 /* Update vectorial force */
2819 fix1 = _mm256_add_pd(fix1,tx);
2820 fiy1 = _mm256_add_pd(fiy1,ty);
2821 fiz1 = _mm256_add_pd(fiz1,tz);
2823 fjx1 = _mm256_add_pd(fjx1,tx);
2824 fjy1 = _mm256_add_pd(fjy1,ty);
2825 fjz1 = _mm256_add_pd(fjz1,tz);
2829 /**************************
2830 * CALCULATE INTERACTIONS *
2831 **************************/
2833 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2836 r12 = _mm256_mul_pd(rsq12,rinv12);
2837 r12 = _mm256_andnot_pd(dummy_mask,r12);
2839 /* EWALD ELECTROSTATICS */
2841 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2842 ewrt = _mm256_mul_pd(r12,ewtabscale);
2843 ewitab = _mm256_cvttpd_epi32(ewrt);
2844 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2845 ewitab = _mm_slli_epi32(ewitab,2);
2846 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2847 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2848 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2849 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2850 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2851 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2852 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2853 velec = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
2854 felec = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2856 d = _mm256_sub_pd(r12,rswitch);
2857 d = _mm256_max_pd(d,_mm256_setzero_pd());
2858 d2 = _mm256_mul_pd(d,d);
2859 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2861 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2863 /* Evaluate switch function */
2864 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2865 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
2866 cutoff_mask = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2870 fscal = _mm256_and_pd(fscal,cutoff_mask);
2872 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2874 /* Calculate temporary vectorial force */
2875 tx = _mm256_mul_pd(fscal,dx12);
2876 ty = _mm256_mul_pd(fscal,dy12);
2877 tz = _mm256_mul_pd(fscal,dz12);
2879 /* Update vectorial force */
2880 fix1 = _mm256_add_pd(fix1,tx);
2881 fiy1 = _mm256_add_pd(fiy1,ty);
2882 fiz1 = _mm256_add_pd(fiz1,tz);
2884 fjx2 = _mm256_add_pd(fjx2,tx);
2885 fjy2 = _mm256_add_pd(fjy2,ty);
2886 fjz2 = _mm256_add_pd(fjz2,tz);
2890 /**************************
2891 * CALCULATE INTERACTIONS *
2892 **************************/
2894 if (gmx_mm256_any_lt(rsq20,rcutoff2))
2897 r20 = _mm256_mul_pd(rsq20,rinv20);
2898 r20 = _mm256_andnot_pd(dummy_mask,r20);
2900 /* EWALD ELECTROSTATICS */
2902 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2903 ewrt = _mm256_mul_pd(r20,ewtabscale);
2904 ewitab = _mm256_cvttpd_epi32(ewrt);
2905 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2906 ewitab = _mm_slli_epi32(ewitab,2);
2907 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2908 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2909 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2910 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2911 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2912 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2913 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2914 velec = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
2915 felec = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
2917 d = _mm256_sub_pd(r20,rswitch);
2918 d = _mm256_max_pd(d,_mm256_setzero_pd());
2919 d2 = _mm256_mul_pd(d,d);
2920 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2922 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2924 /* Evaluate switch function */
2925 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2926 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
2927 cutoff_mask = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2931 fscal = _mm256_and_pd(fscal,cutoff_mask);
2933 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2935 /* Calculate temporary vectorial force */
2936 tx = _mm256_mul_pd(fscal,dx20);
2937 ty = _mm256_mul_pd(fscal,dy20);
2938 tz = _mm256_mul_pd(fscal,dz20);
2940 /* Update vectorial force */
2941 fix2 = _mm256_add_pd(fix2,tx);
2942 fiy2 = _mm256_add_pd(fiy2,ty);
2943 fiz2 = _mm256_add_pd(fiz2,tz);
2945 fjx0 = _mm256_add_pd(fjx0,tx);
2946 fjy0 = _mm256_add_pd(fjy0,ty);
2947 fjz0 = _mm256_add_pd(fjz0,tz);
2951 /**************************
2952 * CALCULATE INTERACTIONS *
2953 **************************/
2955 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2958 r21 = _mm256_mul_pd(rsq21,rinv21);
2959 r21 = _mm256_andnot_pd(dummy_mask,r21);
2961 /* EWALD ELECTROSTATICS */
2963 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2964 ewrt = _mm256_mul_pd(r21,ewtabscale);
2965 ewitab = _mm256_cvttpd_epi32(ewrt);
2966 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2967 ewitab = _mm_slli_epi32(ewitab,2);
2968 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2969 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2970 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2971 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2972 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2973 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2974 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2975 velec = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
2976 felec = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2978 d = _mm256_sub_pd(r21,rswitch);
2979 d = _mm256_max_pd(d,_mm256_setzero_pd());
2980 d2 = _mm256_mul_pd(d,d);
2981 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2983 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2985 /* Evaluate switch function */
2986 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2987 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
2988 cutoff_mask = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2992 fscal = _mm256_and_pd(fscal,cutoff_mask);
2994 fscal = _mm256_andnot_pd(dummy_mask,fscal);
2996 /* Calculate temporary vectorial force */
2997 tx = _mm256_mul_pd(fscal,dx21);
2998 ty = _mm256_mul_pd(fscal,dy21);
2999 tz = _mm256_mul_pd(fscal,dz21);
3001 /* Update vectorial force */
3002 fix2 = _mm256_add_pd(fix2,tx);
3003 fiy2 = _mm256_add_pd(fiy2,ty);
3004 fiz2 = _mm256_add_pd(fiz2,tz);
3006 fjx1 = _mm256_add_pd(fjx1,tx);
3007 fjy1 = _mm256_add_pd(fjy1,ty);
3008 fjz1 = _mm256_add_pd(fjz1,tz);
3012 /**************************
3013 * CALCULATE INTERACTIONS *
3014 **************************/
3016 if (gmx_mm256_any_lt(rsq22,rcutoff2))
3019 r22 = _mm256_mul_pd(rsq22,rinv22);
3020 r22 = _mm256_andnot_pd(dummy_mask,r22);
3022 /* EWALD ELECTROSTATICS */
3024 /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
3025 ewrt = _mm256_mul_pd(r22,ewtabscale);
3026 ewitab = _mm256_cvttpd_epi32(ewrt);
3027 eweps = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
3028 ewitab = _mm_slli_epi32(ewitab,2);
3029 ewtabF = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
3030 ewtabD = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
3031 ewtabV = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
3032 ewtabFn = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
3033 GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
3034 felec = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
3035 velec = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
3036 velec = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
3037 felec = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
3039 d = _mm256_sub_pd(r22,rswitch);
3040 d = _mm256_max_pd(d,_mm256_setzero_pd());
3041 d2 = _mm256_mul_pd(d,d);
3042 sw = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
3044 dsw = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
3046 /* Evaluate switch function */
3047 /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
3048 felec = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
3049 cutoff_mask = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
3053 fscal = _mm256_and_pd(fscal,cutoff_mask);
3055 fscal = _mm256_andnot_pd(dummy_mask,fscal);
3057 /* Calculate temporary vectorial force */
3058 tx = _mm256_mul_pd(fscal,dx22);
3059 ty = _mm256_mul_pd(fscal,dy22);
3060 tz = _mm256_mul_pd(fscal,dz22);
3062 /* Update vectorial force */
3063 fix2 = _mm256_add_pd(fix2,tx);
3064 fiy2 = _mm256_add_pd(fiy2,ty);
3065 fiz2 = _mm256_add_pd(fiz2,tz);
3067 fjx2 = _mm256_add_pd(fjx2,tx);
3068 fjy2 = _mm256_add_pd(fjy2,ty);
3069 fjz2 = _mm256_add_pd(fjz2,tz);
3073 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
3074 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
3075 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
3076 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
3078 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
3079 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
3081 /* Inner loop uses 567 flops */
3084 /* End of innermost loop */
3086 gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
3087 f+i_coord_offset,fshift+i_shift_offset);
3089 /* Increment number of inner iterations */
3090 inneriter += j_index_end - j_index_start;
3092 /* Outer loop uses 18 flops */
3095 /* Increment number of outer iterations */
3098 /* Update outer/inner flops */
3100 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W3W3_F,outeriter*18 + inneriter*567);