2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
44 #include "../nb_kernel.h"
45 #include "types/simple.h"
49 #include "gromacs/simd/math_x86_avx_256_single.h"
50 #include "kernelutil_x86_avx_256_single.h"
53 * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_VF_avx_256_single
54 * Electrostatics interaction: ReactionField
55 * VdW interaction: LennardJones
56 * Geometry: Water3-Water3
57 * Calculate force/pot: PotentialAndForce
60 nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_VF_avx_256_single
61 (t_nblist * gmx_restrict nlist,
62 rvec * gmx_restrict xx,
63 rvec * gmx_restrict ff,
64 t_forcerec * gmx_restrict fr,
65 t_mdatoms * gmx_restrict mdatoms,
66 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67 t_nrnb * gmx_restrict nrnb)
69 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
70 * just 0 for non-waters.
71 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
72 * jnr indices corresponding to data put in the eight positions in the SIMD register.
74 int i_shift_offset,i_coord_offset,outeriter,inneriter;
75 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76 int jnrA,jnrB,jnrC,jnrD;
77 int jnrE,jnrF,jnrG,jnrH;
78 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
79 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
80 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
81 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
82 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
84 real *shiftvec,*fshift,*x,*f;
85 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
87 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
88 real * vdwioffsetptr0;
89 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
90 real * vdwioffsetptr1;
91 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
92 real * vdwioffsetptr2;
93 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
94 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
95 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
96 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
97 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
98 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
99 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
100 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
101 __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
102 __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
103 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
104 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
105 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
106 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
107 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
108 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
109 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
112 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
115 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
116 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
117 __m256 dummy_mask,cutoff_mask;
118 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
119 __m256 one = _mm256_set1_ps(1.0);
120 __m256 two = _mm256_set1_ps(2.0);
126 jindex = nlist->jindex;
128 shiftidx = nlist->shift;
130 shiftvec = fr->shift_vec[0];
131 fshift = fr->fshift[0];
132 facel = _mm256_set1_ps(fr->epsfac);
133 charge = mdatoms->chargeA;
134 krf = _mm256_set1_ps(fr->ic->k_rf);
135 krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
136 crf = _mm256_set1_ps(fr->ic->c_rf);
137 nvdwtype = fr->ntype;
139 vdwtype = mdatoms->typeA;
141 /* Setup water-specific parameters */
142 inr = nlist->iinr[0];
143 iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
144 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
145 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
146 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
148 jq0 = _mm256_set1_ps(charge[inr+0]);
149 jq1 = _mm256_set1_ps(charge[inr+1]);
150 jq2 = _mm256_set1_ps(charge[inr+2]);
151 vdwjidx0A = 2*vdwtype[inr+0];
152 qq00 = _mm256_mul_ps(iq0,jq0);
153 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
154 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
155 qq01 = _mm256_mul_ps(iq0,jq1);
156 qq02 = _mm256_mul_ps(iq0,jq2);
157 qq10 = _mm256_mul_ps(iq1,jq0);
158 qq11 = _mm256_mul_ps(iq1,jq1);
159 qq12 = _mm256_mul_ps(iq1,jq2);
160 qq20 = _mm256_mul_ps(iq2,jq0);
161 qq21 = _mm256_mul_ps(iq2,jq1);
162 qq22 = _mm256_mul_ps(iq2,jq2);
164 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
165 rcutoff_scalar = fr->rcoulomb;
166 rcutoff = _mm256_set1_ps(rcutoff_scalar);
167 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
169 sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
170 rvdw = _mm256_set1_ps(fr->rvdw);
172 /* Avoid stupid compiler warnings */
173 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
186 for(iidx=0;iidx<4*DIM;iidx++)
191 /* Start outer loop over neighborlists */
192 for(iidx=0; iidx<nri; iidx++)
194 /* Load shift vector for this list */
195 i_shift_offset = DIM*shiftidx[iidx];
197 /* Load limits for loop over neighbors */
198 j_index_start = jindex[iidx];
199 j_index_end = jindex[iidx+1];
201 /* Get outer coordinate index */
203 i_coord_offset = DIM*inr;
205 /* Load i particle coords and add shift vector */
206 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
207 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
209 fix0 = _mm256_setzero_ps();
210 fiy0 = _mm256_setzero_ps();
211 fiz0 = _mm256_setzero_ps();
212 fix1 = _mm256_setzero_ps();
213 fiy1 = _mm256_setzero_ps();
214 fiz1 = _mm256_setzero_ps();
215 fix2 = _mm256_setzero_ps();
216 fiy2 = _mm256_setzero_ps();
217 fiz2 = _mm256_setzero_ps();
219 /* Reset potential sums */
220 velecsum = _mm256_setzero_ps();
221 vvdwsum = _mm256_setzero_ps();
223 /* Start inner kernel loop */
224 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
227 /* Get j neighbor index, and coordinate index */
236 j_coord_offsetA = DIM*jnrA;
237 j_coord_offsetB = DIM*jnrB;
238 j_coord_offsetC = DIM*jnrC;
239 j_coord_offsetD = DIM*jnrD;
240 j_coord_offsetE = DIM*jnrE;
241 j_coord_offsetF = DIM*jnrF;
242 j_coord_offsetG = DIM*jnrG;
243 j_coord_offsetH = DIM*jnrH;
245 /* load j atom coordinates */
246 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
247 x+j_coord_offsetC,x+j_coord_offsetD,
248 x+j_coord_offsetE,x+j_coord_offsetF,
249 x+j_coord_offsetG,x+j_coord_offsetH,
250 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
252 /* Calculate displacement vector */
253 dx00 = _mm256_sub_ps(ix0,jx0);
254 dy00 = _mm256_sub_ps(iy0,jy0);
255 dz00 = _mm256_sub_ps(iz0,jz0);
256 dx01 = _mm256_sub_ps(ix0,jx1);
257 dy01 = _mm256_sub_ps(iy0,jy1);
258 dz01 = _mm256_sub_ps(iz0,jz1);
259 dx02 = _mm256_sub_ps(ix0,jx2);
260 dy02 = _mm256_sub_ps(iy0,jy2);
261 dz02 = _mm256_sub_ps(iz0,jz2);
262 dx10 = _mm256_sub_ps(ix1,jx0);
263 dy10 = _mm256_sub_ps(iy1,jy0);
264 dz10 = _mm256_sub_ps(iz1,jz0);
265 dx11 = _mm256_sub_ps(ix1,jx1);
266 dy11 = _mm256_sub_ps(iy1,jy1);
267 dz11 = _mm256_sub_ps(iz1,jz1);
268 dx12 = _mm256_sub_ps(ix1,jx2);
269 dy12 = _mm256_sub_ps(iy1,jy2);
270 dz12 = _mm256_sub_ps(iz1,jz2);
271 dx20 = _mm256_sub_ps(ix2,jx0);
272 dy20 = _mm256_sub_ps(iy2,jy0);
273 dz20 = _mm256_sub_ps(iz2,jz0);
274 dx21 = _mm256_sub_ps(ix2,jx1);
275 dy21 = _mm256_sub_ps(iy2,jy1);
276 dz21 = _mm256_sub_ps(iz2,jz1);
277 dx22 = _mm256_sub_ps(ix2,jx2);
278 dy22 = _mm256_sub_ps(iy2,jy2);
279 dz22 = _mm256_sub_ps(iz2,jz2);
281 /* Calculate squared distance and things based on it */
282 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
283 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
284 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
285 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
286 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
287 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
288 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
289 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
290 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
292 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
293 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
294 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
295 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
296 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
297 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
298 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
299 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
300 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
302 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
303 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
304 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
305 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
306 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
307 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
308 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
309 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
310 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
312 fjx0 = _mm256_setzero_ps();
313 fjy0 = _mm256_setzero_ps();
314 fjz0 = _mm256_setzero_ps();
315 fjx1 = _mm256_setzero_ps();
316 fjy1 = _mm256_setzero_ps();
317 fjz1 = _mm256_setzero_ps();
318 fjx2 = _mm256_setzero_ps();
319 fjy2 = _mm256_setzero_ps();
320 fjz2 = _mm256_setzero_ps();
322 /**************************
323 * CALCULATE INTERACTIONS *
324 **************************/
326 if (gmx_mm256_any_lt(rsq00,rcutoff2))
329 /* REACTION-FIELD ELECTROSTATICS */
330 velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
331 felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
333 /* LENNARD-JONES DISPERSION/REPULSION */
335 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
336 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
337 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
338 vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
339 _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
340 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
342 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
344 /* Update potential sum for this i atom from the interaction with this j atom. */
345 velec = _mm256_and_ps(velec,cutoff_mask);
346 velecsum = _mm256_add_ps(velecsum,velec);
347 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
348 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
350 fscal = _mm256_add_ps(felec,fvdw);
352 fscal = _mm256_and_ps(fscal,cutoff_mask);
354 /* Calculate temporary vectorial force */
355 tx = _mm256_mul_ps(fscal,dx00);
356 ty = _mm256_mul_ps(fscal,dy00);
357 tz = _mm256_mul_ps(fscal,dz00);
359 /* Update vectorial force */
360 fix0 = _mm256_add_ps(fix0,tx);
361 fiy0 = _mm256_add_ps(fiy0,ty);
362 fiz0 = _mm256_add_ps(fiz0,tz);
364 fjx0 = _mm256_add_ps(fjx0,tx);
365 fjy0 = _mm256_add_ps(fjy0,ty);
366 fjz0 = _mm256_add_ps(fjz0,tz);
370 /**************************
371 * CALCULATE INTERACTIONS *
372 **************************/
374 if (gmx_mm256_any_lt(rsq01,rcutoff2))
377 /* REACTION-FIELD ELECTROSTATICS */
378 velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
379 felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
381 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
383 /* Update potential sum for this i atom from the interaction with this j atom. */
384 velec = _mm256_and_ps(velec,cutoff_mask);
385 velecsum = _mm256_add_ps(velecsum,velec);
389 fscal = _mm256_and_ps(fscal,cutoff_mask);
391 /* Calculate temporary vectorial force */
392 tx = _mm256_mul_ps(fscal,dx01);
393 ty = _mm256_mul_ps(fscal,dy01);
394 tz = _mm256_mul_ps(fscal,dz01);
396 /* Update vectorial force */
397 fix0 = _mm256_add_ps(fix0,tx);
398 fiy0 = _mm256_add_ps(fiy0,ty);
399 fiz0 = _mm256_add_ps(fiz0,tz);
401 fjx1 = _mm256_add_ps(fjx1,tx);
402 fjy1 = _mm256_add_ps(fjy1,ty);
403 fjz1 = _mm256_add_ps(fjz1,tz);
407 /**************************
408 * CALCULATE INTERACTIONS *
409 **************************/
411 if (gmx_mm256_any_lt(rsq02,rcutoff2))
414 /* REACTION-FIELD ELECTROSTATICS */
415 velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
416 felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
418 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
420 /* Update potential sum for this i atom from the interaction with this j atom. */
421 velec = _mm256_and_ps(velec,cutoff_mask);
422 velecsum = _mm256_add_ps(velecsum,velec);
426 fscal = _mm256_and_ps(fscal,cutoff_mask);
428 /* Calculate temporary vectorial force */
429 tx = _mm256_mul_ps(fscal,dx02);
430 ty = _mm256_mul_ps(fscal,dy02);
431 tz = _mm256_mul_ps(fscal,dz02);
433 /* Update vectorial force */
434 fix0 = _mm256_add_ps(fix0,tx);
435 fiy0 = _mm256_add_ps(fiy0,ty);
436 fiz0 = _mm256_add_ps(fiz0,tz);
438 fjx2 = _mm256_add_ps(fjx2,tx);
439 fjy2 = _mm256_add_ps(fjy2,ty);
440 fjz2 = _mm256_add_ps(fjz2,tz);
444 /**************************
445 * CALCULATE INTERACTIONS *
446 **************************/
448 if (gmx_mm256_any_lt(rsq10,rcutoff2))
451 /* REACTION-FIELD ELECTROSTATICS */
452 velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
453 felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
455 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
457 /* Update potential sum for this i atom from the interaction with this j atom. */
458 velec = _mm256_and_ps(velec,cutoff_mask);
459 velecsum = _mm256_add_ps(velecsum,velec);
463 fscal = _mm256_and_ps(fscal,cutoff_mask);
465 /* Calculate temporary vectorial force */
466 tx = _mm256_mul_ps(fscal,dx10);
467 ty = _mm256_mul_ps(fscal,dy10);
468 tz = _mm256_mul_ps(fscal,dz10);
470 /* Update vectorial force */
471 fix1 = _mm256_add_ps(fix1,tx);
472 fiy1 = _mm256_add_ps(fiy1,ty);
473 fiz1 = _mm256_add_ps(fiz1,tz);
475 fjx0 = _mm256_add_ps(fjx0,tx);
476 fjy0 = _mm256_add_ps(fjy0,ty);
477 fjz0 = _mm256_add_ps(fjz0,tz);
481 /**************************
482 * CALCULATE INTERACTIONS *
483 **************************/
485 if (gmx_mm256_any_lt(rsq11,rcutoff2))
488 /* REACTION-FIELD ELECTROSTATICS */
489 velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
490 felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
492 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
494 /* Update potential sum for this i atom from the interaction with this j atom. */
495 velec = _mm256_and_ps(velec,cutoff_mask);
496 velecsum = _mm256_add_ps(velecsum,velec);
500 fscal = _mm256_and_ps(fscal,cutoff_mask);
502 /* Calculate temporary vectorial force */
503 tx = _mm256_mul_ps(fscal,dx11);
504 ty = _mm256_mul_ps(fscal,dy11);
505 tz = _mm256_mul_ps(fscal,dz11);
507 /* Update vectorial force */
508 fix1 = _mm256_add_ps(fix1,tx);
509 fiy1 = _mm256_add_ps(fiy1,ty);
510 fiz1 = _mm256_add_ps(fiz1,tz);
512 fjx1 = _mm256_add_ps(fjx1,tx);
513 fjy1 = _mm256_add_ps(fjy1,ty);
514 fjz1 = _mm256_add_ps(fjz1,tz);
518 /**************************
519 * CALCULATE INTERACTIONS *
520 **************************/
522 if (gmx_mm256_any_lt(rsq12,rcutoff2))
525 /* REACTION-FIELD ELECTROSTATICS */
526 velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
527 felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
529 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
531 /* Update potential sum for this i atom from the interaction with this j atom. */
532 velec = _mm256_and_ps(velec,cutoff_mask);
533 velecsum = _mm256_add_ps(velecsum,velec);
537 fscal = _mm256_and_ps(fscal,cutoff_mask);
539 /* Calculate temporary vectorial force */
540 tx = _mm256_mul_ps(fscal,dx12);
541 ty = _mm256_mul_ps(fscal,dy12);
542 tz = _mm256_mul_ps(fscal,dz12);
544 /* Update vectorial force */
545 fix1 = _mm256_add_ps(fix1,tx);
546 fiy1 = _mm256_add_ps(fiy1,ty);
547 fiz1 = _mm256_add_ps(fiz1,tz);
549 fjx2 = _mm256_add_ps(fjx2,tx);
550 fjy2 = _mm256_add_ps(fjy2,ty);
551 fjz2 = _mm256_add_ps(fjz2,tz);
555 /**************************
556 * CALCULATE INTERACTIONS *
557 **************************/
559 if (gmx_mm256_any_lt(rsq20,rcutoff2))
562 /* REACTION-FIELD ELECTROSTATICS */
563 velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
564 felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
566 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
568 /* Update potential sum for this i atom from the interaction with this j atom. */
569 velec = _mm256_and_ps(velec,cutoff_mask);
570 velecsum = _mm256_add_ps(velecsum,velec);
574 fscal = _mm256_and_ps(fscal,cutoff_mask);
576 /* Calculate temporary vectorial force */
577 tx = _mm256_mul_ps(fscal,dx20);
578 ty = _mm256_mul_ps(fscal,dy20);
579 tz = _mm256_mul_ps(fscal,dz20);
581 /* Update vectorial force */
582 fix2 = _mm256_add_ps(fix2,tx);
583 fiy2 = _mm256_add_ps(fiy2,ty);
584 fiz2 = _mm256_add_ps(fiz2,tz);
586 fjx0 = _mm256_add_ps(fjx0,tx);
587 fjy0 = _mm256_add_ps(fjy0,ty);
588 fjz0 = _mm256_add_ps(fjz0,tz);
592 /**************************
593 * CALCULATE INTERACTIONS *
594 **************************/
596 if (gmx_mm256_any_lt(rsq21,rcutoff2))
599 /* REACTION-FIELD ELECTROSTATICS */
600 velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
601 felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
603 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
605 /* Update potential sum for this i atom from the interaction with this j atom. */
606 velec = _mm256_and_ps(velec,cutoff_mask);
607 velecsum = _mm256_add_ps(velecsum,velec);
611 fscal = _mm256_and_ps(fscal,cutoff_mask);
613 /* Calculate temporary vectorial force */
614 tx = _mm256_mul_ps(fscal,dx21);
615 ty = _mm256_mul_ps(fscal,dy21);
616 tz = _mm256_mul_ps(fscal,dz21);
618 /* Update vectorial force */
619 fix2 = _mm256_add_ps(fix2,tx);
620 fiy2 = _mm256_add_ps(fiy2,ty);
621 fiz2 = _mm256_add_ps(fiz2,tz);
623 fjx1 = _mm256_add_ps(fjx1,tx);
624 fjy1 = _mm256_add_ps(fjy1,ty);
625 fjz1 = _mm256_add_ps(fjz1,tz);
629 /**************************
630 * CALCULATE INTERACTIONS *
631 **************************/
633 if (gmx_mm256_any_lt(rsq22,rcutoff2))
636 /* REACTION-FIELD ELECTROSTATICS */
637 velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
638 felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
640 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
642 /* Update potential sum for this i atom from the interaction with this j atom. */
643 velec = _mm256_and_ps(velec,cutoff_mask);
644 velecsum = _mm256_add_ps(velecsum,velec);
648 fscal = _mm256_and_ps(fscal,cutoff_mask);
650 /* Calculate temporary vectorial force */
651 tx = _mm256_mul_ps(fscal,dx22);
652 ty = _mm256_mul_ps(fscal,dy22);
653 tz = _mm256_mul_ps(fscal,dz22);
655 /* Update vectorial force */
656 fix2 = _mm256_add_ps(fix2,tx);
657 fiy2 = _mm256_add_ps(fiy2,ty);
658 fiz2 = _mm256_add_ps(fiz2,tz);
660 fjx2 = _mm256_add_ps(fjx2,tx);
661 fjy2 = _mm256_add_ps(fjy2,ty);
662 fjz2 = _mm256_add_ps(fjz2,tz);
666 fjptrA = f+j_coord_offsetA;
667 fjptrB = f+j_coord_offsetB;
668 fjptrC = f+j_coord_offsetC;
669 fjptrD = f+j_coord_offsetD;
670 fjptrE = f+j_coord_offsetE;
671 fjptrF = f+j_coord_offsetF;
672 fjptrG = f+j_coord_offsetG;
673 fjptrH = f+j_coord_offsetH;
675 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
676 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
678 /* Inner loop uses 342 flops */
684 /* Get j neighbor index, and coordinate index */
685 jnrlistA = jjnr[jidx];
686 jnrlistB = jjnr[jidx+1];
687 jnrlistC = jjnr[jidx+2];
688 jnrlistD = jjnr[jidx+3];
689 jnrlistE = jjnr[jidx+4];
690 jnrlistF = jjnr[jidx+5];
691 jnrlistG = jjnr[jidx+6];
692 jnrlistH = jjnr[jidx+7];
693 /* Sign of each element will be negative for non-real atoms.
694 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
695 * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
697 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
698 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
700 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
701 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
702 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
703 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
704 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
705 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
706 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
707 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
708 j_coord_offsetA = DIM*jnrA;
709 j_coord_offsetB = DIM*jnrB;
710 j_coord_offsetC = DIM*jnrC;
711 j_coord_offsetD = DIM*jnrD;
712 j_coord_offsetE = DIM*jnrE;
713 j_coord_offsetF = DIM*jnrF;
714 j_coord_offsetG = DIM*jnrG;
715 j_coord_offsetH = DIM*jnrH;
717 /* load j atom coordinates */
718 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
719 x+j_coord_offsetC,x+j_coord_offsetD,
720 x+j_coord_offsetE,x+j_coord_offsetF,
721 x+j_coord_offsetG,x+j_coord_offsetH,
722 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
724 /* Calculate displacement vector */
725 dx00 = _mm256_sub_ps(ix0,jx0);
726 dy00 = _mm256_sub_ps(iy0,jy0);
727 dz00 = _mm256_sub_ps(iz0,jz0);
728 dx01 = _mm256_sub_ps(ix0,jx1);
729 dy01 = _mm256_sub_ps(iy0,jy1);
730 dz01 = _mm256_sub_ps(iz0,jz1);
731 dx02 = _mm256_sub_ps(ix0,jx2);
732 dy02 = _mm256_sub_ps(iy0,jy2);
733 dz02 = _mm256_sub_ps(iz0,jz2);
734 dx10 = _mm256_sub_ps(ix1,jx0);
735 dy10 = _mm256_sub_ps(iy1,jy0);
736 dz10 = _mm256_sub_ps(iz1,jz0);
737 dx11 = _mm256_sub_ps(ix1,jx1);
738 dy11 = _mm256_sub_ps(iy1,jy1);
739 dz11 = _mm256_sub_ps(iz1,jz1);
740 dx12 = _mm256_sub_ps(ix1,jx2);
741 dy12 = _mm256_sub_ps(iy1,jy2);
742 dz12 = _mm256_sub_ps(iz1,jz2);
743 dx20 = _mm256_sub_ps(ix2,jx0);
744 dy20 = _mm256_sub_ps(iy2,jy0);
745 dz20 = _mm256_sub_ps(iz2,jz0);
746 dx21 = _mm256_sub_ps(ix2,jx1);
747 dy21 = _mm256_sub_ps(iy2,jy1);
748 dz21 = _mm256_sub_ps(iz2,jz1);
749 dx22 = _mm256_sub_ps(ix2,jx2);
750 dy22 = _mm256_sub_ps(iy2,jy2);
751 dz22 = _mm256_sub_ps(iz2,jz2);
753 /* Calculate squared distance and things based on it */
754 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
755 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
756 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
757 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
758 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
759 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
760 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
761 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
762 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
764 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
765 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
766 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
767 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
768 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
769 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
770 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
771 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
772 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
774 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
775 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
776 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
777 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
778 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
779 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
780 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
781 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
782 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
784 fjx0 = _mm256_setzero_ps();
785 fjy0 = _mm256_setzero_ps();
786 fjz0 = _mm256_setzero_ps();
787 fjx1 = _mm256_setzero_ps();
788 fjy1 = _mm256_setzero_ps();
789 fjz1 = _mm256_setzero_ps();
790 fjx2 = _mm256_setzero_ps();
791 fjy2 = _mm256_setzero_ps();
792 fjz2 = _mm256_setzero_ps();
794 /**************************
795 * CALCULATE INTERACTIONS *
796 **************************/
798 if (gmx_mm256_any_lt(rsq00,rcutoff2))
801 /* REACTION-FIELD ELECTROSTATICS */
802 velec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_add_ps(rinv00,_mm256_mul_ps(krf,rsq00)),crf));
803 felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
805 /* LENNARD-JONES DISPERSION/REPULSION */
807 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
808 vvdw6 = _mm256_mul_ps(c6_00,rinvsix);
809 vvdw12 = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
810 vvdw = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
811 _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
812 fvdw = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
814 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
816 /* Update potential sum for this i atom from the interaction with this j atom. */
817 velec = _mm256_and_ps(velec,cutoff_mask);
818 velec = _mm256_andnot_ps(dummy_mask,velec);
819 velecsum = _mm256_add_ps(velecsum,velec);
820 vvdw = _mm256_and_ps(vvdw,cutoff_mask);
821 vvdw = _mm256_andnot_ps(dummy_mask,vvdw);
822 vvdwsum = _mm256_add_ps(vvdwsum,vvdw);
824 fscal = _mm256_add_ps(felec,fvdw);
826 fscal = _mm256_and_ps(fscal,cutoff_mask);
828 fscal = _mm256_andnot_ps(dummy_mask,fscal);
830 /* Calculate temporary vectorial force */
831 tx = _mm256_mul_ps(fscal,dx00);
832 ty = _mm256_mul_ps(fscal,dy00);
833 tz = _mm256_mul_ps(fscal,dz00);
835 /* Update vectorial force */
836 fix0 = _mm256_add_ps(fix0,tx);
837 fiy0 = _mm256_add_ps(fiy0,ty);
838 fiz0 = _mm256_add_ps(fiz0,tz);
840 fjx0 = _mm256_add_ps(fjx0,tx);
841 fjy0 = _mm256_add_ps(fjy0,ty);
842 fjz0 = _mm256_add_ps(fjz0,tz);
846 /**************************
847 * CALCULATE INTERACTIONS *
848 **************************/
850 if (gmx_mm256_any_lt(rsq01,rcutoff2))
853 /* REACTION-FIELD ELECTROSTATICS */
854 velec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_add_ps(rinv01,_mm256_mul_ps(krf,rsq01)),crf));
855 felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
857 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
859 /* Update potential sum for this i atom from the interaction with this j atom. */
860 velec = _mm256_and_ps(velec,cutoff_mask);
861 velec = _mm256_andnot_ps(dummy_mask,velec);
862 velecsum = _mm256_add_ps(velecsum,velec);
866 fscal = _mm256_and_ps(fscal,cutoff_mask);
868 fscal = _mm256_andnot_ps(dummy_mask,fscal);
870 /* Calculate temporary vectorial force */
871 tx = _mm256_mul_ps(fscal,dx01);
872 ty = _mm256_mul_ps(fscal,dy01);
873 tz = _mm256_mul_ps(fscal,dz01);
875 /* Update vectorial force */
876 fix0 = _mm256_add_ps(fix0,tx);
877 fiy0 = _mm256_add_ps(fiy0,ty);
878 fiz0 = _mm256_add_ps(fiz0,tz);
880 fjx1 = _mm256_add_ps(fjx1,tx);
881 fjy1 = _mm256_add_ps(fjy1,ty);
882 fjz1 = _mm256_add_ps(fjz1,tz);
886 /**************************
887 * CALCULATE INTERACTIONS *
888 **************************/
890 if (gmx_mm256_any_lt(rsq02,rcutoff2))
893 /* REACTION-FIELD ELECTROSTATICS */
894 velec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_add_ps(rinv02,_mm256_mul_ps(krf,rsq02)),crf));
895 felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
897 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
899 /* Update potential sum for this i atom from the interaction with this j atom. */
900 velec = _mm256_and_ps(velec,cutoff_mask);
901 velec = _mm256_andnot_ps(dummy_mask,velec);
902 velecsum = _mm256_add_ps(velecsum,velec);
906 fscal = _mm256_and_ps(fscal,cutoff_mask);
908 fscal = _mm256_andnot_ps(dummy_mask,fscal);
910 /* Calculate temporary vectorial force */
911 tx = _mm256_mul_ps(fscal,dx02);
912 ty = _mm256_mul_ps(fscal,dy02);
913 tz = _mm256_mul_ps(fscal,dz02);
915 /* Update vectorial force */
916 fix0 = _mm256_add_ps(fix0,tx);
917 fiy0 = _mm256_add_ps(fiy0,ty);
918 fiz0 = _mm256_add_ps(fiz0,tz);
920 fjx2 = _mm256_add_ps(fjx2,tx);
921 fjy2 = _mm256_add_ps(fjy2,ty);
922 fjz2 = _mm256_add_ps(fjz2,tz);
926 /**************************
927 * CALCULATE INTERACTIONS *
928 **************************/
930 if (gmx_mm256_any_lt(rsq10,rcutoff2))
933 /* REACTION-FIELD ELECTROSTATICS */
934 velec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_add_ps(rinv10,_mm256_mul_ps(krf,rsq10)),crf));
935 felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
937 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
939 /* Update potential sum for this i atom from the interaction with this j atom. */
940 velec = _mm256_and_ps(velec,cutoff_mask);
941 velec = _mm256_andnot_ps(dummy_mask,velec);
942 velecsum = _mm256_add_ps(velecsum,velec);
946 fscal = _mm256_and_ps(fscal,cutoff_mask);
948 fscal = _mm256_andnot_ps(dummy_mask,fscal);
950 /* Calculate temporary vectorial force */
951 tx = _mm256_mul_ps(fscal,dx10);
952 ty = _mm256_mul_ps(fscal,dy10);
953 tz = _mm256_mul_ps(fscal,dz10);
955 /* Update vectorial force */
956 fix1 = _mm256_add_ps(fix1,tx);
957 fiy1 = _mm256_add_ps(fiy1,ty);
958 fiz1 = _mm256_add_ps(fiz1,tz);
960 fjx0 = _mm256_add_ps(fjx0,tx);
961 fjy0 = _mm256_add_ps(fjy0,ty);
962 fjz0 = _mm256_add_ps(fjz0,tz);
966 /**************************
967 * CALCULATE INTERACTIONS *
968 **************************/
970 if (gmx_mm256_any_lt(rsq11,rcutoff2))
973 /* REACTION-FIELD ELECTROSTATICS */
974 velec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
975 felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
977 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
979 /* Update potential sum for this i atom from the interaction with this j atom. */
980 velec = _mm256_and_ps(velec,cutoff_mask);
981 velec = _mm256_andnot_ps(dummy_mask,velec);
982 velecsum = _mm256_add_ps(velecsum,velec);
986 fscal = _mm256_and_ps(fscal,cutoff_mask);
988 fscal = _mm256_andnot_ps(dummy_mask,fscal);
990 /* Calculate temporary vectorial force */
991 tx = _mm256_mul_ps(fscal,dx11);
992 ty = _mm256_mul_ps(fscal,dy11);
993 tz = _mm256_mul_ps(fscal,dz11);
995 /* Update vectorial force */
996 fix1 = _mm256_add_ps(fix1,tx);
997 fiy1 = _mm256_add_ps(fiy1,ty);
998 fiz1 = _mm256_add_ps(fiz1,tz);
1000 fjx1 = _mm256_add_ps(fjx1,tx);
1001 fjy1 = _mm256_add_ps(fjy1,ty);
1002 fjz1 = _mm256_add_ps(fjz1,tz);
1006 /**************************
1007 * CALCULATE INTERACTIONS *
1008 **************************/
1010 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1013 /* REACTION-FIELD ELECTROSTATICS */
1014 velec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
1015 felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
1017 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1019 /* Update potential sum for this i atom from the interaction with this j atom. */
1020 velec = _mm256_and_ps(velec,cutoff_mask);
1021 velec = _mm256_andnot_ps(dummy_mask,velec);
1022 velecsum = _mm256_add_ps(velecsum,velec);
1026 fscal = _mm256_and_ps(fscal,cutoff_mask);
1028 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1030 /* Calculate temporary vectorial force */
1031 tx = _mm256_mul_ps(fscal,dx12);
1032 ty = _mm256_mul_ps(fscal,dy12);
1033 tz = _mm256_mul_ps(fscal,dz12);
1035 /* Update vectorial force */
1036 fix1 = _mm256_add_ps(fix1,tx);
1037 fiy1 = _mm256_add_ps(fiy1,ty);
1038 fiz1 = _mm256_add_ps(fiz1,tz);
1040 fjx2 = _mm256_add_ps(fjx2,tx);
1041 fjy2 = _mm256_add_ps(fjy2,ty);
1042 fjz2 = _mm256_add_ps(fjz2,tz);
1046 /**************************
1047 * CALCULATE INTERACTIONS *
1048 **************************/
1050 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1053 /* REACTION-FIELD ELECTROSTATICS */
1054 velec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_add_ps(rinv20,_mm256_mul_ps(krf,rsq20)),crf));
1055 felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
1057 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1059 /* Update potential sum for this i atom from the interaction with this j atom. */
1060 velec = _mm256_and_ps(velec,cutoff_mask);
1061 velec = _mm256_andnot_ps(dummy_mask,velec);
1062 velecsum = _mm256_add_ps(velecsum,velec);
1066 fscal = _mm256_and_ps(fscal,cutoff_mask);
1068 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1070 /* Calculate temporary vectorial force */
1071 tx = _mm256_mul_ps(fscal,dx20);
1072 ty = _mm256_mul_ps(fscal,dy20);
1073 tz = _mm256_mul_ps(fscal,dz20);
1075 /* Update vectorial force */
1076 fix2 = _mm256_add_ps(fix2,tx);
1077 fiy2 = _mm256_add_ps(fiy2,ty);
1078 fiz2 = _mm256_add_ps(fiz2,tz);
1080 fjx0 = _mm256_add_ps(fjx0,tx);
1081 fjy0 = _mm256_add_ps(fjy0,ty);
1082 fjz0 = _mm256_add_ps(fjz0,tz);
1086 /**************************
1087 * CALCULATE INTERACTIONS *
1088 **************************/
1090 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1093 /* REACTION-FIELD ELECTROSTATICS */
1094 velec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
1095 felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
1097 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1099 /* Update potential sum for this i atom from the interaction with this j atom. */
1100 velec = _mm256_and_ps(velec,cutoff_mask);
1101 velec = _mm256_andnot_ps(dummy_mask,velec);
1102 velecsum = _mm256_add_ps(velecsum,velec);
1106 fscal = _mm256_and_ps(fscal,cutoff_mask);
1108 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1110 /* Calculate temporary vectorial force */
1111 tx = _mm256_mul_ps(fscal,dx21);
1112 ty = _mm256_mul_ps(fscal,dy21);
1113 tz = _mm256_mul_ps(fscal,dz21);
1115 /* Update vectorial force */
1116 fix2 = _mm256_add_ps(fix2,tx);
1117 fiy2 = _mm256_add_ps(fiy2,ty);
1118 fiz2 = _mm256_add_ps(fiz2,tz);
1120 fjx1 = _mm256_add_ps(fjx1,tx);
1121 fjy1 = _mm256_add_ps(fjy1,ty);
1122 fjz1 = _mm256_add_ps(fjz1,tz);
1126 /**************************
1127 * CALCULATE INTERACTIONS *
1128 **************************/
1130 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1133 /* REACTION-FIELD ELECTROSTATICS */
1134 velec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
1135 felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
1137 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1139 /* Update potential sum for this i atom from the interaction with this j atom. */
1140 velec = _mm256_and_ps(velec,cutoff_mask);
1141 velec = _mm256_andnot_ps(dummy_mask,velec);
1142 velecsum = _mm256_add_ps(velecsum,velec);
1146 fscal = _mm256_and_ps(fscal,cutoff_mask);
1148 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1150 /* Calculate temporary vectorial force */
1151 tx = _mm256_mul_ps(fscal,dx22);
1152 ty = _mm256_mul_ps(fscal,dy22);
1153 tz = _mm256_mul_ps(fscal,dz22);
1155 /* Update vectorial force */
1156 fix2 = _mm256_add_ps(fix2,tx);
1157 fiy2 = _mm256_add_ps(fiy2,ty);
1158 fiz2 = _mm256_add_ps(fiz2,tz);
1160 fjx2 = _mm256_add_ps(fjx2,tx);
1161 fjy2 = _mm256_add_ps(fjy2,ty);
1162 fjz2 = _mm256_add_ps(fjz2,tz);
1166 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1167 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1168 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1169 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1170 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1171 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1172 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1173 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1175 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1176 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1178 /* Inner loop uses 342 flops */
1181 /* End of innermost loop */
1183 gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1184 f+i_coord_offset,fshift+i_shift_offset);
1187 /* Update potential energies */
1188 gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1189 gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
1191 /* Increment number of inner iterations */
1192 inneriter += j_index_end - j_index_start;
1194 /* Outer loop uses 20 flops */
1197 /* Increment number of outer iterations */
1200 /* Update outer/inner flops */
1202 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*342);
1205 * Gromacs nonbonded kernel: nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_F_avx_256_single
1206 * Electrostatics interaction: ReactionField
1207 * VdW interaction: LennardJones
1208 * Geometry: Water3-Water3
1209 * Calculate force/pot: Force
1212 nb_kernel_ElecRFCut_VdwLJSh_GeomW3W3_F_avx_256_single
1213 (t_nblist * gmx_restrict nlist,
1214 rvec * gmx_restrict xx,
1215 rvec * gmx_restrict ff,
1216 t_forcerec * gmx_restrict fr,
1217 t_mdatoms * gmx_restrict mdatoms,
1218 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1219 t_nrnb * gmx_restrict nrnb)
1221 /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1222 * just 0 for non-waters.
1223 * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1224 * jnr indices corresponding to data put in the eight positions in the SIMD register.
1226 int i_shift_offset,i_coord_offset,outeriter,inneriter;
1227 int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1228 int jnrA,jnrB,jnrC,jnrD;
1229 int jnrE,jnrF,jnrG,jnrH;
1230 int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1231 int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1232 int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1233 int j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1234 int *iinr,*jindex,*jjnr,*shiftidx,*gid;
1235 real rcutoff_scalar;
1236 real *shiftvec,*fshift,*x,*f;
1237 real *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1238 real scratch[4*DIM];
1239 __m256 tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1240 real * vdwioffsetptr0;
1241 __m256 ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1242 real * vdwioffsetptr1;
1243 __m256 ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1244 real * vdwioffsetptr2;
1245 __m256 ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1246 int vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1247 __m256 jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1248 int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1249 __m256 jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1250 int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1251 __m256 jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1252 __m256 dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1253 __m256 dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1254 __m256 dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1255 __m256 dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1256 __m256 dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1257 __m256 dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1258 __m256 dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1259 __m256 dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1260 __m256 dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1261 __m256 velec,felec,velecsum,facel,crf,krf,krf2;
1264 __m256 rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1267 __m256 one_sixth = _mm256_set1_ps(1.0/6.0);
1268 __m256 one_twelfth = _mm256_set1_ps(1.0/12.0);
1269 __m256 dummy_mask,cutoff_mask;
1270 __m256 signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1271 __m256 one = _mm256_set1_ps(1.0);
1272 __m256 two = _mm256_set1_ps(2.0);
1278 jindex = nlist->jindex;
1280 shiftidx = nlist->shift;
1282 shiftvec = fr->shift_vec[0];
1283 fshift = fr->fshift[0];
1284 facel = _mm256_set1_ps(fr->epsfac);
1285 charge = mdatoms->chargeA;
1286 krf = _mm256_set1_ps(fr->ic->k_rf);
1287 krf2 = _mm256_set1_ps(fr->ic->k_rf*2.0);
1288 crf = _mm256_set1_ps(fr->ic->c_rf);
1289 nvdwtype = fr->ntype;
1290 vdwparam = fr->nbfp;
1291 vdwtype = mdatoms->typeA;
1293 /* Setup water-specific parameters */
1294 inr = nlist->iinr[0];
1295 iq0 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1296 iq1 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1297 iq2 = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1298 vdwioffsetptr0 = vdwparam+2*nvdwtype*vdwtype[inr+0];
1300 jq0 = _mm256_set1_ps(charge[inr+0]);
1301 jq1 = _mm256_set1_ps(charge[inr+1]);
1302 jq2 = _mm256_set1_ps(charge[inr+2]);
1303 vdwjidx0A = 2*vdwtype[inr+0];
1304 qq00 = _mm256_mul_ps(iq0,jq0);
1305 c6_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1306 c12_00 = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1307 qq01 = _mm256_mul_ps(iq0,jq1);
1308 qq02 = _mm256_mul_ps(iq0,jq2);
1309 qq10 = _mm256_mul_ps(iq1,jq0);
1310 qq11 = _mm256_mul_ps(iq1,jq1);
1311 qq12 = _mm256_mul_ps(iq1,jq2);
1312 qq20 = _mm256_mul_ps(iq2,jq0);
1313 qq21 = _mm256_mul_ps(iq2,jq1);
1314 qq22 = _mm256_mul_ps(iq2,jq2);
1316 /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1317 rcutoff_scalar = fr->rcoulomb;
1318 rcutoff = _mm256_set1_ps(rcutoff_scalar);
1319 rcutoff2 = _mm256_mul_ps(rcutoff,rcutoff);
1321 sh_vdw_invrcut6 = _mm256_set1_ps(fr->ic->sh_invrc6);
1322 rvdw = _mm256_set1_ps(fr->rvdw);
1324 /* Avoid stupid compiler warnings */
1325 jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1326 j_coord_offsetA = 0;
1327 j_coord_offsetB = 0;
1328 j_coord_offsetC = 0;
1329 j_coord_offsetD = 0;
1330 j_coord_offsetE = 0;
1331 j_coord_offsetF = 0;
1332 j_coord_offsetG = 0;
1333 j_coord_offsetH = 0;
1338 for(iidx=0;iidx<4*DIM;iidx++)
1340 scratch[iidx] = 0.0;
1343 /* Start outer loop over neighborlists */
1344 for(iidx=0; iidx<nri; iidx++)
1346 /* Load shift vector for this list */
1347 i_shift_offset = DIM*shiftidx[iidx];
1349 /* Load limits for loop over neighbors */
1350 j_index_start = jindex[iidx];
1351 j_index_end = jindex[iidx+1];
1353 /* Get outer coordinate index */
1355 i_coord_offset = DIM*inr;
1357 /* Load i particle coords and add shift vector */
1358 gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1359 &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1361 fix0 = _mm256_setzero_ps();
1362 fiy0 = _mm256_setzero_ps();
1363 fiz0 = _mm256_setzero_ps();
1364 fix1 = _mm256_setzero_ps();
1365 fiy1 = _mm256_setzero_ps();
1366 fiz1 = _mm256_setzero_ps();
1367 fix2 = _mm256_setzero_ps();
1368 fiy2 = _mm256_setzero_ps();
1369 fiz2 = _mm256_setzero_ps();
1371 /* Start inner kernel loop */
1372 for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1375 /* Get j neighbor index, and coordinate index */
1377 jnrB = jjnr[jidx+1];
1378 jnrC = jjnr[jidx+2];
1379 jnrD = jjnr[jidx+3];
1380 jnrE = jjnr[jidx+4];
1381 jnrF = jjnr[jidx+5];
1382 jnrG = jjnr[jidx+6];
1383 jnrH = jjnr[jidx+7];
1384 j_coord_offsetA = DIM*jnrA;
1385 j_coord_offsetB = DIM*jnrB;
1386 j_coord_offsetC = DIM*jnrC;
1387 j_coord_offsetD = DIM*jnrD;
1388 j_coord_offsetE = DIM*jnrE;
1389 j_coord_offsetF = DIM*jnrF;
1390 j_coord_offsetG = DIM*jnrG;
1391 j_coord_offsetH = DIM*jnrH;
1393 /* load j atom coordinates */
1394 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1395 x+j_coord_offsetC,x+j_coord_offsetD,
1396 x+j_coord_offsetE,x+j_coord_offsetF,
1397 x+j_coord_offsetG,x+j_coord_offsetH,
1398 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1400 /* Calculate displacement vector */
1401 dx00 = _mm256_sub_ps(ix0,jx0);
1402 dy00 = _mm256_sub_ps(iy0,jy0);
1403 dz00 = _mm256_sub_ps(iz0,jz0);
1404 dx01 = _mm256_sub_ps(ix0,jx1);
1405 dy01 = _mm256_sub_ps(iy0,jy1);
1406 dz01 = _mm256_sub_ps(iz0,jz1);
1407 dx02 = _mm256_sub_ps(ix0,jx2);
1408 dy02 = _mm256_sub_ps(iy0,jy2);
1409 dz02 = _mm256_sub_ps(iz0,jz2);
1410 dx10 = _mm256_sub_ps(ix1,jx0);
1411 dy10 = _mm256_sub_ps(iy1,jy0);
1412 dz10 = _mm256_sub_ps(iz1,jz0);
1413 dx11 = _mm256_sub_ps(ix1,jx1);
1414 dy11 = _mm256_sub_ps(iy1,jy1);
1415 dz11 = _mm256_sub_ps(iz1,jz1);
1416 dx12 = _mm256_sub_ps(ix1,jx2);
1417 dy12 = _mm256_sub_ps(iy1,jy2);
1418 dz12 = _mm256_sub_ps(iz1,jz2);
1419 dx20 = _mm256_sub_ps(ix2,jx0);
1420 dy20 = _mm256_sub_ps(iy2,jy0);
1421 dz20 = _mm256_sub_ps(iz2,jz0);
1422 dx21 = _mm256_sub_ps(ix2,jx1);
1423 dy21 = _mm256_sub_ps(iy2,jy1);
1424 dz21 = _mm256_sub_ps(iz2,jz1);
1425 dx22 = _mm256_sub_ps(ix2,jx2);
1426 dy22 = _mm256_sub_ps(iy2,jy2);
1427 dz22 = _mm256_sub_ps(iz2,jz2);
1429 /* Calculate squared distance and things based on it */
1430 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1431 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1432 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1433 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1434 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1435 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1436 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1437 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1438 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1440 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
1441 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
1442 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
1443 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
1444 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
1445 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
1446 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
1447 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
1448 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
1450 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1451 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
1452 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
1453 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
1454 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
1455 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
1456 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
1457 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
1458 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
1460 fjx0 = _mm256_setzero_ps();
1461 fjy0 = _mm256_setzero_ps();
1462 fjz0 = _mm256_setzero_ps();
1463 fjx1 = _mm256_setzero_ps();
1464 fjy1 = _mm256_setzero_ps();
1465 fjz1 = _mm256_setzero_ps();
1466 fjx2 = _mm256_setzero_ps();
1467 fjy2 = _mm256_setzero_ps();
1468 fjz2 = _mm256_setzero_ps();
1470 /**************************
1471 * CALCULATE INTERACTIONS *
1472 **************************/
1474 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1477 /* REACTION-FIELD ELECTROSTATICS */
1478 felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
1480 /* LENNARD-JONES DISPERSION/REPULSION */
1482 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1483 fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
1485 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1487 fscal = _mm256_add_ps(felec,fvdw);
1489 fscal = _mm256_and_ps(fscal,cutoff_mask);
1491 /* Calculate temporary vectorial force */
1492 tx = _mm256_mul_ps(fscal,dx00);
1493 ty = _mm256_mul_ps(fscal,dy00);
1494 tz = _mm256_mul_ps(fscal,dz00);
1496 /* Update vectorial force */
1497 fix0 = _mm256_add_ps(fix0,tx);
1498 fiy0 = _mm256_add_ps(fiy0,ty);
1499 fiz0 = _mm256_add_ps(fiz0,tz);
1501 fjx0 = _mm256_add_ps(fjx0,tx);
1502 fjy0 = _mm256_add_ps(fjy0,ty);
1503 fjz0 = _mm256_add_ps(fjz0,tz);
1507 /**************************
1508 * CALCULATE INTERACTIONS *
1509 **************************/
1511 if (gmx_mm256_any_lt(rsq01,rcutoff2))
1514 /* REACTION-FIELD ELECTROSTATICS */
1515 felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
1517 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
1521 fscal = _mm256_and_ps(fscal,cutoff_mask);
1523 /* Calculate temporary vectorial force */
1524 tx = _mm256_mul_ps(fscal,dx01);
1525 ty = _mm256_mul_ps(fscal,dy01);
1526 tz = _mm256_mul_ps(fscal,dz01);
1528 /* Update vectorial force */
1529 fix0 = _mm256_add_ps(fix0,tx);
1530 fiy0 = _mm256_add_ps(fiy0,ty);
1531 fiz0 = _mm256_add_ps(fiz0,tz);
1533 fjx1 = _mm256_add_ps(fjx1,tx);
1534 fjy1 = _mm256_add_ps(fjy1,ty);
1535 fjz1 = _mm256_add_ps(fjz1,tz);
1539 /**************************
1540 * CALCULATE INTERACTIONS *
1541 **************************/
1543 if (gmx_mm256_any_lt(rsq02,rcutoff2))
1546 /* REACTION-FIELD ELECTROSTATICS */
1547 felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
1549 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
1553 fscal = _mm256_and_ps(fscal,cutoff_mask);
1555 /* Calculate temporary vectorial force */
1556 tx = _mm256_mul_ps(fscal,dx02);
1557 ty = _mm256_mul_ps(fscal,dy02);
1558 tz = _mm256_mul_ps(fscal,dz02);
1560 /* Update vectorial force */
1561 fix0 = _mm256_add_ps(fix0,tx);
1562 fiy0 = _mm256_add_ps(fiy0,ty);
1563 fiz0 = _mm256_add_ps(fiz0,tz);
1565 fjx2 = _mm256_add_ps(fjx2,tx);
1566 fjy2 = _mm256_add_ps(fjy2,ty);
1567 fjz2 = _mm256_add_ps(fjz2,tz);
1571 /**************************
1572 * CALCULATE INTERACTIONS *
1573 **************************/
1575 if (gmx_mm256_any_lt(rsq10,rcutoff2))
1578 /* REACTION-FIELD ELECTROSTATICS */
1579 felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
1581 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1585 fscal = _mm256_and_ps(fscal,cutoff_mask);
1587 /* Calculate temporary vectorial force */
1588 tx = _mm256_mul_ps(fscal,dx10);
1589 ty = _mm256_mul_ps(fscal,dy10);
1590 tz = _mm256_mul_ps(fscal,dz10);
1592 /* Update vectorial force */
1593 fix1 = _mm256_add_ps(fix1,tx);
1594 fiy1 = _mm256_add_ps(fiy1,ty);
1595 fiz1 = _mm256_add_ps(fiz1,tz);
1597 fjx0 = _mm256_add_ps(fjx0,tx);
1598 fjy0 = _mm256_add_ps(fjy0,ty);
1599 fjz0 = _mm256_add_ps(fjz0,tz);
1603 /**************************
1604 * CALCULATE INTERACTIONS *
1605 **************************/
1607 if (gmx_mm256_any_lt(rsq11,rcutoff2))
1610 /* REACTION-FIELD ELECTROSTATICS */
1611 felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
1613 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1617 fscal = _mm256_and_ps(fscal,cutoff_mask);
1619 /* Calculate temporary vectorial force */
1620 tx = _mm256_mul_ps(fscal,dx11);
1621 ty = _mm256_mul_ps(fscal,dy11);
1622 tz = _mm256_mul_ps(fscal,dz11);
1624 /* Update vectorial force */
1625 fix1 = _mm256_add_ps(fix1,tx);
1626 fiy1 = _mm256_add_ps(fiy1,ty);
1627 fiz1 = _mm256_add_ps(fiz1,tz);
1629 fjx1 = _mm256_add_ps(fjx1,tx);
1630 fjy1 = _mm256_add_ps(fjy1,ty);
1631 fjz1 = _mm256_add_ps(fjz1,tz);
1635 /**************************
1636 * CALCULATE INTERACTIONS *
1637 **************************/
1639 if (gmx_mm256_any_lt(rsq12,rcutoff2))
1642 /* REACTION-FIELD ELECTROSTATICS */
1643 felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
1645 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1649 fscal = _mm256_and_ps(fscal,cutoff_mask);
1651 /* Calculate temporary vectorial force */
1652 tx = _mm256_mul_ps(fscal,dx12);
1653 ty = _mm256_mul_ps(fscal,dy12);
1654 tz = _mm256_mul_ps(fscal,dz12);
1656 /* Update vectorial force */
1657 fix1 = _mm256_add_ps(fix1,tx);
1658 fiy1 = _mm256_add_ps(fiy1,ty);
1659 fiz1 = _mm256_add_ps(fiz1,tz);
1661 fjx2 = _mm256_add_ps(fjx2,tx);
1662 fjy2 = _mm256_add_ps(fjy2,ty);
1663 fjz2 = _mm256_add_ps(fjz2,tz);
1667 /**************************
1668 * CALCULATE INTERACTIONS *
1669 **************************/
1671 if (gmx_mm256_any_lt(rsq20,rcutoff2))
1674 /* REACTION-FIELD ELECTROSTATICS */
1675 felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
1677 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1681 fscal = _mm256_and_ps(fscal,cutoff_mask);
1683 /* Calculate temporary vectorial force */
1684 tx = _mm256_mul_ps(fscal,dx20);
1685 ty = _mm256_mul_ps(fscal,dy20);
1686 tz = _mm256_mul_ps(fscal,dz20);
1688 /* Update vectorial force */
1689 fix2 = _mm256_add_ps(fix2,tx);
1690 fiy2 = _mm256_add_ps(fiy2,ty);
1691 fiz2 = _mm256_add_ps(fiz2,tz);
1693 fjx0 = _mm256_add_ps(fjx0,tx);
1694 fjy0 = _mm256_add_ps(fjy0,ty);
1695 fjz0 = _mm256_add_ps(fjz0,tz);
1699 /**************************
1700 * CALCULATE INTERACTIONS *
1701 **************************/
1703 if (gmx_mm256_any_lt(rsq21,rcutoff2))
1706 /* REACTION-FIELD ELECTROSTATICS */
1707 felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
1709 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1713 fscal = _mm256_and_ps(fscal,cutoff_mask);
1715 /* Calculate temporary vectorial force */
1716 tx = _mm256_mul_ps(fscal,dx21);
1717 ty = _mm256_mul_ps(fscal,dy21);
1718 tz = _mm256_mul_ps(fscal,dz21);
1720 /* Update vectorial force */
1721 fix2 = _mm256_add_ps(fix2,tx);
1722 fiy2 = _mm256_add_ps(fiy2,ty);
1723 fiz2 = _mm256_add_ps(fiz2,tz);
1725 fjx1 = _mm256_add_ps(fjx1,tx);
1726 fjy1 = _mm256_add_ps(fjy1,ty);
1727 fjz1 = _mm256_add_ps(fjz1,tz);
1731 /**************************
1732 * CALCULATE INTERACTIONS *
1733 **************************/
1735 if (gmx_mm256_any_lt(rsq22,rcutoff2))
1738 /* REACTION-FIELD ELECTROSTATICS */
1739 felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
1741 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1745 fscal = _mm256_and_ps(fscal,cutoff_mask);
1747 /* Calculate temporary vectorial force */
1748 tx = _mm256_mul_ps(fscal,dx22);
1749 ty = _mm256_mul_ps(fscal,dy22);
1750 tz = _mm256_mul_ps(fscal,dz22);
1752 /* Update vectorial force */
1753 fix2 = _mm256_add_ps(fix2,tx);
1754 fiy2 = _mm256_add_ps(fiy2,ty);
1755 fiz2 = _mm256_add_ps(fiz2,tz);
1757 fjx2 = _mm256_add_ps(fjx2,tx);
1758 fjy2 = _mm256_add_ps(fjy2,ty);
1759 fjz2 = _mm256_add_ps(fjz2,tz);
1763 fjptrA = f+j_coord_offsetA;
1764 fjptrB = f+j_coord_offsetB;
1765 fjptrC = f+j_coord_offsetC;
1766 fjptrD = f+j_coord_offsetD;
1767 fjptrE = f+j_coord_offsetE;
1768 fjptrF = f+j_coord_offsetF;
1769 fjptrG = f+j_coord_offsetG;
1770 fjptrH = f+j_coord_offsetH;
1772 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1773 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1775 /* Inner loop uses 277 flops */
1778 if(jidx<j_index_end)
1781 /* Get j neighbor index, and coordinate index */
1782 jnrlistA = jjnr[jidx];
1783 jnrlistB = jjnr[jidx+1];
1784 jnrlistC = jjnr[jidx+2];
1785 jnrlistD = jjnr[jidx+3];
1786 jnrlistE = jjnr[jidx+4];
1787 jnrlistF = jjnr[jidx+5];
1788 jnrlistG = jjnr[jidx+6];
1789 jnrlistH = jjnr[jidx+7];
1790 /* Sign of each element will be negative for non-real atoms.
1791 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1792 * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
1794 dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1795 gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1797 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1798 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1799 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1800 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1801 jnrE = (jnrlistE>=0) ? jnrlistE : 0;
1802 jnrF = (jnrlistF>=0) ? jnrlistF : 0;
1803 jnrG = (jnrlistG>=0) ? jnrlistG : 0;
1804 jnrH = (jnrlistH>=0) ? jnrlistH : 0;
1805 j_coord_offsetA = DIM*jnrA;
1806 j_coord_offsetB = DIM*jnrB;
1807 j_coord_offsetC = DIM*jnrC;
1808 j_coord_offsetD = DIM*jnrD;
1809 j_coord_offsetE = DIM*jnrE;
1810 j_coord_offsetF = DIM*jnrF;
1811 j_coord_offsetG = DIM*jnrG;
1812 j_coord_offsetH = DIM*jnrH;
1814 /* load j atom coordinates */
1815 gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1816 x+j_coord_offsetC,x+j_coord_offsetD,
1817 x+j_coord_offsetE,x+j_coord_offsetF,
1818 x+j_coord_offsetG,x+j_coord_offsetH,
1819 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1821 /* Calculate displacement vector */
1822 dx00 = _mm256_sub_ps(ix0,jx0);
1823 dy00 = _mm256_sub_ps(iy0,jy0);
1824 dz00 = _mm256_sub_ps(iz0,jz0);
1825 dx01 = _mm256_sub_ps(ix0,jx1);
1826 dy01 = _mm256_sub_ps(iy0,jy1);
1827 dz01 = _mm256_sub_ps(iz0,jz1);
1828 dx02 = _mm256_sub_ps(ix0,jx2);
1829 dy02 = _mm256_sub_ps(iy0,jy2);
1830 dz02 = _mm256_sub_ps(iz0,jz2);
1831 dx10 = _mm256_sub_ps(ix1,jx0);
1832 dy10 = _mm256_sub_ps(iy1,jy0);
1833 dz10 = _mm256_sub_ps(iz1,jz0);
1834 dx11 = _mm256_sub_ps(ix1,jx1);
1835 dy11 = _mm256_sub_ps(iy1,jy1);
1836 dz11 = _mm256_sub_ps(iz1,jz1);
1837 dx12 = _mm256_sub_ps(ix1,jx2);
1838 dy12 = _mm256_sub_ps(iy1,jy2);
1839 dz12 = _mm256_sub_ps(iz1,jz2);
1840 dx20 = _mm256_sub_ps(ix2,jx0);
1841 dy20 = _mm256_sub_ps(iy2,jy0);
1842 dz20 = _mm256_sub_ps(iz2,jz0);
1843 dx21 = _mm256_sub_ps(ix2,jx1);
1844 dy21 = _mm256_sub_ps(iy2,jy1);
1845 dz21 = _mm256_sub_ps(iz2,jz1);
1846 dx22 = _mm256_sub_ps(ix2,jx2);
1847 dy22 = _mm256_sub_ps(iy2,jy2);
1848 dz22 = _mm256_sub_ps(iz2,jz2);
1850 /* Calculate squared distance and things based on it */
1851 rsq00 = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1852 rsq01 = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1853 rsq02 = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1854 rsq10 = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1855 rsq11 = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1856 rsq12 = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1857 rsq20 = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1858 rsq21 = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1859 rsq22 = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1861 rinv00 = gmx_mm256_invsqrt_ps(rsq00);
1862 rinv01 = gmx_mm256_invsqrt_ps(rsq01);
1863 rinv02 = gmx_mm256_invsqrt_ps(rsq02);
1864 rinv10 = gmx_mm256_invsqrt_ps(rsq10);
1865 rinv11 = gmx_mm256_invsqrt_ps(rsq11);
1866 rinv12 = gmx_mm256_invsqrt_ps(rsq12);
1867 rinv20 = gmx_mm256_invsqrt_ps(rsq20);
1868 rinv21 = gmx_mm256_invsqrt_ps(rsq21);
1869 rinv22 = gmx_mm256_invsqrt_ps(rsq22);
1871 rinvsq00 = _mm256_mul_ps(rinv00,rinv00);
1872 rinvsq01 = _mm256_mul_ps(rinv01,rinv01);
1873 rinvsq02 = _mm256_mul_ps(rinv02,rinv02);
1874 rinvsq10 = _mm256_mul_ps(rinv10,rinv10);
1875 rinvsq11 = _mm256_mul_ps(rinv11,rinv11);
1876 rinvsq12 = _mm256_mul_ps(rinv12,rinv12);
1877 rinvsq20 = _mm256_mul_ps(rinv20,rinv20);
1878 rinvsq21 = _mm256_mul_ps(rinv21,rinv21);
1879 rinvsq22 = _mm256_mul_ps(rinv22,rinv22);
1881 fjx0 = _mm256_setzero_ps();
1882 fjy0 = _mm256_setzero_ps();
1883 fjz0 = _mm256_setzero_ps();
1884 fjx1 = _mm256_setzero_ps();
1885 fjy1 = _mm256_setzero_ps();
1886 fjz1 = _mm256_setzero_ps();
1887 fjx2 = _mm256_setzero_ps();
1888 fjy2 = _mm256_setzero_ps();
1889 fjz2 = _mm256_setzero_ps();
1891 /**************************
1892 * CALCULATE INTERACTIONS *
1893 **************************/
1895 if (gmx_mm256_any_lt(rsq00,rcutoff2))
1898 /* REACTION-FIELD ELECTROSTATICS */
1899 felec = _mm256_mul_ps(qq00,_mm256_sub_ps(_mm256_mul_ps(rinv00,rinvsq00),krf2));
1901 /* LENNARD-JONES DISPERSION/REPULSION */
1903 rinvsix = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1904 fvdw = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
1906 cutoff_mask = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1908 fscal = _mm256_add_ps(felec,fvdw);
1910 fscal = _mm256_and_ps(fscal,cutoff_mask);
1912 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1914 /* Calculate temporary vectorial force */
1915 tx = _mm256_mul_ps(fscal,dx00);
1916 ty = _mm256_mul_ps(fscal,dy00);
1917 tz = _mm256_mul_ps(fscal,dz00);
1919 /* Update vectorial force */
1920 fix0 = _mm256_add_ps(fix0,tx);
1921 fiy0 = _mm256_add_ps(fiy0,ty);
1922 fiz0 = _mm256_add_ps(fiz0,tz);
1924 fjx0 = _mm256_add_ps(fjx0,tx);
1925 fjy0 = _mm256_add_ps(fjy0,ty);
1926 fjz0 = _mm256_add_ps(fjz0,tz);
1930 /**************************
1931 * CALCULATE INTERACTIONS *
1932 **************************/
1934 if (gmx_mm256_any_lt(rsq01,rcutoff2))
1937 /* REACTION-FIELD ELECTROSTATICS */
1938 felec = _mm256_mul_ps(qq01,_mm256_sub_ps(_mm256_mul_ps(rinv01,rinvsq01),krf2));
1940 cutoff_mask = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
1944 fscal = _mm256_and_ps(fscal,cutoff_mask);
1946 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1948 /* Calculate temporary vectorial force */
1949 tx = _mm256_mul_ps(fscal,dx01);
1950 ty = _mm256_mul_ps(fscal,dy01);
1951 tz = _mm256_mul_ps(fscal,dz01);
1953 /* Update vectorial force */
1954 fix0 = _mm256_add_ps(fix0,tx);
1955 fiy0 = _mm256_add_ps(fiy0,ty);
1956 fiz0 = _mm256_add_ps(fiz0,tz);
1958 fjx1 = _mm256_add_ps(fjx1,tx);
1959 fjy1 = _mm256_add_ps(fjy1,ty);
1960 fjz1 = _mm256_add_ps(fjz1,tz);
1964 /**************************
1965 * CALCULATE INTERACTIONS *
1966 **************************/
1968 if (gmx_mm256_any_lt(rsq02,rcutoff2))
1971 /* REACTION-FIELD ELECTROSTATICS */
/* NOTE(review): this is the tail of the inner j-loop of an auto-generated
 * AVX-256 single-precision kernel (reaction-field electrostatics with cutoff,
 * shifted LJ, water3-water3 geometry; 3x3 = 9 atom-pair interactions per
 * i/j water pair).  The leading number on each line is a leaked original
 * line number; the jumps in that numbering show that some lines (e.g. the
 * `fscal = felec;` assignments and the if-block braces) are not visible in
 * this fragment.  Do not hand-edit: regenerate from the kernel generator. */
/* Pair 0-2 (i oxygen / j hydrogen 2), continued: reaction-field force
 * scalar felec = qq*(rinv*rinvsq - 2*krf); see generator's RF formula. */
1972 felec = _mm256_mul_ps(qq02,_mm256_sub_ps(_mm256_mul_ps(rinv02,rinvsq02),krf2));
1974 cutoff_mask = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
/* Zero the force for lanes beyond the cutoff ... */
1978 fscal = _mm256_and_ps(fscal,cutoff_mask);
/* ... and for padding lanes (presumably dummy j entries -- their forces are
 * also routed to `scratch` below; TODO confirm against full kernel). */
1980 fscal = _mm256_andnot_ps(dummy_mask,fscal);
1982 /* Calculate temporary vectorial force */
1983 tx = _mm256_mul_ps(fscal,dx02);
1984 ty = _mm256_mul_ps(fscal,dy02);
1985 tz = _mm256_mul_ps(fscal,dz02);
1987 /* Update vectorial force */
1988 fix0 = _mm256_add_ps(fix0,tx);
1989 fiy0 = _mm256_add_ps(fiy0,ty);
1990 fiz0 = _mm256_add_ps(fiz0,tz);
/* Newton's third law: same force accumulated on j-atom 2 (subtracted from
 * memory later by the decrement_3rvec scatter). */
1992 fjx2 = _mm256_add_ps(fjx2,tx);
1993 fjy2 = _mm256_add_ps(fjy2,ty);
1994 fjz2 = _mm256_add_ps(fjz2,tz);
1998 /**************************
1999 * CALCULATE INTERACTIONS *
2000 **************************/
/* Pair 1-0: skip the whole pair if no SIMD lane is inside the cutoff. */
2002 if (gmx_mm256_any_lt(rsq10,rcutoff2))
2005 /* REACTION-FIELD ELECTROSTATICS */
2006 felec = _mm256_mul_ps(qq10,_mm256_sub_ps(_mm256_mul_ps(rinv10,rinvsq10),krf2));
2008 cutoff_mask = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
2012 fscal = _mm256_and_ps(fscal,cutoff_mask);
2014 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2016 /* Calculate temporary vectorial force */
2017 tx = _mm256_mul_ps(fscal,dx10);
2018 ty = _mm256_mul_ps(fscal,dy10);
2019 tz = _mm256_mul_ps(fscal,dz10);
2021 /* Update vectorial force */
2022 fix1 = _mm256_add_ps(fix1,tx);
2023 fiy1 = _mm256_add_ps(fiy1,ty);
2024 fiz1 = _mm256_add_ps(fiz1,tz);
2026 fjx0 = _mm256_add_ps(fjx0,tx);
2027 fjy0 = _mm256_add_ps(fjy0,ty);
2028 fjz0 = _mm256_add_ps(fjz0,tz);
2032 /**************************
2033 * CALCULATE INTERACTIONS *
2034 **************************/
/* Pair 1-1: identical structure to pair 1-0, with 11-suffixed inputs. */
2036 if (gmx_mm256_any_lt(rsq11,rcutoff2))
2039 /* REACTION-FIELD ELECTROSTATICS */
2040 felec = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
2042 cutoff_mask = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2046 fscal = _mm256_and_ps(fscal,cutoff_mask);
2048 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2050 /* Calculate temporary vectorial force */
2051 tx = _mm256_mul_ps(fscal,dx11);
2052 ty = _mm256_mul_ps(fscal,dy11);
2053 tz = _mm256_mul_ps(fscal,dz11);
2055 /* Update vectorial force */
2056 fix1 = _mm256_add_ps(fix1,tx);
2057 fiy1 = _mm256_add_ps(fiy1,ty);
2058 fiz1 = _mm256_add_ps(fiz1,tz);
2060 fjx1 = _mm256_add_ps(fjx1,tx);
2061 fjy1 = _mm256_add_ps(fjy1,ty);
2062 fjz1 = _mm256_add_ps(fjz1,tz);
2066 /**************************
2067 * CALCULATE INTERACTIONS *
2068 **************************/
/* Pair 1-2. */
2070 if (gmx_mm256_any_lt(rsq12,rcutoff2))
2073 /* REACTION-FIELD ELECTROSTATICS */
2074 felec = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
2076 cutoff_mask = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2080 fscal = _mm256_and_ps(fscal,cutoff_mask);
2082 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2084 /* Calculate temporary vectorial force */
2085 tx = _mm256_mul_ps(fscal,dx12);
2086 ty = _mm256_mul_ps(fscal,dy12);
2087 tz = _mm256_mul_ps(fscal,dz12);
2089 /* Update vectorial force */
2090 fix1 = _mm256_add_ps(fix1,tx);
2091 fiy1 = _mm256_add_ps(fiy1,ty);
2092 fiz1 = _mm256_add_ps(fiz1,tz);
2094 fjx2 = _mm256_add_ps(fjx2,tx);
2095 fjy2 = _mm256_add_ps(fjy2,ty);
2096 fjz2 = _mm256_add_ps(fjz2,tz);
2100 /**************************
2101 * CALCULATE INTERACTIONS *
2102 **************************/
/* Pair 2-0. */
2104 if (gmx_mm256_any_lt(rsq20,rcutoff2))
2107 /* REACTION-FIELD ELECTROSTATICS */
2108 felec = _mm256_mul_ps(qq20,_mm256_sub_ps(_mm256_mul_ps(rinv20,rinvsq20),krf2));
2110 cutoff_mask = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
2114 fscal = _mm256_and_ps(fscal,cutoff_mask);
2116 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2118 /* Calculate temporary vectorial force */
2119 tx = _mm256_mul_ps(fscal,dx20);
2120 ty = _mm256_mul_ps(fscal,dy20);
2121 tz = _mm256_mul_ps(fscal,dz20);
2123 /* Update vectorial force */
2124 fix2 = _mm256_add_ps(fix2,tx);
2125 fiy2 = _mm256_add_ps(fiy2,ty);
2126 fiz2 = _mm256_add_ps(fiz2,tz);
2128 fjx0 = _mm256_add_ps(fjx0,tx);
2129 fjy0 = _mm256_add_ps(fjy0,ty);
2130 fjz0 = _mm256_add_ps(fjz0,tz);
2134 /**************************
2135 * CALCULATE INTERACTIONS *
2136 **************************/
/* Pair 2-1. */
2138 if (gmx_mm256_any_lt(rsq21,rcutoff2))
2141 /* REACTION-FIELD ELECTROSTATICS */
2142 felec = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
2144 cutoff_mask = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2148 fscal = _mm256_and_ps(fscal,cutoff_mask);
2150 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2152 /* Calculate temporary vectorial force */
2153 tx = _mm256_mul_ps(fscal,dx21);
2154 ty = _mm256_mul_ps(fscal,dy21);
2155 tz = _mm256_mul_ps(fscal,dz21);
2157 /* Update vectorial force */
2158 fix2 = _mm256_add_ps(fix2,tx);
2159 fiy2 = _mm256_add_ps(fiy2,ty);
2160 fiz2 = _mm256_add_ps(fiz2,tz);
2162 fjx1 = _mm256_add_ps(fjx1,tx);
2163 fjy1 = _mm256_add_ps(fjy1,ty);
2164 fjz1 = _mm256_add_ps(fjz1,tz);
2168 /**************************
2169 * CALCULATE INTERACTIONS *
2170 **************************/
/* Pair 2-2: last of the nine water-water atom pairs. */
2172 if (gmx_mm256_any_lt(rsq22,rcutoff2))
2175 /* REACTION-FIELD ELECTROSTATICS */
2176 felec = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
2178 cutoff_mask = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2182 fscal = _mm256_and_ps(fscal,cutoff_mask);
2184 fscal = _mm256_andnot_ps(dummy_mask,fscal);
2186 /* Calculate temporary vectorial force */
2187 tx = _mm256_mul_ps(fscal,dx22);
2188 ty = _mm256_mul_ps(fscal,dy22);
2189 tz = _mm256_mul_ps(fscal,dz22);
2191 /* Update vectorial force */
2192 fix2 = _mm256_add_ps(fix2,tx);
2193 fiy2 = _mm256_add_ps(fiy2,ty);
2194 fiz2 = _mm256_add_ps(fiz2,tz);
2196 fjx2 = _mm256_add_ps(fjx2,tx);
2197 fjy2 = _mm256_add_ps(fjy2,ty);
2198 fjz2 = _mm256_add_ps(fjz2,tz);
/* Scatter accumulated j-forces for all 8 SIMD lanes (A..H).  Lanes with a
 * negative jnr index (padding entries in this epilogue iteration) are
 * redirected to the throwaway `scratch` buffer so no real force memory
 * is touched. */
2202 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2203 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2204 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2205 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2206 fjptrE = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2207 fjptrF = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2208 fjptrG = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2209 fjptrH = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
/* Transpose the SIMD force accumulators and subtract them from the per-atom
 * j forces (3 atoms x 3 components per water). */
2211 gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2212 fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2214 /* Inner loop uses 277 flops */
2217 /* End of innermost loop */
/* Reduce the i-water force accumulators across lanes into f[] and the
 * shift-force array (for virial computation). */
2219 gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2220 f+i_coord_offset,fshift+i_shift_offset);
2222 /* Increment number of inner iterations */
2223 inneriter += j_index_end - j_index_start;
2225 /* Outer loop uses 18 flops */
2228 /* Increment number of outer iterations */
2231 /* Update outer/inner flops */
/* Flop accounting: 18 flops per outer iteration, 277 per inner iteration
 * (counts produced by the kernel generator, see comments above). */
2233 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*277);