2 * This file is part of the GROMACS molecular simulation package.
4 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6 * and including many others, as listed in the AUTHORS file in the
7 * top-level source directory and at http://www.gromacs.org.
9 * GROMACS is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public License
11 * as published by the Free Software Foundation; either version 2.1
12 * of the License, or (at your option) any later version.
14 * GROMACS is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with GROMACS; if not, see
21 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 * If you want to redistribute modifications to GROMACS, please
25 * consider that scientific software is very special. Version
26 * control is crucial - bugs must be traceable. We will be happy to
27 * consider code for inclusion in the official distribution, but
28 * derived work must not be called official GROMACS. Details are found
29 * in the README & COPYING files - if they are missing, get the
30 * official version at http://www.gromacs.org.
32 * To help us fund GROMACS development, we humbly ask that you cite
33 * the research papers on the package. Check out http://www.gromacs.org.
36 * Note: this file was generated by the GROMACS avx_256_double kernel generator.
44 #include "../nb_kernel.h"
45 #include "gromacs/legacyheaders/types/simple.h"
46 #include "gromacs/math/vec.h"
47 #include "gromacs/legacyheaders/nrnb.h"
49 #include "gromacs/simd/math_x86_avx_256_double.h"
50 #include "kernelutil_x86_avx_256_double.h"
53 * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomW4W4_VF_avx_256_double
54 * Electrostatics interaction: Coulomb
55 * VdW interaction: None
56 * Geometry: Water4-Water4
57 * Calculate force/pot: PotentialAndForce
/*
 * Coulomb-only Water4-Water4 kernel (potential + force), AVX-256 double.
 *
 * NOTE(review): this copy of the generated file has lost a number of code
 * lines: the `void` return type above the kernel name, the opening `{` of
 * the function body, several declarations and setup assignments, every
 * `fscal = felec;` statement, the inner/outer loop braces, the epilogue
 * `if(jidx<j_index_end)` header, and the closing braces.  Each spot is
 * flagged below with a NOTE(review) comment.  Do not patch by hand:
 * regenerate this file with the GROMACS avx_256_double kernel generator.
 */
nb_kernel_ElecCoul_VdwNone_GeomW4W4_VF_avx_256_double
(t_nblist * gmx_restrict nlist,
 rvec * gmx_restrict xx,
 rvec * gmx_restrict ff,
 t_forcerec * gmx_restrict fr,
 t_mdatoms * gmx_restrict mdatoms,
 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
 t_nrnb * gmx_restrict nrnb)
/* NOTE(review): the opening '{' of the function body is missing here. */
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int i_shift_offset,i_coord_offset,outeriter,inneriter;
    int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int jnrA,jnrB,jnrC,jnrD;
    int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real *shiftvec,*fshift,*x,*f;
    real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    /* NOTE(review): declarations of `real *charge;` and of the aligned
     * `scratch` spill buffer (both used below) are missing from this copy. */
    __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real * vdwioffsetptr1;
    __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real * vdwioffsetptr2;
    __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real * vdwioffsetptr3;
    __m256d ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
    __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
    __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
    __m256d jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
    /* Per-pair working registers, suffix ij = i atom index, j atom index */
    __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256d dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
    __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256d dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
    __m256d dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
    __m256d dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
    __m256d dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
    __m256d velec,felec,velecsum,facel,crf,krf,krf2;
    __m256d dummy_mask,cutoff_mask;
    __m128 tmpmask0,tmpmask1;
    __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
    __m256d one = _mm256_set1_pd(1.0);
    __m256d two = _mm256_set1_pd(2.0);

    /* NOTE(review): assignments of x/f from xx/ff and of nri, iinr, jjnr
     * and gid from nlist (all used below) are missing from this copy. */
    jindex = nlist->jindex;
    shiftidx = nlist->shift;
    shiftvec = fr->shift_vec[0];
    fshift = fr->fshift[0];
    /* epsfac is folded into the i charges once, outside all loops */
    facel = _mm256_set1_pd(fr->epsfac);
    charge = mdatoms->chargeA;

    /* Setup water-specific parameters.  All waters share the topology of the
     * first i water, so all charge products can be precomputed here; atoms
     * inr+1..inr+3 are the only charged sites used by this kernel. */
    inr = nlist->iinr[0];
    iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
    iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
    iq3 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
    jq1 = _mm256_set1_pd(charge[inr+1]);
    jq2 = _mm256_set1_pd(charge[inr+2]);
    jq3 = _mm256_set1_pd(charge[inr+3]);
    qq11 = _mm256_mul_pd(iq1,jq1);
    qq12 = _mm256_mul_pd(iq1,jq2);
    qq13 = _mm256_mul_pd(iq1,jq3);
    qq21 = _mm256_mul_pd(iq2,jq1);
    qq22 = _mm256_mul_pd(iq2,jq2);
    qq23 = _mm256_mul_pd(iq2,jq3);
    qq31 = _mm256_mul_pd(iq3,jq1);
    qq32 = _mm256_mul_pd(iq3,jq2);
    qq33 = _mm256_mul_pd(iq3,jq3);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    /* NOTE(review): the j_coord_offset{A..D} = 0 and outeriter = inneriter = 0
     * initialisations are missing from this copy; both counters are read below. */

    for(iidx=0;iidx<4*DIM;iidx++)
    /* NOTE(review): the loop body `scratch[iidx] = 0.0;` (and its braces) is
     * missing here; as written this `for` would swallow the next statement. */

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    /* NOTE(review): the opening '{' of the outer loop is missing here. */
        /* Load shift vector for this list */
        i_shift_offset = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start = jindex[iidx];
        j_index_end = jindex[iidx+1];

        /* Get outer coordinate index */
        /* NOTE(review): `inr = iinr[iidx];` is missing here; without it every
         * outer iteration reuses the first list's i atom. */
        i_coord_offset = DIM*inr;

        /* Load i particle coords and add shift vector.  The +DIM offset skips
         * atom 0 of the water: it has no charge product in this Coulomb-only
         * kernel and VdW is disabled, so only atoms 1-3 participate. */
        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
        &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);

        /* Reset i-atom force accumulators */
        fix1 = _mm256_setzero_pd();
        fiy1 = _mm256_setzero_pd();
        fiz1 = _mm256_setzero_pd();
        fix2 = _mm256_setzero_pd();
        fiy2 = _mm256_setzero_pd();
        fiz2 = _mm256_setzero_pd();
        fix3 = _mm256_setzero_pd();
        fiy3 = _mm256_setzero_pd();
        fiz3 = _mm256_setzero_pd();

        /* Reset potential sums */
        velecsum = _mm256_setzero_pd();

        /* Start inner kernel loop: full groups of 4 j waters; the jjnr[jidx+3]>=0
         * test stops before any padded (negative-index) entries, which are
         * handled by the masked epilogue further down. */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        /* NOTE(review): the opening '{' and the four loads
         * `jnrA = jjnr[jidx]; ... jnrD = jjnr[jidx+3];` are missing here. */
            /* Get j neighbor index, and coordinate index */
            j_coord_offsetA = DIM*jnrA;
            j_coord_offsetB = DIM*jnrB;
            j_coord_offsetC = DIM*jnrC;
            j_coord_offsetD = DIM*jnrD;

            /* load j atom coordinates (again +DIM: only atoms 1-3 are used) */
            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
            x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
            &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx11 = _mm256_sub_pd(ix1,jx1);
            dy11 = _mm256_sub_pd(iy1,jy1);
            dz11 = _mm256_sub_pd(iz1,jz1);
            dx12 = _mm256_sub_pd(ix1,jx2);
            dy12 = _mm256_sub_pd(iy1,jy2);
            dz12 = _mm256_sub_pd(iz1,jz2);
            dx13 = _mm256_sub_pd(ix1,jx3);
            dy13 = _mm256_sub_pd(iy1,jy3);
            dz13 = _mm256_sub_pd(iz1,jz3);
            dx21 = _mm256_sub_pd(ix2,jx1);
            dy21 = _mm256_sub_pd(iy2,jy1);
            dz21 = _mm256_sub_pd(iz2,jz1);
            dx22 = _mm256_sub_pd(ix2,jx2);
            dy22 = _mm256_sub_pd(iy2,jy2);
            dz22 = _mm256_sub_pd(iz2,jz2);
            dx23 = _mm256_sub_pd(ix2,jx3);
            dy23 = _mm256_sub_pd(iy2,jy3);
            dz23 = _mm256_sub_pd(iz2,jz3);
            dx31 = _mm256_sub_pd(ix3,jx1);
            dy31 = _mm256_sub_pd(iy3,jy1);
            dz31 = _mm256_sub_pd(iz3,jz1);
            dx32 = _mm256_sub_pd(ix3,jx2);
            dy32 = _mm256_sub_pd(iy3,jy2);
            dz32 = _mm256_sub_pd(iz3,jz2);
            dx33 = _mm256_sub_pd(ix3,jx3);
            dy33 = _mm256_sub_pd(iy3,jy3);
            dz33 = _mm256_sub_pd(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
            rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
            rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
            rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
            rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
            rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
            rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
            rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
            rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);

            rinv11 = gmx_mm256_invsqrt_pd(rsq11);
            rinv12 = gmx_mm256_invsqrt_pd(rsq12);
            rinv13 = gmx_mm256_invsqrt_pd(rsq13);
            rinv21 = gmx_mm256_invsqrt_pd(rsq21);
            rinv22 = gmx_mm256_invsqrt_pd(rsq22);
            rinv23 = gmx_mm256_invsqrt_pd(rsq23);
            rinv31 = gmx_mm256_invsqrt_pd(rsq31);
            rinv32 = gmx_mm256_invsqrt_pd(rsq32);
            rinv33 = gmx_mm256_invsqrt_pd(rsq33);

            rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
            rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
            rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
            rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
            rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
            rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
            rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
            rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
            rinvsq33 = _mm256_mul_pd(rinv33,rinv33);

            /* j-atom force accumulators; subtracted from f[] in one swizzled
             * store after all nine pair interactions. */
            fjx1 = _mm256_setzero_pd();
            fjy1 = _mm256_setzero_pd();
            fjz1 = _mm256_setzero_pd();
            fjx2 = _mm256_setzero_pd();
            fjy2 = _mm256_setzero_pd();
            fjz2 = _mm256_setzero_pd();
            fjx3 = _mm256_setzero_pd();
            fjy3 = _mm256_setzero_pd();
            fjz3 = _mm256_setzero_pd();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS: V = qq*rinv, scalar force/|r| = V*rinvsq */
            velec = _mm256_mul_pd(qq11,rinv11);
            felec = _mm256_mul_pd(velec,rinvsq11);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum = _mm256_add_pd(velecsum,velec);

            /* NOTE(review): `fscal = felec;` is missing here and in each of the
             * eight interaction sections below; fscal is read uninitialised. */
            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx11);
            ty = _mm256_mul_pd(fscal,dy11);
            tz = _mm256_mul_pd(fscal,dz11);

            /* Update vectorial force */
            fix1 = _mm256_add_pd(fix1,tx);
            fiy1 = _mm256_add_pd(fiy1,ty);
            fiz1 = _mm256_add_pd(fiz1,tz);

            fjx1 = _mm256_add_pd(fjx1,tx);
            fjy1 = _mm256_add_pd(fjy1,ty);
            fjz1 = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq12,rinv12);
            felec = _mm256_mul_pd(velec,rinvsq12);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum = _mm256_add_pd(velecsum,velec);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx12);
            ty = _mm256_mul_pd(fscal,dy12);
            tz = _mm256_mul_pd(fscal,dz12);

            /* Update vectorial force */
            fix1 = _mm256_add_pd(fix1,tx);
            fiy1 = _mm256_add_pd(fiy1,ty);
            fiz1 = _mm256_add_pd(fiz1,tz);

            fjx2 = _mm256_add_pd(fjx2,tx);
            fjy2 = _mm256_add_pd(fjy2,ty);
            fjz2 = _mm256_add_pd(fjz2,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq13,rinv13);
            felec = _mm256_mul_pd(velec,rinvsq13);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum = _mm256_add_pd(velecsum,velec);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx13);
            ty = _mm256_mul_pd(fscal,dy13);
            tz = _mm256_mul_pd(fscal,dz13);

            /* Update vectorial force */
            fix1 = _mm256_add_pd(fix1,tx);
            fiy1 = _mm256_add_pd(fiy1,ty);
            fiz1 = _mm256_add_pd(fiz1,tz);

            fjx3 = _mm256_add_pd(fjx3,tx);
            fjy3 = _mm256_add_pd(fjy3,ty);
            fjz3 = _mm256_add_pd(fjz3,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq21,rinv21);
            felec = _mm256_mul_pd(velec,rinvsq21);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum = _mm256_add_pd(velecsum,velec);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx21);
            ty = _mm256_mul_pd(fscal,dy21);
            tz = _mm256_mul_pd(fscal,dz21);

            /* Update vectorial force */
            fix2 = _mm256_add_pd(fix2,tx);
            fiy2 = _mm256_add_pd(fiy2,ty);
            fiz2 = _mm256_add_pd(fiz2,tz);

            fjx1 = _mm256_add_pd(fjx1,tx);
            fjy1 = _mm256_add_pd(fjy1,ty);
            fjz1 = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq22,rinv22);
            felec = _mm256_mul_pd(velec,rinvsq22);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum = _mm256_add_pd(velecsum,velec);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx22);
            ty = _mm256_mul_pd(fscal,dy22);
            tz = _mm256_mul_pd(fscal,dz22);

            /* Update vectorial force */
            fix2 = _mm256_add_pd(fix2,tx);
            fiy2 = _mm256_add_pd(fiy2,ty);
            fiz2 = _mm256_add_pd(fiz2,tz);

            fjx2 = _mm256_add_pd(fjx2,tx);
            fjy2 = _mm256_add_pd(fjy2,ty);
            fjz2 = _mm256_add_pd(fjz2,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq23,rinv23);
            felec = _mm256_mul_pd(velec,rinvsq23);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum = _mm256_add_pd(velecsum,velec);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx23);
            ty = _mm256_mul_pd(fscal,dy23);
            tz = _mm256_mul_pd(fscal,dz23);

            /* Update vectorial force */
            fix2 = _mm256_add_pd(fix2,tx);
            fiy2 = _mm256_add_pd(fiy2,ty);
            fiz2 = _mm256_add_pd(fiz2,tz);

            fjx3 = _mm256_add_pd(fjx3,tx);
            fjy3 = _mm256_add_pd(fjy3,ty);
            fjz3 = _mm256_add_pd(fjz3,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq31,rinv31);
            felec = _mm256_mul_pd(velec,rinvsq31);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum = _mm256_add_pd(velecsum,velec);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx31);
            ty = _mm256_mul_pd(fscal,dy31);
            tz = _mm256_mul_pd(fscal,dz31);

            /* Update vectorial force */
            fix3 = _mm256_add_pd(fix3,tx);
            fiy3 = _mm256_add_pd(fiy3,ty);
            fiz3 = _mm256_add_pd(fiz3,tz);

            fjx1 = _mm256_add_pd(fjx1,tx);
            fjy1 = _mm256_add_pd(fjy1,ty);
            fjz1 = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq32,rinv32);
            felec = _mm256_mul_pd(velec,rinvsq32);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum = _mm256_add_pd(velecsum,velec);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx32);
            ty = _mm256_mul_pd(fscal,dy32);
            tz = _mm256_mul_pd(fscal,dz32);

            /* Update vectorial force */
            fix3 = _mm256_add_pd(fix3,tx);
            fiy3 = _mm256_add_pd(fiy3,ty);
            fiz3 = _mm256_add_pd(fiz3,tz);

            fjx2 = _mm256_add_pd(fjx2,tx);
            fjy2 = _mm256_add_pd(fjy2,ty);
            fjz2 = _mm256_add_pd(fjz2,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq33,rinv33);
            felec = _mm256_mul_pd(velec,rinvsq33);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velecsum = _mm256_add_pd(velecsum,velec);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx33);
            ty = _mm256_mul_pd(fscal,dy33);
            tz = _mm256_mul_pd(fscal,dz33);

            /* Update vectorial force */
            fix3 = _mm256_add_pd(fix3,tx);
            fiy3 = _mm256_add_pd(fiy3,ty);
            fiz3 = _mm256_add_pd(fiz3,tz);

            fjx3 = _mm256_add_pd(fjx3,tx);
            fjy3 = _mm256_add_pd(fjy3,ty);
            fjz3 = _mm256_add_pd(fjz3,tz);

            /* Scatter-subtract the accumulated j forces (Newton's third law) */
            fjptrA = f+j_coord_offsetA;
            fjptrB = f+j_coord_offsetB;
            fjptrC = f+j_coord_offsetC;
            fjptrD = f+j_coord_offsetD;

            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
            fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 243 flops */
        /* NOTE(review): the closing '}' of the unrolled inner loop and the
         * `if(jidx<j_index_end)` header (with its '{') of the masked epilogue
         * handling the final partial group of j entries are missing here. */

            /* Get j neighbor index, and coordinate index */
            jnrlistA = jjnr[jidx];
            jnrlistB = jjnr[jidx+1];
            jnrlistC = jjnr[jidx+2];
            jnrlistD = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
            /* Widen the four 32-bit lane masks to four 64-bit (double) lanes */
            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));

            /* Clamp dummy indices to 0 so the coordinate loads stay in bounds;
             * their contributions are zeroed through dummy_mask below. */
            jnrA = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA = DIM*jnrA;
            j_coord_offsetB = DIM*jnrB;
            j_coord_offsetC = DIM*jnrC;
            j_coord_offsetD = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
            x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
            &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx11 = _mm256_sub_pd(ix1,jx1);
            dy11 = _mm256_sub_pd(iy1,jy1);
            dz11 = _mm256_sub_pd(iz1,jz1);
            dx12 = _mm256_sub_pd(ix1,jx2);
            dy12 = _mm256_sub_pd(iy1,jy2);
            dz12 = _mm256_sub_pd(iz1,jz2);
            dx13 = _mm256_sub_pd(ix1,jx3);
            dy13 = _mm256_sub_pd(iy1,jy3);
            dz13 = _mm256_sub_pd(iz1,jz3);
            dx21 = _mm256_sub_pd(ix2,jx1);
            dy21 = _mm256_sub_pd(iy2,jy1);
            dz21 = _mm256_sub_pd(iz2,jz1);
            dx22 = _mm256_sub_pd(ix2,jx2);
            dy22 = _mm256_sub_pd(iy2,jy2);
            dz22 = _mm256_sub_pd(iz2,jz2);
            dx23 = _mm256_sub_pd(ix2,jx3);
            dy23 = _mm256_sub_pd(iy2,jy3);
            dz23 = _mm256_sub_pd(iz2,jz3);
            dx31 = _mm256_sub_pd(ix3,jx1);
            dy31 = _mm256_sub_pd(iy3,jy1);
            dz31 = _mm256_sub_pd(iz3,jz1);
            dx32 = _mm256_sub_pd(ix3,jx2);
            dy32 = _mm256_sub_pd(iy3,jy2);
            dz32 = _mm256_sub_pd(iz3,jz2);
            dx33 = _mm256_sub_pd(ix3,jx3);
            dy33 = _mm256_sub_pd(iy3,jy3);
            dz33 = _mm256_sub_pd(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
            rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
            rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
            rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
            rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
            rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
            rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
            rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
            rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);

            rinv11 = gmx_mm256_invsqrt_pd(rsq11);
            rinv12 = gmx_mm256_invsqrt_pd(rsq12);
            rinv13 = gmx_mm256_invsqrt_pd(rsq13);
            rinv21 = gmx_mm256_invsqrt_pd(rsq21);
            rinv22 = gmx_mm256_invsqrt_pd(rsq22);
            rinv23 = gmx_mm256_invsqrt_pd(rsq23);
            rinv31 = gmx_mm256_invsqrt_pd(rsq31);
            rinv32 = gmx_mm256_invsqrt_pd(rsq32);
            rinv33 = gmx_mm256_invsqrt_pd(rsq33);

            rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
            rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
            rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
            rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
            rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
            rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
            rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
            rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
            rinvsq33 = _mm256_mul_pd(rinv33,rinv33);

            fjx1 = _mm256_setzero_pd();
            fjy1 = _mm256_setzero_pd();
            fjz1 = _mm256_setzero_pd();
            fjx2 = _mm256_setzero_pd();
            fjy2 = _mm256_setzero_pd();
            fjz2 = _mm256_setzero_pd();
            fjx3 = _mm256_setzero_pd();
            fjy3 = _mm256_setzero_pd();
            fjz3 = _mm256_setzero_pd();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq11,rinv11);
            felec = _mm256_mul_pd(velec,rinvsq11);

            /* Update potential sum for this i atom from the interaction with this j atom.
             * The andnot zeroes the contribution of dummy (padding) lanes. */
            velec = _mm256_andnot_pd(dummy_mask,velec);
            velecsum = _mm256_add_pd(velecsum,velec);

            /* NOTE(review): `fscal = felec;` is missing immediately before the
             * masking here and in each of the eight sections that follow. */
            fscal = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx11);
            ty = _mm256_mul_pd(fscal,dy11);
            tz = _mm256_mul_pd(fscal,dz11);

            /* Update vectorial force */
            fix1 = _mm256_add_pd(fix1,tx);
            fiy1 = _mm256_add_pd(fiy1,ty);
            fiz1 = _mm256_add_pd(fiz1,tz);

            fjx1 = _mm256_add_pd(fjx1,tx);
            fjy1 = _mm256_add_pd(fjy1,ty);
            fjz1 = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq12,rinv12);
            felec = _mm256_mul_pd(velec,rinvsq12);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec = _mm256_andnot_pd(dummy_mask,velec);
            velecsum = _mm256_add_pd(velecsum,velec);

            fscal = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx12);
            ty = _mm256_mul_pd(fscal,dy12);
            tz = _mm256_mul_pd(fscal,dz12);

            /* Update vectorial force */
            fix1 = _mm256_add_pd(fix1,tx);
            fiy1 = _mm256_add_pd(fiy1,ty);
            fiz1 = _mm256_add_pd(fiz1,tz);

            fjx2 = _mm256_add_pd(fjx2,tx);
            fjy2 = _mm256_add_pd(fjy2,ty);
            fjz2 = _mm256_add_pd(fjz2,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq13,rinv13);
            felec = _mm256_mul_pd(velec,rinvsq13);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec = _mm256_andnot_pd(dummy_mask,velec);
            velecsum = _mm256_add_pd(velecsum,velec);

            fscal = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx13);
            ty = _mm256_mul_pd(fscal,dy13);
            tz = _mm256_mul_pd(fscal,dz13);

            /* Update vectorial force */
            fix1 = _mm256_add_pd(fix1,tx);
            fiy1 = _mm256_add_pd(fiy1,ty);
            fiz1 = _mm256_add_pd(fiz1,tz);

            fjx3 = _mm256_add_pd(fjx3,tx);
            fjy3 = _mm256_add_pd(fjy3,ty);
            fjz3 = _mm256_add_pd(fjz3,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq21,rinv21);
            felec = _mm256_mul_pd(velec,rinvsq21);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec = _mm256_andnot_pd(dummy_mask,velec);
            velecsum = _mm256_add_pd(velecsum,velec);

            fscal = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx21);
            ty = _mm256_mul_pd(fscal,dy21);
            tz = _mm256_mul_pd(fscal,dz21);

            /* Update vectorial force */
            fix2 = _mm256_add_pd(fix2,tx);
            fiy2 = _mm256_add_pd(fiy2,ty);
            fiz2 = _mm256_add_pd(fiz2,tz);

            fjx1 = _mm256_add_pd(fjx1,tx);
            fjy1 = _mm256_add_pd(fjy1,ty);
            fjz1 = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq22,rinv22);
            felec = _mm256_mul_pd(velec,rinvsq22);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec = _mm256_andnot_pd(dummy_mask,velec);
            velecsum = _mm256_add_pd(velecsum,velec);

            fscal = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx22);
            ty = _mm256_mul_pd(fscal,dy22);
            tz = _mm256_mul_pd(fscal,dz22);

            /* Update vectorial force */
            fix2 = _mm256_add_pd(fix2,tx);
            fiy2 = _mm256_add_pd(fiy2,ty);
            fiz2 = _mm256_add_pd(fiz2,tz);

            fjx2 = _mm256_add_pd(fjx2,tx);
            fjy2 = _mm256_add_pd(fjy2,ty);
            fjz2 = _mm256_add_pd(fjz2,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq23,rinv23);
            felec = _mm256_mul_pd(velec,rinvsq23);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec = _mm256_andnot_pd(dummy_mask,velec);
            velecsum = _mm256_add_pd(velecsum,velec);

            fscal = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx23);
            ty = _mm256_mul_pd(fscal,dy23);
            tz = _mm256_mul_pd(fscal,dz23);

            /* Update vectorial force */
            fix2 = _mm256_add_pd(fix2,tx);
            fiy2 = _mm256_add_pd(fiy2,ty);
            fiz2 = _mm256_add_pd(fiz2,tz);

            fjx3 = _mm256_add_pd(fjx3,tx);
            fjy3 = _mm256_add_pd(fjy3,ty);
            fjz3 = _mm256_add_pd(fjz3,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq31,rinv31);
            felec = _mm256_mul_pd(velec,rinvsq31);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec = _mm256_andnot_pd(dummy_mask,velec);
            velecsum = _mm256_add_pd(velecsum,velec);

            fscal = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx31);
            ty = _mm256_mul_pd(fscal,dy31);
            tz = _mm256_mul_pd(fscal,dz31);

            /* Update vectorial force */
            fix3 = _mm256_add_pd(fix3,tx);
            fiy3 = _mm256_add_pd(fiy3,ty);
            fiz3 = _mm256_add_pd(fiz3,tz);

            fjx1 = _mm256_add_pd(fjx1,tx);
            fjy1 = _mm256_add_pd(fjy1,ty);
            fjz1 = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq32,rinv32);
            felec = _mm256_mul_pd(velec,rinvsq32);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec = _mm256_andnot_pd(dummy_mask,velec);
            velecsum = _mm256_add_pd(velecsum,velec);

            fscal = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx32);
            ty = _mm256_mul_pd(fscal,dy32);
            tz = _mm256_mul_pd(fscal,dz32);

            /* Update vectorial force */
            fix3 = _mm256_add_pd(fix3,tx);
            fiy3 = _mm256_add_pd(fiy3,ty);
            fiz3 = _mm256_add_pd(fiz3,tz);

            fjx2 = _mm256_add_pd(fjx2,tx);
            fjy2 = _mm256_add_pd(fjy2,ty);
            fjz2 = _mm256_add_pd(fjz2,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec = _mm256_mul_pd(qq33,rinv33);
            felec = _mm256_mul_pd(velec,rinvsq33);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec = _mm256_andnot_pd(dummy_mask,velec);
            velecsum = _mm256_add_pd(velecsum,velec);

            fscal = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx = _mm256_mul_pd(fscal,dx33);
            ty = _mm256_mul_pd(fscal,dy33);
            tz = _mm256_mul_pd(fscal,dz33);

            /* Update vectorial force */
            fix3 = _mm256_add_pd(fix3,tx);
            fiy3 = _mm256_add_pd(fiy3,ty);
            fiz3 = _mm256_add_pd(fiz3,tz);

            fjx3 = _mm256_add_pd(fjx3,tx);
            fjy3 = _mm256_add_pd(fjy3,ty);
            fjz3 = _mm256_add_pd(fjz3,tz);

            /* Dummy lanes write their (already-zeroed) forces to scratch so the
             * swizzled store never touches real force memory for them. */
            fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;

            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
            fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 243 flops */
        /* NOTE(review): the closing '}' of the masked epilogue block is missing. */

        /* End of innermost loop */

        /* Reduce and add the accumulated i forces, update shift forces */
        gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
        f+i_coord_offset+DIM,fshift+i_shift_offset);

        /* Update potential energies */
        /* NOTE(review): `ggid = gid[iidx];` is missing; ggid is read
         * uninitialised here. */
        gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);

        /* Increment number of inner iterations */
        inneriter += j_index_end - j_index_start;

        /* Outer loop uses 19 flops */
    /* NOTE(review): the closing '}' of the outer loop is missing here. */

    /* Increment number of outer iterations */
    /* NOTE(review): `outeriter += nri;` is missing; outeriter is read
     * uninitialised in the flop accounting below. */

    /* Update outer/inner flops */
    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_VF,outeriter*19 + inneriter*243);
/* NOTE(review): the closing '}' of the function is missing here. */
942 * Gromacs nonbonded kernel: nb_kernel_ElecCoul_VdwNone_GeomW4W4_F_avx_256_double
943 * Electrostatics interaction: Coulomb
944 * VdW interaction: None
945 * Geometry: Water4-Water4
946 * Calculate force/pot: Force
/*
 * Coulomb-only Water4-Water4 kernel (force only; no potential accumulation),
 * AVX-256 double.  Same structure as the VF kernel above minus velecsum.
 *
 * NOTE(review): this copy is incomplete — the `void` return type, the
 * opening '{', the `real *charge;` and `scratch` declarations, and several
 * assignments are missing (flagged below), and the function continues
 * beyond the end of this chunk.  Regenerate with the GROMACS
 * avx_256_double kernel generator rather than hand-editing.
 */
nb_kernel_ElecCoul_VdwNone_GeomW4W4_F_avx_256_double
(t_nblist * gmx_restrict nlist,
 rvec * gmx_restrict xx,
 rvec * gmx_restrict ff,
 t_forcerec * gmx_restrict fr,
 t_mdatoms * gmx_restrict mdatoms,
 nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
 t_nrnb * gmx_restrict nrnb)
/* NOTE(review): the opening '{' of the function body is missing here. */
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
    int i_shift_offset,i_coord_offset,outeriter,inneriter;
    int j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int jnrA,jnrB,jnrC,jnrD;
    int jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real *shiftvec,*fshift,*x,*f;
    real *fjptrA,*fjptrB,*fjptrC,*fjptrD;
    /* NOTE(review): declarations of `real *charge;` and the aligned
     * `scratch` buffer (both used below) are missing from this copy. */
    __m256d tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real * vdwioffsetptr1;
    __m256d ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real * vdwioffsetptr2;
    __m256d ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real * vdwioffsetptr3;
    __m256d ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
    __m256d jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
    __m256d jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    int vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
    __m256d jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
    /* Per-pair working registers, suffix ij = i atom index, j atom index */
    __m256d dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256d dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256d dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
    __m256d dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256d dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256d dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
    __m256d dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
    __m256d dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
    __m256d dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
    __m256d velec,felec,velecsum,facel,crf,krf,krf2;
    __m256d dummy_mask,cutoff_mask;
    __m128 tmpmask0,tmpmask1;
    __m256d signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
    __m256d one = _mm256_set1_pd(1.0);
    __m256d two = _mm256_set1_pd(2.0);

    /* NOTE(review): assignments of x/f from xx/ff and of nri, iinr, jjnr
     * and gid from nlist (used below) are missing from this copy. */
    jindex = nlist->jindex;
    shiftidx = nlist->shift;
    shiftvec = fr->shift_vec[0];
    fshift = fr->fshift[0];
    /* epsfac is folded into the i charges once, outside all loops */
    facel = _mm256_set1_pd(fr->epsfac);
    charge = mdatoms->chargeA;

    /* Setup water-specific parameters: all waters share the first i water's
     * topology, so all nine charge products are precomputed here. */
    inr = nlist->iinr[0];
    iq1 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
    iq2 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
    iq3 = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
    jq1 = _mm256_set1_pd(charge[inr+1]);
    jq2 = _mm256_set1_pd(charge[inr+2]);
    jq3 = _mm256_set1_pd(charge[inr+3]);
    qq11 = _mm256_mul_pd(iq1,jq1);
    qq12 = _mm256_mul_pd(iq1,jq2);
    qq13 = _mm256_mul_pd(iq1,jq3);
    qq21 = _mm256_mul_pd(iq2,jq1);
    qq22 = _mm256_mul_pd(iq2,jq2);
    qq23 = _mm256_mul_pd(iq2,jq3);
    qq31 = _mm256_mul_pd(iq3,jq1);
    qq32 = _mm256_mul_pd(iq3,jq2);
    qq33 = _mm256_mul_pd(iq3,jq3);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    /* Clear the scratch spill buffer used for dummy-lane force stores */
    for(iidx=0;iidx<4*DIM;iidx++)
    scratch[iidx] = 0.0;

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    /* NOTE(review): the opening '{' of the outer loop is missing here. */
        /* Load shift vector for this list */
        i_shift_offset = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start = jindex[iidx];
        j_index_end = jindex[iidx+1];

        /* Get outer coordinate index */
        /* NOTE(review): `inr = iinr[iidx];` is missing here. */
        i_coord_offset = DIM*inr;

        /* Load i particle coords and add shift vector; +DIM skips atom 0,
         * which has no charge product in this Coulomb-only kernel. */
        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset+DIM,
        &ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);

        /* Reset i-atom force accumulators */
        fix1 = _mm256_setzero_pd();
        fiy1 = _mm256_setzero_pd();
        fiz1 = _mm256_setzero_pd();
        fix2 = _mm256_setzero_pd();
        fiy2 = _mm256_setzero_pd();
        fiz2 = _mm256_setzero_pd();
        fix3 = _mm256_setzero_pd();
        fiy3 = _mm256_setzero_pd();
        fiz3 = _mm256_setzero_pd();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        /* NOTE(review): the opening '{' and `jnrA = jjnr[jidx];` are missing
         * here; the remainder of this kernel continues past this chunk. */
            /* Get j neighbor index, and coordinate index */
            jnrB = jjnr[jidx+1];
            jnrC = jjnr[jidx+2];
            jnrD = jjnr[jidx+3];
1088 j_coord_offsetA = DIM*jnrA;
1089 j_coord_offsetB = DIM*jnrB;
1090 j_coord_offsetC = DIM*jnrC;
1091 j_coord_offsetD = DIM*jnrD;
1093 /* load j atom coordinates */
1094 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
1095 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
1096 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
1098 /* Calculate displacement vector */
1099 dx11 = _mm256_sub_pd(ix1,jx1);
1100 dy11 = _mm256_sub_pd(iy1,jy1);
1101 dz11 = _mm256_sub_pd(iz1,jz1);
1102 dx12 = _mm256_sub_pd(ix1,jx2);
1103 dy12 = _mm256_sub_pd(iy1,jy2);
1104 dz12 = _mm256_sub_pd(iz1,jz2);
1105 dx13 = _mm256_sub_pd(ix1,jx3);
1106 dy13 = _mm256_sub_pd(iy1,jy3);
1107 dz13 = _mm256_sub_pd(iz1,jz3);
1108 dx21 = _mm256_sub_pd(ix2,jx1);
1109 dy21 = _mm256_sub_pd(iy2,jy1);
1110 dz21 = _mm256_sub_pd(iz2,jz1);
1111 dx22 = _mm256_sub_pd(ix2,jx2);
1112 dy22 = _mm256_sub_pd(iy2,jy2);
1113 dz22 = _mm256_sub_pd(iz2,jz2);
1114 dx23 = _mm256_sub_pd(ix2,jx3);
1115 dy23 = _mm256_sub_pd(iy2,jy3);
1116 dz23 = _mm256_sub_pd(iz2,jz3);
1117 dx31 = _mm256_sub_pd(ix3,jx1);
1118 dy31 = _mm256_sub_pd(iy3,jy1);
1119 dz31 = _mm256_sub_pd(iz3,jz1);
1120 dx32 = _mm256_sub_pd(ix3,jx2);
1121 dy32 = _mm256_sub_pd(iy3,jy2);
1122 dz32 = _mm256_sub_pd(iz3,jz2);
1123 dx33 = _mm256_sub_pd(ix3,jx3);
1124 dy33 = _mm256_sub_pd(iy3,jy3);
1125 dz33 = _mm256_sub_pd(iz3,jz3);
1127 /* Calculate squared distance and things based on it */
1128 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1129 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1130 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1131 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1132 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1133 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1134 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1135 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1136 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1138 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
1139 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
1140 rinv13 = gmx_mm256_invsqrt_pd(rsq13);
1141 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
1142 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
1143 rinv23 = gmx_mm256_invsqrt_pd(rsq23);
1144 rinv31 = gmx_mm256_invsqrt_pd(rsq31);
1145 rinv32 = gmx_mm256_invsqrt_pd(rsq32);
1146 rinv33 = gmx_mm256_invsqrt_pd(rsq33);
1148 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1149 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1150 rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
1151 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1152 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1153 rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
1154 rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
1155 rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
1156 rinvsq33 = _mm256_mul_pd(rinv33,rinv33);
1158 fjx1 = _mm256_setzero_pd();
1159 fjy1 = _mm256_setzero_pd();
1160 fjz1 = _mm256_setzero_pd();
1161 fjx2 = _mm256_setzero_pd();
1162 fjy2 = _mm256_setzero_pd();
1163 fjz2 = _mm256_setzero_pd();
1164 fjx3 = _mm256_setzero_pd();
1165 fjy3 = _mm256_setzero_pd();
1166 fjz3 = _mm256_setzero_pd();
1168 /**************************
1169 * CALCULATE INTERACTIONS *
1170 **************************/
1172 /* COULOMB ELECTROSTATICS */
1173 velec = _mm256_mul_pd(qq11,rinv11);
1174 felec = _mm256_mul_pd(velec,rinvsq11);
1178 /* Calculate temporary vectorial force */
1179 tx = _mm256_mul_pd(fscal,dx11);
1180 ty = _mm256_mul_pd(fscal,dy11);
1181 tz = _mm256_mul_pd(fscal,dz11);
1183 /* Update vectorial force */
1184 fix1 = _mm256_add_pd(fix1,tx);
1185 fiy1 = _mm256_add_pd(fiy1,ty);
1186 fiz1 = _mm256_add_pd(fiz1,tz);
1188 fjx1 = _mm256_add_pd(fjx1,tx);
1189 fjy1 = _mm256_add_pd(fjy1,ty);
1190 fjz1 = _mm256_add_pd(fjz1,tz);
1192 /**************************
1193 * CALCULATE INTERACTIONS *
1194 **************************/
1196 /* COULOMB ELECTROSTATICS */
1197 velec = _mm256_mul_pd(qq12,rinv12);
1198 felec = _mm256_mul_pd(velec,rinvsq12);
1202 /* Calculate temporary vectorial force */
1203 tx = _mm256_mul_pd(fscal,dx12);
1204 ty = _mm256_mul_pd(fscal,dy12);
1205 tz = _mm256_mul_pd(fscal,dz12);
1207 /* Update vectorial force */
1208 fix1 = _mm256_add_pd(fix1,tx);
1209 fiy1 = _mm256_add_pd(fiy1,ty);
1210 fiz1 = _mm256_add_pd(fiz1,tz);
1212 fjx2 = _mm256_add_pd(fjx2,tx);
1213 fjy2 = _mm256_add_pd(fjy2,ty);
1214 fjz2 = _mm256_add_pd(fjz2,tz);
1216 /**************************
1217 * CALCULATE INTERACTIONS *
1218 **************************/
1220 /* COULOMB ELECTROSTATICS */
1221 velec = _mm256_mul_pd(qq13,rinv13);
1222 felec = _mm256_mul_pd(velec,rinvsq13);
1226 /* Calculate temporary vectorial force */
1227 tx = _mm256_mul_pd(fscal,dx13);
1228 ty = _mm256_mul_pd(fscal,dy13);
1229 tz = _mm256_mul_pd(fscal,dz13);
1231 /* Update vectorial force */
1232 fix1 = _mm256_add_pd(fix1,tx);
1233 fiy1 = _mm256_add_pd(fiy1,ty);
1234 fiz1 = _mm256_add_pd(fiz1,tz);
1236 fjx3 = _mm256_add_pd(fjx3,tx);
1237 fjy3 = _mm256_add_pd(fjy3,ty);
1238 fjz3 = _mm256_add_pd(fjz3,tz);
1240 /**************************
1241 * CALCULATE INTERACTIONS *
1242 **************************/
1244 /* COULOMB ELECTROSTATICS */
1245 velec = _mm256_mul_pd(qq21,rinv21);
1246 felec = _mm256_mul_pd(velec,rinvsq21);
1250 /* Calculate temporary vectorial force */
1251 tx = _mm256_mul_pd(fscal,dx21);
1252 ty = _mm256_mul_pd(fscal,dy21);
1253 tz = _mm256_mul_pd(fscal,dz21);
1255 /* Update vectorial force */
1256 fix2 = _mm256_add_pd(fix2,tx);
1257 fiy2 = _mm256_add_pd(fiy2,ty);
1258 fiz2 = _mm256_add_pd(fiz2,tz);
1260 fjx1 = _mm256_add_pd(fjx1,tx);
1261 fjy1 = _mm256_add_pd(fjy1,ty);
1262 fjz1 = _mm256_add_pd(fjz1,tz);
1264 /**************************
1265 * CALCULATE INTERACTIONS *
1266 **************************/
1268 /* COULOMB ELECTROSTATICS */
1269 velec = _mm256_mul_pd(qq22,rinv22);
1270 felec = _mm256_mul_pd(velec,rinvsq22);
1274 /* Calculate temporary vectorial force */
1275 tx = _mm256_mul_pd(fscal,dx22);
1276 ty = _mm256_mul_pd(fscal,dy22);
1277 tz = _mm256_mul_pd(fscal,dz22);
1279 /* Update vectorial force */
1280 fix2 = _mm256_add_pd(fix2,tx);
1281 fiy2 = _mm256_add_pd(fiy2,ty);
1282 fiz2 = _mm256_add_pd(fiz2,tz);
1284 fjx2 = _mm256_add_pd(fjx2,tx);
1285 fjy2 = _mm256_add_pd(fjy2,ty);
1286 fjz2 = _mm256_add_pd(fjz2,tz);
1288 /**************************
1289 * CALCULATE INTERACTIONS *
1290 **************************/
1292 /* COULOMB ELECTROSTATICS */
1293 velec = _mm256_mul_pd(qq23,rinv23);
1294 felec = _mm256_mul_pd(velec,rinvsq23);
1298 /* Calculate temporary vectorial force */
1299 tx = _mm256_mul_pd(fscal,dx23);
1300 ty = _mm256_mul_pd(fscal,dy23);
1301 tz = _mm256_mul_pd(fscal,dz23);
1303 /* Update vectorial force */
1304 fix2 = _mm256_add_pd(fix2,tx);
1305 fiy2 = _mm256_add_pd(fiy2,ty);
1306 fiz2 = _mm256_add_pd(fiz2,tz);
1308 fjx3 = _mm256_add_pd(fjx3,tx);
1309 fjy3 = _mm256_add_pd(fjy3,ty);
1310 fjz3 = _mm256_add_pd(fjz3,tz);
1312 /**************************
1313 * CALCULATE INTERACTIONS *
1314 **************************/
1316 /* COULOMB ELECTROSTATICS */
1317 velec = _mm256_mul_pd(qq31,rinv31);
1318 felec = _mm256_mul_pd(velec,rinvsq31);
1322 /* Calculate temporary vectorial force */
1323 tx = _mm256_mul_pd(fscal,dx31);
1324 ty = _mm256_mul_pd(fscal,dy31);
1325 tz = _mm256_mul_pd(fscal,dz31);
1327 /* Update vectorial force */
1328 fix3 = _mm256_add_pd(fix3,tx);
1329 fiy3 = _mm256_add_pd(fiy3,ty);
1330 fiz3 = _mm256_add_pd(fiz3,tz);
1332 fjx1 = _mm256_add_pd(fjx1,tx);
1333 fjy1 = _mm256_add_pd(fjy1,ty);
1334 fjz1 = _mm256_add_pd(fjz1,tz);
1336 /**************************
1337 * CALCULATE INTERACTIONS *
1338 **************************/
1340 /* COULOMB ELECTROSTATICS */
1341 velec = _mm256_mul_pd(qq32,rinv32);
1342 felec = _mm256_mul_pd(velec,rinvsq32);
1346 /* Calculate temporary vectorial force */
1347 tx = _mm256_mul_pd(fscal,dx32);
1348 ty = _mm256_mul_pd(fscal,dy32);
1349 tz = _mm256_mul_pd(fscal,dz32);
1351 /* Update vectorial force */
1352 fix3 = _mm256_add_pd(fix3,tx);
1353 fiy3 = _mm256_add_pd(fiy3,ty);
1354 fiz3 = _mm256_add_pd(fiz3,tz);
1356 fjx2 = _mm256_add_pd(fjx2,tx);
1357 fjy2 = _mm256_add_pd(fjy2,ty);
1358 fjz2 = _mm256_add_pd(fjz2,tz);
1360 /**************************
1361 * CALCULATE INTERACTIONS *
1362 **************************/
1364 /* COULOMB ELECTROSTATICS */
1365 velec = _mm256_mul_pd(qq33,rinv33);
1366 felec = _mm256_mul_pd(velec,rinvsq33);
1370 /* Calculate temporary vectorial force */
1371 tx = _mm256_mul_pd(fscal,dx33);
1372 ty = _mm256_mul_pd(fscal,dy33);
1373 tz = _mm256_mul_pd(fscal,dz33);
1375 /* Update vectorial force */
1376 fix3 = _mm256_add_pd(fix3,tx);
1377 fiy3 = _mm256_add_pd(fiy3,ty);
1378 fiz3 = _mm256_add_pd(fiz3,tz);
1380 fjx3 = _mm256_add_pd(fjx3,tx);
1381 fjy3 = _mm256_add_pd(fjy3,ty);
1382 fjz3 = _mm256_add_pd(fjz3,tz);
1384 fjptrA = f+j_coord_offsetA;
1385 fjptrB = f+j_coord_offsetB;
1386 fjptrC = f+j_coord_offsetC;
1387 fjptrD = f+j_coord_offsetD;
1389 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
1390 fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1392 /* Inner loop uses 234 flops */
1395 if(jidx<j_index_end)
1398 /* Get j neighbor index, and coordinate index */
1399 jnrlistA = jjnr[jidx];
1400 jnrlistB = jjnr[jidx+1];
1401 jnrlistC = jjnr[jidx+2];
1402 jnrlistD = jjnr[jidx+3];
1403 /* Sign of each element will be negative for non-real atoms.
1404 * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1405 * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
1407 tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1409 tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1410 tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1411 dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
1413 jnrA = (jnrlistA>=0) ? jnrlistA : 0;
1414 jnrB = (jnrlistB>=0) ? jnrlistB : 0;
1415 jnrC = (jnrlistC>=0) ? jnrlistC : 0;
1416 jnrD = (jnrlistD>=0) ? jnrlistD : 0;
1417 j_coord_offsetA = DIM*jnrA;
1418 j_coord_offsetB = DIM*jnrB;
1419 j_coord_offsetC = DIM*jnrC;
1420 j_coord_offsetD = DIM*jnrD;
1422 /* load j atom coordinates */
1423 gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA+DIM,x+j_coord_offsetB+DIM,
1424 x+j_coord_offsetC+DIM,x+j_coord_offsetD+DIM,
1425 &jx1,&jy1,&jz1,&jx2,&jy2,&jz2,&jx3,&jy3,&jz3);
1427 /* Calculate displacement vector */
1428 dx11 = _mm256_sub_pd(ix1,jx1);
1429 dy11 = _mm256_sub_pd(iy1,jy1);
1430 dz11 = _mm256_sub_pd(iz1,jz1);
1431 dx12 = _mm256_sub_pd(ix1,jx2);
1432 dy12 = _mm256_sub_pd(iy1,jy2);
1433 dz12 = _mm256_sub_pd(iz1,jz2);
1434 dx13 = _mm256_sub_pd(ix1,jx3);
1435 dy13 = _mm256_sub_pd(iy1,jy3);
1436 dz13 = _mm256_sub_pd(iz1,jz3);
1437 dx21 = _mm256_sub_pd(ix2,jx1);
1438 dy21 = _mm256_sub_pd(iy2,jy1);
1439 dz21 = _mm256_sub_pd(iz2,jz1);
1440 dx22 = _mm256_sub_pd(ix2,jx2);
1441 dy22 = _mm256_sub_pd(iy2,jy2);
1442 dz22 = _mm256_sub_pd(iz2,jz2);
1443 dx23 = _mm256_sub_pd(ix2,jx3);
1444 dy23 = _mm256_sub_pd(iy2,jy3);
1445 dz23 = _mm256_sub_pd(iz2,jz3);
1446 dx31 = _mm256_sub_pd(ix3,jx1);
1447 dy31 = _mm256_sub_pd(iy3,jy1);
1448 dz31 = _mm256_sub_pd(iz3,jz1);
1449 dx32 = _mm256_sub_pd(ix3,jx2);
1450 dy32 = _mm256_sub_pd(iy3,jy2);
1451 dz32 = _mm256_sub_pd(iz3,jz2);
1452 dx33 = _mm256_sub_pd(ix3,jx3);
1453 dy33 = _mm256_sub_pd(iy3,jy3);
1454 dz33 = _mm256_sub_pd(iz3,jz3);
1456 /* Calculate squared distance and things based on it */
1457 rsq11 = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1458 rsq12 = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1459 rsq13 = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1460 rsq21 = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1461 rsq22 = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1462 rsq23 = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1463 rsq31 = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1464 rsq32 = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1465 rsq33 = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1467 rinv11 = gmx_mm256_invsqrt_pd(rsq11);
1468 rinv12 = gmx_mm256_invsqrt_pd(rsq12);
1469 rinv13 = gmx_mm256_invsqrt_pd(rsq13);
1470 rinv21 = gmx_mm256_invsqrt_pd(rsq21);
1471 rinv22 = gmx_mm256_invsqrt_pd(rsq22);
1472 rinv23 = gmx_mm256_invsqrt_pd(rsq23);
1473 rinv31 = gmx_mm256_invsqrt_pd(rsq31);
1474 rinv32 = gmx_mm256_invsqrt_pd(rsq32);
1475 rinv33 = gmx_mm256_invsqrt_pd(rsq33);
1477 rinvsq11 = _mm256_mul_pd(rinv11,rinv11);
1478 rinvsq12 = _mm256_mul_pd(rinv12,rinv12);
1479 rinvsq13 = _mm256_mul_pd(rinv13,rinv13);
1480 rinvsq21 = _mm256_mul_pd(rinv21,rinv21);
1481 rinvsq22 = _mm256_mul_pd(rinv22,rinv22);
1482 rinvsq23 = _mm256_mul_pd(rinv23,rinv23);
1483 rinvsq31 = _mm256_mul_pd(rinv31,rinv31);
1484 rinvsq32 = _mm256_mul_pd(rinv32,rinv32);
1485 rinvsq33 = _mm256_mul_pd(rinv33,rinv33);
1487 fjx1 = _mm256_setzero_pd();
1488 fjy1 = _mm256_setzero_pd();
1489 fjz1 = _mm256_setzero_pd();
1490 fjx2 = _mm256_setzero_pd();
1491 fjy2 = _mm256_setzero_pd();
1492 fjz2 = _mm256_setzero_pd();
1493 fjx3 = _mm256_setzero_pd();
1494 fjy3 = _mm256_setzero_pd();
1495 fjz3 = _mm256_setzero_pd();
1497 /**************************
1498 * CALCULATE INTERACTIONS *
1499 **************************/
1501 /* COULOMB ELECTROSTATICS */
1502 velec = _mm256_mul_pd(qq11,rinv11);
1503 felec = _mm256_mul_pd(velec,rinvsq11);
1507 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1509 /* Calculate temporary vectorial force */
1510 tx = _mm256_mul_pd(fscal,dx11);
1511 ty = _mm256_mul_pd(fscal,dy11);
1512 tz = _mm256_mul_pd(fscal,dz11);
1514 /* Update vectorial force */
1515 fix1 = _mm256_add_pd(fix1,tx);
1516 fiy1 = _mm256_add_pd(fiy1,ty);
1517 fiz1 = _mm256_add_pd(fiz1,tz);
1519 fjx1 = _mm256_add_pd(fjx1,tx);
1520 fjy1 = _mm256_add_pd(fjy1,ty);
1521 fjz1 = _mm256_add_pd(fjz1,tz);
1523 /**************************
1524 * CALCULATE INTERACTIONS *
1525 **************************/
1527 /* COULOMB ELECTROSTATICS */
1528 velec = _mm256_mul_pd(qq12,rinv12);
1529 felec = _mm256_mul_pd(velec,rinvsq12);
1533 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1535 /* Calculate temporary vectorial force */
1536 tx = _mm256_mul_pd(fscal,dx12);
1537 ty = _mm256_mul_pd(fscal,dy12);
1538 tz = _mm256_mul_pd(fscal,dz12);
1540 /* Update vectorial force */
1541 fix1 = _mm256_add_pd(fix1,tx);
1542 fiy1 = _mm256_add_pd(fiy1,ty);
1543 fiz1 = _mm256_add_pd(fiz1,tz);
1545 fjx2 = _mm256_add_pd(fjx2,tx);
1546 fjy2 = _mm256_add_pd(fjy2,ty);
1547 fjz2 = _mm256_add_pd(fjz2,tz);
1549 /**************************
1550 * CALCULATE INTERACTIONS *
1551 **************************/
1553 /* COULOMB ELECTROSTATICS */
1554 velec = _mm256_mul_pd(qq13,rinv13);
1555 felec = _mm256_mul_pd(velec,rinvsq13);
1559 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1561 /* Calculate temporary vectorial force */
1562 tx = _mm256_mul_pd(fscal,dx13);
1563 ty = _mm256_mul_pd(fscal,dy13);
1564 tz = _mm256_mul_pd(fscal,dz13);
1566 /* Update vectorial force */
1567 fix1 = _mm256_add_pd(fix1,tx);
1568 fiy1 = _mm256_add_pd(fiy1,ty);
1569 fiz1 = _mm256_add_pd(fiz1,tz);
1571 fjx3 = _mm256_add_pd(fjx3,tx);
1572 fjy3 = _mm256_add_pd(fjy3,ty);
1573 fjz3 = _mm256_add_pd(fjz3,tz);
1575 /**************************
1576 * CALCULATE INTERACTIONS *
1577 **************************/
1579 /* COULOMB ELECTROSTATICS */
1580 velec = _mm256_mul_pd(qq21,rinv21);
1581 felec = _mm256_mul_pd(velec,rinvsq21);
1585 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1587 /* Calculate temporary vectorial force */
1588 tx = _mm256_mul_pd(fscal,dx21);
1589 ty = _mm256_mul_pd(fscal,dy21);
1590 tz = _mm256_mul_pd(fscal,dz21);
1592 /* Update vectorial force */
1593 fix2 = _mm256_add_pd(fix2,tx);
1594 fiy2 = _mm256_add_pd(fiy2,ty);
1595 fiz2 = _mm256_add_pd(fiz2,tz);
1597 fjx1 = _mm256_add_pd(fjx1,tx);
1598 fjy1 = _mm256_add_pd(fjy1,ty);
1599 fjz1 = _mm256_add_pd(fjz1,tz);
1601 /**************************
1602 * CALCULATE INTERACTIONS *
1603 **************************/
1605 /* COULOMB ELECTROSTATICS */
1606 velec = _mm256_mul_pd(qq22,rinv22);
1607 felec = _mm256_mul_pd(velec,rinvsq22);
1611 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1613 /* Calculate temporary vectorial force */
1614 tx = _mm256_mul_pd(fscal,dx22);
1615 ty = _mm256_mul_pd(fscal,dy22);
1616 tz = _mm256_mul_pd(fscal,dz22);
1618 /* Update vectorial force */
1619 fix2 = _mm256_add_pd(fix2,tx);
1620 fiy2 = _mm256_add_pd(fiy2,ty);
1621 fiz2 = _mm256_add_pd(fiz2,tz);
1623 fjx2 = _mm256_add_pd(fjx2,tx);
1624 fjy2 = _mm256_add_pd(fjy2,ty);
1625 fjz2 = _mm256_add_pd(fjz2,tz);
1627 /**************************
1628 * CALCULATE INTERACTIONS *
1629 **************************/
1631 /* COULOMB ELECTROSTATICS */
1632 velec = _mm256_mul_pd(qq23,rinv23);
1633 felec = _mm256_mul_pd(velec,rinvsq23);
1637 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1639 /* Calculate temporary vectorial force */
1640 tx = _mm256_mul_pd(fscal,dx23);
1641 ty = _mm256_mul_pd(fscal,dy23);
1642 tz = _mm256_mul_pd(fscal,dz23);
1644 /* Update vectorial force */
1645 fix2 = _mm256_add_pd(fix2,tx);
1646 fiy2 = _mm256_add_pd(fiy2,ty);
1647 fiz2 = _mm256_add_pd(fiz2,tz);
1649 fjx3 = _mm256_add_pd(fjx3,tx);
1650 fjy3 = _mm256_add_pd(fjy3,ty);
1651 fjz3 = _mm256_add_pd(fjz3,tz);
1653 /**************************
1654 * CALCULATE INTERACTIONS *
1655 **************************/
1657 /* COULOMB ELECTROSTATICS */
1658 velec = _mm256_mul_pd(qq31,rinv31);
1659 felec = _mm256_mul_pd(velec,rinvsq31);
1663 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1665 /* Calculate temporary vectorial force */
1666 tx = _mm256_mul_pd(fscal,dx31);
1667 ty = _mm256_mul_pd(fscal,dy31);
1668 tz = _mm256_mul_pd(fscal,dz31);
1670 /* Update vectorial force */
1671 fix3 = _mm256_add_pd(fix3,tx);
1672 fiy3 = _mm256_add_pd(fiy3,ty);
1673 fiz3 = _mm256_add_pd(fiz3,tz);
1675 fjx1 = _mm256_add_pd(fjx1,tx);
1676 fjy1 = _mm256_add_pd(fjy1,ty);
1677 fjz1 = _mm256_add_pd(fjz1,tz);
1679 /**************************
1680 * CALCULATE INTERACTIONS *
1681 **************************/
1683 /* COULOMB ELECTROSTATICS */
1684 velec = _mm256_mul_pd(qq32,rinv32);
1685 felec = _mm256_mul_pd(velec,rinvsq32);
1689 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1691 /* Calculate temporary vectorial force */
1692 tx = _mm256_mul_pd(fscal,dx32);
1693 ty = _mm256_mul_pd(fscal,dy32);
1694 tz = _mm256_mul_pd(fscal,dz32);
1696 /* Update vectorial force */
1697 fix3 = _mm256_add_pd(fix3,tx);
1698 fiy3 = _mm256_add_pd(fiy3,ty);
1699 fiz3 = _mm256_add_pd(fiz3,tz);
1701 fjx2 = _mm256_add_pd(fjx2,tx);
1702 fjy2 = _mm256_add_pd(fjy2,ty);
1703 fjz2 = _mm256_add_pd(fjz2,tz);
1705 /**************************
1706 * CALCULATE INTERACTIONS *
1707 **************************/
1709 /* COULOMB ELECTROSTATICS */
1710 velec = _mm256_mul_pd(qq33,rinv33);
1711 felec = _mm256_mul_pd(velec,rinvsq33);
1715 fscal = _mm256_andnot_pd(dummy_mask,fscal);
1717 /* Calculate temporary vectorial force */
1718 tx = _mm256_mul_pd(fscal,dx33);
1719 ty = _mm256_mul_pd(fscal,dy33);
1720 tz = _mm256_mul_pd(fscal,dz33);
1722 /* Update vectorial force */
1723 fix3 = _mm256_add_pd(fix3,tx);
1724 fiy3 = _mm256_add_pd(fiy3,ty);
1725 fiz3 = _mm256_add_pd(fiz3,tz);
1727 fjx3 = _mm256_add_pd(fjx3,tx);
1728 fjy3 = _mm256_add_pd(fjy3,ty);
1729 fjz3 = _mm256_add_pd(fjz3,tz);
1731 fjptrA = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1732 fjptrB = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1733 fjptrC = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1734 fjptrD = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1736 gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA+DIM,fjptrB+DIM,fjptrC+DIM,fjptrD+DIM,
1737 fjx1,fjy1,fjz1,fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1739 /* Inner loop uses 234 flops */
1742 /* End of innermost loop */
1744 gmx_mm256_update_iforce_3atom_swizzle_pd(fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1745 f+i_coord_offset+DIM,fshift+i_shift_offset);
1747 /* Increment number of inner iterations */
1748 inneriter += j_index_end - j_index_start;
1750 /* Outer loop uses 18 flops */
1753 /* Increment number of outer iterations */
1756 /* Update outer/inner flops */
1758 inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_W4W4_F,outeriter*18 + inneriter*234);