src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_avx_256_double.c
1 /*
2  * Note: this file was generated by the Gromacs avx_256_double kernel generator.
3  *
4  *                This source code is part of
5  *
6  *                 G   R   O   M   A   C   S
7  *
8  * Copyright (c) 2001-2012, The GROMACS Development Team
9  *
10  * Gromacs is a library for molecular simulation and trajectory analysis,
11  * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
12  * a full list of developers and information, check out http://www.gromacs.org
13  *
14  * This program is free software; you can redistribute it and/or modify it under
15  * the terms of the GNU Lesser General Public License as published by the Free
16  * Software Foundation; either version 2 of the License, or (at your option) any
17  * later version.
18  *
19  * To help fund GROMACS development, we humbly ask that you cite
20  * the papers people have written on it - you can find them on the website.
21  */
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <math.h>
27
28 #include "../nb_kernel.h"
29 #include "types/simple.h"
30 #include "vec.h"
31 #include "nrnb.h"
32
33 #include "gmx_math_x86_avx_256_double.h"
34 #include "kernelutil_x86_avx_256_double.h"
35
36 /*
37  * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_double
38  * Electrostatics interaction: Coulomb
39  * VdW interaction:            CubicSplineTable
40  * Geometry:                   Water4-Water4
41  * Calculate force/pot:        PotentialAndForce
42  */
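/*
 * Water4-Water4 pair layout: site 0 is the vdW-only site (the oxygen in
 * four-site water models such as TIP4P), while sites 1-3 carry the charges.
 * Each i-water/j-water pair therefore needs one tabulated dispersion/repulsion
 * interaction (0-0) plus nine Coulomb interactions (sites 1-3 against 1-3);
 * the kernel below evaluates all ten of them for four j waters per iteration.
 */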
43 void
44 nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_double
45                     (t_nblist * gmx_restrict                nlist,
46                      rvec * gmx_restrict                    xx,
47                      rvec * gmx_restrict                    ff,
48                      t_forcerec * gmx_restrict              fr,
49                      t_mdatoms * gmx_restrict               mdatoms,
50                      nb_kernel_data_t * gmx_restrict        kernel_data,
51                      t_nrnb * gmx_restrict                  nrnb)
52 {
53     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
54      * just 0 for non-waters.
55      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
56      * jnr indices corresponding to data put in the four positions in the SIMD register.
57      */
58     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
59     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
60     int              jnrA,jnrB,jnrC,jnrD;
61     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
62     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
63     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
64     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
65     real             rcutoff_scalar;
66     real             *shiftvec,*fshift,*x,*f;
67     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
68     real             scratch[4*DIM];
69     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
70     real *           vdwioffsetptr0;
71     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
72     real *           vdwioffsetptr1;
73     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
74     real *           vdwioffsetptr2;
75     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
76     real *           vdwioffsetptr3;
77     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
78     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
79     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
80     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
81     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
82     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
83     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
84     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
85     __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
86     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
87     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
88     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
89     __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
90     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
91     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
92     __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
93     __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
94     __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
95     __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
96     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
97     real             *charge;
98     int              nvdwtype;
99     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
100     int              *vdwtype;
101     real             *vdwparam;
102     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
103     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
104     __m128i          vfitab;
105     __m128i          ifour       = _mm_set1_epi32(4);
106     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
107     real             *vftab;
108     __m256d          dummy_mask,cutoff_mask;
109     __m128           tmpmask0,tmpmask1;
110     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
111     __m256d          one     = _mm256_set1_pd(1.0);
112     __m256d          two     = _mm256_set1_pd(2.0);
113     x                = xx[0];
114     f                = ff[0];
115
116     nri              = nlist->nri;
117     iinr             = nlist->iinr;
118     jindex           = nlist->jindex;
119     jjnr             = nlist->jjnr;
120     shiftidx         = nlist->shift;
121     gid              = nlist->gid;
122     shiftvec         = fr->shift_vec[0];
123     fshift           = fr->fshift[0];
124     facel            = _mm256_set1_pd(fr->epsfac);
125     charge           = mdatoms->chargeA;
126     nvdwtype         = fr->ntype;
127     vdwparam         = fr->nbfp;
128     vdwtype          = mdatoms->typeA;
129
130     vftab            = kernel_data->table_vdw->data;
131     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
132
133     /* Setup water-specific parameters */
134     inr              = nlist->iinr[0];
135     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
136     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
137     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
138     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
139
140     jq1              = _mm256_set1_pd(charge[inr+1]);
141     jq2              = _mm256_set1_pd(charge[inr+2]);
142     jq3              = _mm256_set1_pd(charge[inr+3]);
143     vdwjidx0A        = 2*vdwtype[inr+0];
144     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
145     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
146     qq11             = _mm256_mul_pd(iq1,jq1);
147     qq12             = _mm256_mul_pd(iq1,jq2);
148     qq13             = _mm256_mul_pd(iq1,jq3);
149     qq21             = _mm256_mul_pd(iq2,jq1);
150     qq22             = _mm256_mul_pd(iq2,jq2);
151     qq23             = _mm256_mul_pd(iq2,jq3);
152     qq31             = _mm256_mul_pd(iq3,jq1);
153     qq32             = _mm256_mul_pd(iq3,jq2);
154     qq33             = _mm256_mul_pd(iq3,jq3);
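    /* All molecules handled by this Water4-Water4 kernel are assumed to be the
     * same water model, so the three charges and the single 0-0 c6/c12 pair are
     * read once from the first i water (nlist->iinr[0]) and reused for every
     * i and j molecule; no per-atom parameters are reloaded inside the loops.
     */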
155
156     /* Initialize to avoid compiler warnings about possibly uninitialized variables */
157     jnrA = jnrB = jnrC = jnrD = 0;
158     j_coord_offsetA = 0;
159     j_coord_offsetB = 0;
160     j_coord_offsetC = 0;
161     j_coord_offsetD = 0;
162
163     outeriter        = 0;
164     inneriter        = 0;
165
166     for(iidx=0;iidx<4*DIM;iidx++)
167     {
168         scratch[iidx] = 0.0;
169     }
170
171     /* Start outer loop over neighborlists */
172     for(iidx=0; iidx<nri; iidx++)
173     {
174         /* Load shift vector for this list */
175         i_shift_offset   = DIM*shiftidx[iidx];
176
177         /* Load limits for loop over neighbors */
178         j_index_start    = jindex[iidx];
179         j_index_end      = jindex[iidx+1];
180
181         /* Get outer coordinate index */
182         inr              = iinr[iidx];
183         i_coord_offset   = DIM*inr;
184
185         /* Load i particle coords and add shift vector */
186         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
187                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
188
189         fix0             = _mm256_setzero_pd();
190         fiy0             = _mm256_setzero_pd();
191         fiz0             = _mm256_setzero_pd();
192         fix1             = _mm256_setzero_pd();
193         fiy1             = _mm256_setzero_pd();
194         fiz1             = _mm256_setzero_pd();
195         fix2             = _mm256_setzero_pd();
196         fiy2             = _mm256_setzero_pd();
197         fiz2             = _mm256_setzero_pd();
198         fix3             = _mm256_setzero_pd();
199         fiy3             = _mm256_setzero_pd();
200         fiz3             = _mm256_setzero_pd();
201
202         /* Reset potential sums */
203         velecsum         = _mm256_setzero_pd();
204         vvdwsum          = _mm256_setzero_pd();
205
206         /* Start inner kernel loop */
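        /* The j list is processed four entries at a time; this loop only runs
         * while all four entries are real atoms (jjnr[jidx+3]>=0). The padded
         * tail of the list (negative entries) is handled by the masked block
         * after the loop.
         */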
207         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
208         {
209
210             /* Get j neighbor index, and coordinate index */
211             jnrA             = jjnr[jidx];
212             jnrB             = jjnr[jidx+1];
213             jnrC             = jjnr[jidx+2];
214             jnrD             = jjnr[jidx+3];
215             j_coord_offsetA  = DIM*jnrA;
216             j_coord_offsetB  = DIM*jnrB;
217             j_coord_offsetC  = DIM*jnrC;
218             j_coord_offsetD  = DIM*jnrD;
219
220             /* load j atom coordinates */
221             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
222                                                  x+j_coord_offsetC,x+j_coord_offsetD,
223                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
224                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
225
226             /* Calculate displacement vector */
227             dx00             = _mm256_sub_pd(ix0,jx0);
228             dy00             = _mm256_sub_pd(iy0,jy0);
229             dz00             = _mm256_sub_pd(iz0,jz0);
230             dx11             = _mm256_sub_pd(ix1,jx1);
231             dy11             = _mm256_sub_pd(iy1,jy1);
232             dz11             = _mm256_sub_pd(iz1,jz1);
233             dx12             = _mm256_sub_pd(ix1,jx2);
234             dy12             = _mm256_sub_pd(iy1,jy2);
235             dz12             = _mm256_sub_pd(iz1,jz2);
236             dx13             = _mm256_sub_pd(ix1,jx3);
237             dy13             = _mm256_sub_pd(iy1,jy3);
238             dz13             = _mm256_sub_pd(iz1,jz3);
239             dx21             = _mm256_sub_pd(ix2,jx1);
240             dy21             = _mm256_sub_pd(iy2,jy1);
241             dz21             = _mm256_sub_pd(iz2,jz1);
242             dx22             = _mm256_sub_pd(ix2,jx2);
243             dy22             = _mm256_sub_pd(iy2,jy2);
244             dz22             = _mm256_sub_pd(iz2,jz2);
245             dx23             = _mm256_sub_pd(ix2,jx3);
246             dy23             = _mm256_sub_pd(iy2,jy3);
247             dz23             = _mm256_sub_pd(iz2,jz3);
248             dx31             = _mm256_sub_pd(ix3,jx1);
249             dy31             = _mm256_sub_pd(iy3,jy1);
250             dz31             = _mm256_sub_pd(iz3,jz1);
251             dx32             = _mm256_sub_pd(ix3,jx2);
252             dy32             = _mm256_sub_pd(iy3,jy2);
253             dz32             = _mm256_sub_pd(iz3,jz2);
254             dx33             = _mm256_sub_pd(ix3,jx3);
255             dy33             = _mm256_sub_pd(iy3,jy3);
256             dz33             = _mm256_sub_pd(iz3,jz3);
257
258             /* Calculate squared distance and things based on it */
259             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
260             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
261             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
262             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
263             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
264             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
265             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
266             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
267             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
268             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
269
270             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
271             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
272             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
273             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
274             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
275             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
276             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
277             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
278             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
279             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
280
281             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
282             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
283             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
284             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
285             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
286             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
287             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
288             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
289             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
290
291             fjx0             = _mm256_setzero_pd();
292             fjy0             = _mm256_setzero_pd();
293             fjz0             = _mm256_setzero_pd();
294             fjx1             = _mm256_setzero_pd();
295             fjy1             = _mm256_setzero_pd();
296             fjz1             = _mm256_setzero_pd();
297             fjx2             = _mm256_setzero_pd();
298             fjy2             = _mm256_setzero_pd();
299             fjz2             = _mm256_setzero_pd();
300             fjx3             = _mm256_setzero_pd();
301             fjy3             = _mm256_setzero_pd();
302             fjz3             = _mm256_setzero_pd();
303
304             /**************************
305              * CALCULATE INTERACTIONS *
306              **************************/
307
308             r00              = _mm256_mul_pd(rsq00,rinv00);
309
310             /* Calculate table index by multiplying r with table scale and truncate to integer */
311             rt               = _mm256_mul_pd(r00,vftabscale);
312             vfitab           = _mm256_cvttpd_epi32(rt);
313             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
314             vfitab           = _mm_slli_epi32(vfitab,3);
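            /* Each table point stores eight doubles: Y,F,G,H for dispersion
             * followed by Y,F,G,H for repulsion, hence the shift left by 3
             * (multiply by 8) here and the +4 offset for the repulsion part
             * below; vfeps is the fractional position within the interval.
             */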
315
316             /* CUBIC SPLINE TABLE DISPERSION */
317             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
318             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
319             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
320             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
321             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
322             Heps             = _mm256_mul_pd(vfeps,H);
323             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
324             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
325             vvdw6            = _mm256_mul_pd(c6_00,VV);
326             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
327             fvdw6            = _mm256_mul_pd(c6_00,FF);
328
329             /* CUBIC SPLINE TABLE REPULSION */
330             vfitab           = _mm_add_epi32(vfitab,ifour);
331             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
332             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
333             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
334             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
335             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
336             Heps             = _mm256_mul_pd(vfeps,H);
337             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
338             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
339             vvdw12           = _mm256_mul_pd(c12_00,VV);
340             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
341             fvdw12           = _mm256_mul_pd(c12_00,FF);
342             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
343             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
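            /* Cubic spline evaluation, with eps the fractional table offset:
             *   VV = Y + eps*(F + eps*(G + eps*H))    (potential)
             *   FF = F + eps*(2*G + 3*eps*H)          (dV/d(eps))
             * The scalar force is -dV/dr = -FF*tabscale, and the extra rinv00
             * factor turns fscal*dx below into Cartesian force components.
             */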
344
345             /* Update potential sum for this i atom from the interaction with this j atom. */
346             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
347
348             fscal            = fvdw;
349
350             /* Calculate temporary vectorial force */
351             tx               = _mm256_mul_pd(fscal,dx00);
352             ty               = _mm256_mul_pd(fscal,dy00);
353             tz               = _mm256_mul_pd(fscal,dz00);
354
355             /* Update vectorial force */
356             fix0             = _mm256_add_pd(fix0,tx);
357             fiy0             = _mm256_add_pd(fiy0,ty);
358             fiz0             = _mm256_add_pd(fiz0,tz);
359
360             fjx0             = _mm256_add_pd(fjx0,tx);
361             fjy0             = _mm256_add_pd(fjy0,ty);
362             fjz0             = _mm256_add_pd(fjz0,tz);
363
364             /**************************
365              * CALCULATE INTERACTIONS *
366              **************************/
367
368             /* COULOMB ELECTROSTATICS */
369             velec            = _mm256_mul_pd(qq11,rinv11);
370             felec            = _mm256_mul_pd(velec,rinvsq11);
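            /* Plain Coulomb: velec = qq/r (qq already includes epsfac) and
             * felec = qq/r^3, so multiplying felec by dx/dy/dz below gives the
             * Cartesian force components directly.
             */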
371
372             /* Update potential sum for this i atom from the interaction with this j atom. */
373             velecsum         = _mm256_add_pd(velecsum,velec);
374
375             fscal            = felec;
376
377             /* Calculate temporary vectorial force */
378             tx               = _mm256_mul_pd(fscal,dx11);
379             ty               = _mm256_mul_pd(fscal,dy11);
380             tz               = _mm256_mul_pd(fscal,dz11);
381
382             /* Update vectorial force */
383             fix1             = _mm256_add_pd(fix1,tx);
384             fiy1             = _mm256_add_pd(fiy1,ty);
385             fiz1             = _mm256_add_pd(fiz1,tz);
386
387             fjx1             = _mm256_add_pd(fjx1,tx);
388             fjy1             = _mm256_add_pd(fjy1,ty);
389             fjz1             = _mm256_add_pd(fjz1,tz);
390
391             /**************************
392              * CALCULATE INTERACTIONS *
393              **************************/
394
395             /* COULOMB ELECTROSTATICS */
396             velec            = _mm256_mul_pd(qq12,rinv12);
397             felec            = _mm256_mul_pd(velec,rinvsq12);
398
399             /* Update potential sum for this i atom from the interaction with this j atom. */
400             velecsum         = _mm256_add_pd(velecsum,velec);
401
402             fscal            = felec;
403
404             /* Calculate temporary vectorial force */
405             tx               = _mm256_mul_pd(fscal,dx12);
406             ty               = _mm256_mul_pd(fscal,dy12);
407             tz               = _mm256_mul_pd(fscal,dz12);
408
409             /* Update vectorial force */
410             fix1             = _mm256_add_pd(fix1,tx);
411             fiy1             = _mm256_add_pd(fiy1,ty);
412             fiz1             = _mm256_add_pd(fiz1,tz);
413
414             fjx2             = _mm256_add_pd(fjx2,tx);
415             fjy2             = _mm256_add_pd(fjy2,ty);
416             fjz2             = _mm256_add_pd(fjz2,tz);
417
418             /**************************
419              * CALCULATE INTERACTIONS *
420              **************************/
421
422             /* COULOMB ELECTROSTATICS */
423             velec            = _mm256_mul_pd(qq13,rinv13);
424             felec            = _mm256_mul_pd(velec,rinvsq13);
425
426             /* Update potential sum for this i atom from the interaction with this j atom. */
427             velecsum         = _mm256_add_pd(velecsum,velec);
428
429             fscal            = felec;
430
431             /* Calculate temporary vectorial force */
432             tx               = _mm256_mul_pd(fscal,dx13);
433             ty               = _mm256_mul_pd(fscal,dy13);
434             tz               = _mm256_mul_pd(fscal,dz13);
435
436             /* Update vectorial force */
437             fix1             = _mm256_add_pd(fix1,tx);
438             fiy1             = _mm256_add_pd(fiy1,ty);
439             fiz1             = _mm256_add_pd(fiz1,tz);
440
441             fjx3             = _mm256_add_pd(fjx3,tx);
442             fjy3             = _mm256_add_pd(fjy3,ty);
443             fjz3             = _mm256_add_pd(fjz3,tz);
444
445             /**************************
446              * CALCULATE INTERACTIONS *
447              **************************/
448
449             /* COULOMB ELECTROSTATICS */
450             velec            = _mm256_mul_pd(qq21,rinv21);
451             felec            = _mm256_mul_pd(velec,rinvsq21);
452
453             /* Update potential sum for this i atom from the interaction with this j atom. */
454             velecsum         = _mm256_add_pd(velecsum,velec);
455
456             fscal            = felec;
457
458             /* Calculate temporary vectorial force */
459             tx               = _mm256_mul_pd(fscal,dx21);
460             ty               = _mm256_mul_pd(fscal,dy21);
461             tz               = _mm256_mul_pd(fscal,dz21);
462
463             /* Update vectorial force */
464             fix2             = _mm256_add_pd(fix2,tx);
465             fiy2             = _mm256_add_pd(fiy2,ty);
466             fiz2             = _mm256_add_pd(fiz2,tz);
467
468             fjx1             = _mm256_add_pd(fjx1,tx);
469             fjy1             = _mm256_add_pd(fjy1,ty);
470             fjz1             = _mm256_add_pd(fjz1,tz);
471
472             /**************************
473              * CALCULATE INTERACTIONS *
474              **************************/
475
476             /* COULOMB ELECTROSTATICS */
477             velec            = _mm256_mul_pd(qq22,rinv22);
478             felec            = _mm256_mul_pd(velec,rinvsq22);
479
480             /* Update potential sum for this i atom from the interaction with this j atom. */
481             velecsum         = _mm256_add_pd(velecsum,velec);
482
483             fscal            = felec;
484
485             /* Calculate temporary vectorial force */
486             tx               = _mm256_mul_pd(fscal,dx22);
487             ty               = _mm256_mul_pd(fscal,dy22);
488             tz               = _mm256_mul_pd(fscal,dz22);
489
490             /* Update vectorial force */
491             fix2             = _mm256_add_pd(fix2,tx);
492             fiy2             = _mm256_add_pd(fiy2,ty);
493             fiz2             = _mm256_add_pd(fiz2,tz);
494
495             fjx2             = _mm256_add_pd(fjx2,tx);
496             fjy2             = _mm256_add_pd(fjy2,ty);
497             fjz2             = _mm256_add_pd(fjz2,tz);
498
499             /**************************
500              * CALCULATE INTERACTIONS *
501              **************************/
502
503             /* COULOMB ELECTROSTATICS */
504             velec            = _mm256_mul_pd(qq23,rinv23);
505             felec            = _mm256_mul_pd(velec,rinvsq23);
506
507             /* Update potential sum for this i atom from the interaction with this j atom. */
508             velecsum         = _mm256_add_pd(velecsum,velec);
509
510             fscal            = felec;
511
512             /* Calculate temporary vectorial force */
513             tx               = _mm256_mul_pd(fscal,dx23);
514             ty               = _mm256_mul_pd(fscal,dy23);
515             tz               = _mm256_mul_pd(fscal,dz23);
516
517             /* Update vectorial force */
518             fix2             = _mm256_add_pd(fix2,tx);
519             fiy2             = _mm256_add_pd(fiy2,ty);
520             fiz2             = _mm256_add_pd(fiz2,tz);
521
522             fjx3             = _mm256_add_pd(fjx3,tx);
523             fjy3             = _mm256_add_pd(fjy3,ty);
524             fjz3             = _mm256_add_pd(fjz3,tz);
525
526             /**************************
527              * CALCULATE INTERACTIONS *
528              **************************/
529
530             /* COULOMB ELECTROSTATICS */
531             velec            = _mm256_mul_pd(qq31,rinv31);
532             felec            = _mm256_mul_pd(velec,rinvsq31);
533
534             /* Update potential sum for this i atom from the interaction with this j atom. */
535             velecsum         = _mm256_add_pd(velecsum,velec);
536
537             fscal            = felec;
538
539             /* Calculate temporary vectorial force */
540             tx               = _mm256_mul_pd(fscal,dx31);
541             ty               = _mm256_mul_pd(fscal,dy31);
542             tz               = _mm256_mul_pd(fscal,dz31);
543
544             /* Update vectorial force */
545             fix3             = _mm256_add_pd(fix3,tx);
546             fiy3             = _mm256_add_pd(fiy3,ty);
547             fiz3             = _mm256_add_pd(fiz3,tz);
548
549             fjx1             = _mm256_add_pd(fjx1,tx);
550             fjy1             = _mm256_add_pd(fjy1,ty);
551             fjz1             = _mm256_add_pd(fjz1,tz);
552
553             /**************************
554              * CALCULATE INTERACTIONS *
555              **************************/
556
557             /* COULOMB ELECTROSTATICS */
558             velec            = _mm256_mul_pd(qq32,rinv32);
559             felec            = _mm256_mul_pd(velec,rinvsq32);
560
561             /* Update potential sum for this i atom from the interaction with this j atom. */
562             velecsum         = _mm256_add_pd(velecsum,velec);
563
564             fscal            = felec;
565
566             /* Calculate temporary vectorial force */
567             tx               = _mm256_mul_pd(fscal,dx32);
568             ty               = _mm256_mul_pd(fscal,dy32);
569             tz               = _mm256_mul_pd(fscal,dz32);
570
571             /* Update vectorial force */
572             fix3             = _mm256_add_pd(fix3,tx);
573             fiy3             = _mm256_add_pd(fiy3,ty);
574             fiz3             = _mm256_add_pd(fiz3,tz);
575
576             fjx2             = _mm256_add_pd(fjx2,tx);
577             fjy2             = _mm256_add_pd(fjy2,ty);
578             fjz2             = _mm256_add_pd(fjz2,tz);
579
580             /**************************
581              * CALCULATE INTERACTIONS *
582              **************************/
583
584             /* COULOMB ELECTROSTATICS */
585             velec            = _mm256_mul_pd(qq33,rinv33);
586             felec            = _mm256_mul_pd(velec,rinvsq33);
587
588             /* Update potential sum for this i atom from the interaction with this j atom. */
589             velecsum         = _mm256_add_pd(velecsum,velec);
590
591             fscal            = felec;
592
593             /* Calculate temporary vectorial force */
594             tx               = _mm256_mul_pd(fscal,dx33);
595             ty               = _mm256_mul_pd(fscal,dy33);
596             tz               = _mm256_mul_pd(fscal,dz33);
597
598             /* Update vectorial force */
599             fix3             = _mm256_add_pd(fix3,tx);
600             fiy3             = _mm256_add_pd(fiy3,ty);
601             fiz3             = _mm256_add_pd(fiz3,tz);
602
603             fjx3             = _mm256_add_pd(fjx3,tx);
604             fjy3             = _mm256_add_pd(fjy3,ty);
605             fjz3             = _mm256_add_pd(fjz3,tz);
606
607             fjptrA             = f+j_coord_offsetA;
608             fjptrB             = f+j_coord_offsetB;
609             fjptrC             = f+j_coord_offsetC;
610             fjptrD             = f+j_coord_offsetD;
611
612             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
613                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
614                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
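            /* Newton's third law: the fj* accumulators hold the same force
             * contributions that were added to the i-atom sums, and the
             * swizzled decrement above subtracts them from the four j atoms.
             */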
615
616             /* Inner loop uses 302 flops */
617         }
618
619         if(jidx<j_index_end)
620         {
621
622             /* Get j neighbor index, and coordinate index */
623             jnrlistA         = jjnr[jidx];
624             jnrlistB         = jjnr[jidx+1];
625             jnrlistC         = jjnr[jidx+2];
626             jnrlistD         = jjnr[jidx+3];
627             /* Sign of each element will be negative for non-real atoms.
628              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
629              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
630              */
631             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
632
633             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
634             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
635             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
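            /* jjnr<0 marks padding entries. The 32-bit comparison result is
             * widened to a 256-bit double mask (each 32-bit lane duplicated
             * into a 64-bit lane by the two permutes) so dummy contributions
             * can be cleared with _mm256_andnot_pd further down.
             */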
636
637             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
638             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
639             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
640             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
641             j_coord_offsetA  = DIM*jnrA;
642             j_coord_offsetB  = DIM*jnrB;
643             j_coord_offsetC  = DIM*jnrC;
644             j_coord_offsetD  = DIM*jnrD;
645
646             /* load j atom coordinates */
647             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
648                                                  x+j_coord_offsetC,x+j_coord_offsetD,
649                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
650                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
651
652             /* Calculate displacement vector */
653             dx00             = _mm256_sub_pd(ix0,jx0);
654             dy00             = _mm256_sub_pd(iy0,jy0);
655             dz00             = _mm256_sub_pd(iz0,jz0);
656             dx11             = _mm256_sub_pd(ix1,jx1);
657             dy11             = _mm256_sub_pd(iy1,jy1);
658             dz11             = _mm256_sub_pd(iz1,jz1);
659             dx12             = _mm256_sub_pd(ix1,jx2);
660             dy12             = _mm256_sub_pd(iy1,jy2);
661             dz12             = _mm256_sub_pd(iz1,jz2);
662             dx13             = _mm256_sub_pd(ix1,jx3);
663             dy13             = _mm256_sub_pd(iy1,jy3);
664             dz13             = _mm256_sub_pd(iz1,jz3);
665             dx21             = _mm256_sub_pd(ix2,jx1);
666             dy21             = _mm256_sub_pd(iy2,jy1);
667             dz21             = _mm256_sub_pd(iz2,jz1);
668             dx22             = _mm256_sub_pd(ix2,jx2);
669             dy22             = _mm256_sub_pd(iy2,jy2);
670             dz22             = _mm256_sub_pd(iz2,jz2);
671             dx23             = _mm256_sub_pd(ix2,jx3);
672             dy23             = _mm256_sub_pd(iy2,jy3);
673             dz23             = _mm256_sub_pd(iz2,jz3);
674             dx31             = _mm256_sub_pd(ix3,jx1);
675             dy31             = _mm256_sub_pd(iy3,jy1);
676             dz31             = _mm256_sub_pd(iz3,jz1);
677             dx32             = _mm256_sub_pd(ix3,jx2);
678             dy32             = _mm256_sub_pd(iy3,jy2);
679             dz32             = _mm256_sub_pd(iz3,jz2);
680             dx33             = _mm256_sub_pd(ix3,jx3);
681             dy33             = _mm256_sub_pd(iy3,jy3);
682             dz33             = _mm256_sub_pd(iz3,jz3);
683
684             /* Calculate squared distance and things based on it */
685             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
686             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
687             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
688             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
689             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
690             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
691             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
692             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
693             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
694             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
695
696             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
697             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
698             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
699             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
700             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
701             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
702             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
703             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
704             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
705             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
706
707             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
708             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
709             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
710             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
711             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
712             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
713             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
714             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
715             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
716
717             fjx0             = _mm256_setzero_pd();
718             fjy0             = _mm256_setzero_pd();
719             fjz0             = _mm256_setzero_pd();
720             fjx1             = _mm256_setzero_pd();
721             fjy1             = _mm256_setzero_pd();
722             fjz1             = _mm256_setzero_pd();
723             fjx2             = _mm256_setzero_pd();
724             fjy2             = _mm256_setzero_pd();
725             fjz2             = _mm256_setzero_pd();
726             fjx3             = _mm256_setzero_pd();
727             fjy3             = _mm256_setzero_pd();
728             fjz3             = _mm256_setzero_pd();
729
730             /**************************
731              * CALCULATE INTERACTIONS *
732              **************************/
733
734             r00              = _mm256_mul_pd(rsq00,rinv00);
735             r00              = _mm256_andnot_pd(dummy_mask,r00);
736
737             /* Calculate table index by multiplying r with table scale and truncate to integer */
738             rt               = _mm256_mul_pd(r00,vftabscale);
739             vfitab           = _mm256_cvttpd_epi32(rt);
740             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
741             vfitab           = _mm_slli_epi32(vfitab,3);
742
743             /* CUBIC SPLINE TABLE DISPERSION */
744             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
745             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
746             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
747             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
748             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
749             Heps             = _mm256_mul_pd(vfeps,H);
750             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
751             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
752             vvdw6            = _mm256_mul_pd(c6_00,VV);
753             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
754             fvdw6            = _mm256_mul_pd(c6_00,FF);
755
756             /* CUBIC SPLINE TABLE REPULSION */
757             vfitab           = _mm_add_epi32(vfitab,ifour);
758             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
759             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
760             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
761             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
762             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
763             Heps             = _mm256_mul_pd(vfeps,H);
764             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
765             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
766             vvdw12           = _mm256_mul_pd(c12_00,VV);
767             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
768             fvdw12           = _mm256_mul_pd(c12_00,FF);
769             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
770             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
771
772             /* Update potential sum for this i atom from the interaction with this j atom. */
773             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
774             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
775
776             fscal            = fvdw;
777
778             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
779
780             /* Calculate temporary vectorial force */
781             tx               = _mm256_mul_pd(fscal,dx00);
782             ty               = _mm256_mul_pd(fscal,dy00);
783             tz               = _mm256_mul_pd(fscal,dz00);
784
785             /* Update vectorial force */
786             fix0             = _mm256_add_pd(fix0,tx);
787             fiy0             = _mm256_add_pd(fiy0,ty);
788             fiz0             = _mm256_add_pd(fiz0,tz);
789
790             fjx0             = _mm256_add_pd(fjx0,tx);
791             fjy0             = _mm256_add_pd(fjy0,ty);
792             fjz0             = _mm256_add_pd(fjz0,tz);
793
794             /**************************
795              * CALCULATE INTERACTIONS *
796              **************************/
797
798             /* COULOMB ELECTROSTATICS */
799             velec            = _mm256_mul_pd(qq11,rinv11);
800             felec            = _mm256_mul_pd(velec,rinvsq11);
801
802             /* Update potential sum for this i atom from the interaction with this j atom. */
803             velec            = _mm256_andnot_pd(dummy_mask,velec);
804             velecsum         = _mm256_add_pd(velecsum,velec);
805
806             fscal            = felec;
807
808             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
809
810             /* Calculate temporary vectorial force */
811             tx               = _mm256_mul_pd(fscal,dx11);
812             ty               = _mm256_mul_pd(fscal,dy11);
813             tz               = _mm256_mul_pd(fscal,dz11);
814
815             /* Update vectorial force */
816             fix1             = _mm256_add_pd(fix1,tx);
817             fiy1             = _mm256_add_pd(fiy1,ty);
818             fiz1             = _mm256_add_pd(fiz1,tz);
819
820             fjx1             = _mm256_add_pd(fjx1,tx);
821             fjy1             = _mm256_add_pd(fjy1,ty);
822             fjz1             = _mm256_add_pd(fjz1,tz);
823
824             /**************************
825              * CALCULATE INTERACTIONS *
826              **************************/
827
828             /* COULOMB ELECTROSTATICS */
829             velec            = _mm256_mul_pd(qq12,rinv12);
830             felec            = _mm256_mul_pd(velec,rinvsq12);
831
832             /* Update potential sum for this i atom from the interaction with this j atom. */
833             velec            = _mm256_andnot_pd(dummy_mask,velec);
834             velecsum         = _mm256_add_pd(velecsum,velec);
835
836             fscal            = felec;
837
838             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
839
840             /* Calculate temporary vectorial force */
841             tx               = _mm256_mul_pd(fscal,dx12);
842             ty               = _mm256_mul_pd(fscal,dy12);
843             tz               = _mm256_mul_pd(fscal,dz12);
844
845             /* Update vectorial force */
846             fix1             = _mm256_add_pd(fix1,tx);
847             fiy1             = _mm256_add_pd(fiy1,ty);
848             fiz1             = _mm256_add_pd(fiz1,tz);
849
850             fjx2             = _mm256_add_pd(fjx2,tx);
851             fjy2             = _mm256_add_pd(fjy2,ty);
852             fjz2             = _mm256_add_pd(fjz2,tz);
853
854             /**************************
855              * CALCULATE INTERACTIONS *
856              **************************/
857
858             /* COULOMB ELECTROSTATICS */
859             velec            = _mm256_mul_pd(qq13,rinv13);
860             felec            = _mm256_mul_pd(velec,rinvsq13);
861
862             /* Update potential sum for this i atom from the interaction with this j atom. */
863             velec            = _mm256_andnot_pd(dummy_mask,velec);
864             velecsum         = _mm256_add_pd(velecsum,velec);
865
866             fscal            = felec;
867
868             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
869
870             /* Calculate temporary vectorial force */
871             tx               = _mm256_mul_pd(fscal,dx13);
872             ty               = _mm256_mul_pd(fscal,dy13);
873             tz               = _mm256_mul_pd(fscal,dz13);
874
875             /* Update vectorial force */
876             fix1             = _mm256_add_pd(fix1,tx);
877             fiy1             = _mm256_add_pd(fiy1,ty);
878             fiz1             = _mm256_add_pd(fiz1,tz);
879
880             fjx3             = _mm256_add_pd(fjx3,tx);
881             fjy3             = _mm256_add_pd(fjy3,ty);
882             fjz3             = _mm256_add_pd(fjz3,tz);
883
884             /**************************
885              * CALCULATE INTERACTIONS *
886              **************************/
887
888             /* COULOMB ELECTROSTATICS */
889             velec            = _mm256_mul_pd(qq21,rinv21);
890             felec            = _mm256_mul_pd(velec,rinvsq21);
891
892             /* Update potential sum for this i atom from the interaction with this j atom. */
893             velec            = _mm256_andnot_pd(dummy_mask,velec);
894             velecsum         = _mm256_add_pd(velecsum,velec);
895
896             fscal            = felec;
897
898             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
899
900             /* Calculate temporary vectorial force */
901             tx               = _mm256_mul_pd(fscal,dx21);
902             ty               = _mm256_mul_pd(fscal,dy21);
903             tz               = _mm256_mul_pd(fscal,dz21);
904
905             /* Update vectorial force */
906             fix2             = _mm256_add_pd(fix2,tx);
907             fiy2             = _mm256_add_pd(fiy2,ty);
908             fiz2             = _mm256_add_pd(fiz2,tz);
909
910             fjx1             = _mm256_add_pd(fjx1,tx);
911             fjy1             = _mm256_add_pd(fjy1,ty);
912             fjz1             = _mm256_add_pd(fjz1,tz);
913
914             /**************************
915              * CALCULATE INTERACTIONS *
916              **************************/
917
918             /* COULOMB ELECTROSTATICS */
919             velec            = _mm256_mul_pd(qq22,rinv22);
920             felec            = _mm256_mul_pd(velec,rinvsq22);
921
922             /* Update potential sum for this i atom from the interaction with this j atom. */
923             velec            = _mm256_andnot_pd(dummy_mask,velec);
924             velecsum         = _mm256_add_pd(velecsum,velec);
925
926             fscal            = felec;
927
928             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
929
930             /* Calculate temporary vectorial force */
931             tx               = _mm256_mul_pd(fscal,dx22);
932             ty               = _mm256_mul_pd(fscal,dy22);
933             tz               = _mm256_mul_pd(fscal,dz22);
934
935             /* Update vectorial force */
936             fix2             = _mm256_add_pd(fix2,tx);
937             fiy2             = _mm256_add_pd(fiy2,ty);
938             fiz2             = _mm256_add_pd(fiz2,tz);
939
940             fjx2             = _mm256_add_pd(fjx2,tx);
941             fjy2             = _mm256_add_pd(fjy2,ty);
942             fjz2             = _mm256_add_pd(fjz2,tz);
943
944             /**************************
945              * CALCULATE INTERACTIONS *
946              **************************/
947
948             /* COULOMB ELECTROSTATICS */
949             velec            = _mm256_mul_pd(qq23,rinv23);
950             felec            = _mm256_mul_pd(velec,rinvsq23);
951
952             /* Update potential sum for this i atom from the interaction with this j atom. */
953             velec            = _mm256_andnot_pd(dummy_mask,velec);
954             velecsum         = _mm256_add_pd(velecsum,velec);
955
956             fscal            = felec;
957
958             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
959
960             /* Calculate temporary vectorial force */
961             tx               = _mm256_mul_pd(fscal,dx23);
962             ty               = _mm256_mul_pd(fscal,dy23);
963             tz               = _mm256_mul_pd(fscal,dz23);
964
965             /* Update vectorial force */
966             fix2             = _mm256_add_pd(fix2,tx);
967             fiy2             = _mm256_add_pd(fiy2,ty);
968             fiz2             = _mm256_add_pd(fiz2,tz);
969
970             fjx3             = _mm256_add_pd(fjx3,tx);
971             fjy3             = _mm256_add_pd(fjy3,ty);
972             fjz3             = _mm256_add_pd(fjz3,tz);
973
974             /**************************
975              * CALCULATE INTERACTIONS *
976              **************************/
977
978             /* COULOMB ELECTROSTATICS */
979             velec            = _mm256_mul_pd(qq31,rinv31);
980             felec            = _mm256_mul_pd(velec,rinvsq31);
981
982             /* Update potential sum for this i atom from the interaction with this j atom. */
983             velec            = _mm256_andnot_pd(dummy_mask,velec);
984             velecsum         = _mm256_add_pd(velecsum,velec);
985
986             fscal            = felec;
987
988             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
989
990             /* Calculate temporary vectorial force */
991             tx               = _mm256_mul_pd(fscal,dx31);
992             ty               = _mm256_mul_pd(fscal,dy31);
993             tz               = _mm256_mul_pd(fscal,dz31);
994
995             /* Update vectorial force */
996             fix3             = _mm256_add_pd(fix3,tx);
997             fiy3             = _mm256_add_pd(fiy3,ty);
998             fiz3             = _mm256_add_pd(fiz3,tz);
999
1000             fjx1             = _mm256_add_pd(fjx1,tx);
1001             fjy1             = _mm256_add_pd(fjy1,ty);
1002             fjz1             = _mm256_add_pd(fjz1,tz);
1003
1004             /**************************
1005              * CALCULATE INTERACTIONS *
1006              **************************/
1007
1008             /* COULOMB ELECTROSTATICS */
1009             velec            = _mm256_mul_pd(qq32,rinv32);
1010             felec            = _mm256_mul_pd(velec,rinvsq32);
1011
1012             /* Update potential sum for this i atom from the interaction with this j atom. */
1013             velec            = _mm256_andnot_pd(dummy_mask,velec);
1014             velecsum         = _mm256_add_pd(velecsum,velec);
1015
1016             fscal            = felec;
1017
1018             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1019
1020             /* Calculate temporary vectorial force */
1021             tx               = _mm256_mul_pd(fscal,dx32);
1022             ty               = _mm256_mul_pd(fscal,dy32);
1023             tz               = _mm256_mul_pd(fscal,dz32);
1024
1025             /* Update vectorial force */
1026             fix3             = _mm256_add_pd(fix3,tx);
1027             fiy3             = _mm256_add_pd(fiy3,ty);
1028             fiz3             = _mm256_add_pd(fiz3,tz);
1029
1030             fjx2             = _mm256_add_pd(fjx2,tx);
1031             fjy2             = _mm256_add_pd(fjy2,ty);
1032             fjz2             = _mm256_add_pd(fjz2,tz);
1033
1034             /**************************
1035              * CALCULATE INTERACTIONS *
1036              **************************/
1037
1038             /* COULOMB ELECTROSTATICS */
1039             velec            = _mm256_mul_pd(qq33,rinv33);
1040             felec            = _mm256_mul_pd(velec,rinvsq33);
1041
1042             /* Update potential sum for this i atom from the interaction with this j atom. */
1043             velec            = _mm256_andnot_pd(dummy_mask,velec);
1044             velecsum         = _mm256_add_pd(velecsum,velec);
1045
1046             fscal            = felec;
1047
1048             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1049
1050             /* Calculate temporary vectorial force */
1051             tx               = _mm256_mul_pd(fscal,dx33);
1052             ty               = _mm256_mul_pd(fscal,dy33);
1053             tz               = _mm256_mul_pd(fscal,dz33);
1054
1055             /* Update vectorial force */
1056             fix3             = _mm256_add_pd(fix3,tx);
1057             fiy3             = _mm256_add_pd(fiy3,ty);
1058             fiz3             = _mm256_add_pd(fiz3,tz);
1059
1060             fjx3             = _mm256_add_pd(fjx3,tx);
1061             fjy3             = _mm256_add_pd(fjy3,ty);
1062             fjz3             = _mm256_add_pd(fjz3,tz);
1063
1064             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1065             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1066             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1067             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1068
1069             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1070                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1071                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1072
1073             /* Inner loop uses 303 flops */
1074         }
1075
1076         /* End of innermost loop */
1077
1078         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1079                                                  f+i_coord_offset,fshift+i_shift_offset);
1080
1081         ggid                        = gid[iidx];
1082         /* Update potential energies */
1083         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1084         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
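        /* gmx_mm256_update_1pot_pd reduces the four SIMD lanes to one scalar
         * and adds it to the energy-group buffer selected by ggid.
         */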
1085
1086         /* Increment number of inner iterations */
1087         inneriter                  += j_index_end - j_index_start;
1088
1089         /* Outer loop uses 26 flops */
1090     }
1091
1092     /* Increment number of outer iterations */
1093     outeriter        += nri;
1094
1095     /* Update outer/inner flops */
1096
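    /* Flop accounting only: 26 flops per outer iteration and 303 per inner
     * (four-way unrolled) iteration are added to the nonbonded kernel counter
     * used in mdrun's performance summary.
     */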
1097     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*303);
1098 }
1099 /*
1100  * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_double
1101  * Electrostatics interaction: Coulomb
1102  * VdW interaction:            CubicSplineTable
1103  * Geometry:                   Water4-Water4
1104  * Calculate force/pot:        Force
1105  */
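/*
 * Force-only variant: generated from the same template as the VF kernel above,
 * but, as the name indicates, it computes only forces and skips the
 * potential-energy accumulation and energy-group updates.
 */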
1106 void
1107 nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_double
1108                     (t_nblist * gmx_restrict                nlist,
1109                      rvec * gmx_restrict                    xx,
1110                      rvec * gmx_restrict                    ff,
1111                      t_forcerec * gmx_restrict              fr,
1112                      t_mdatoms * gmx_restrict               mdatoms,
1113                      nb_kernel_data_t * gmx_restrict        kernel_data,
1114                      t_nrnb * gmx_restrict                  nrnb)
1115 {
1116     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1117      * just 0 for non-waters.
1118      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1119      * jnr indices corresponding to data put in the four positions in the SIMD register.
1120      */
1121     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1122     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1123     int              jnrA,jnrB,jnrC,jnrD;
1124     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1125     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1126     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1127     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1128     real             rcutoff_scalar;
1129     real             *shiftvec,*fshift,*x,*f;
1130     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1131     real             scratch[4*DIM];
1132     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1133     real *           vdwioffsetptr0;
1134     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1135     real *           vdwioffsetptr1;
1136     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1137     real *           vdwioffsetptr2;
1138     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1139     real *           vdwioffsetptr3;
1140     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1141     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1142     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1143     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1144     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1145     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1146     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1147     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
1148     __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1149     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1150     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1151     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1152     __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1153     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1154     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1155     __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1156     __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1157     __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1158     __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1159     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
1160     real             *charge;
1161     int              nvdwtype;
1162     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1163     int              *vdwtype;
1164     real             *vdwparam;
1165     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
1166     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
1167     __m128i          vfitab;
1168     __m128i          ifour       = _mm_set1_epi32(4);
1169     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1170     real             *vftab;
1171     __m256d          dummy_mask,cutoff_mask;
1172     __m128           tmpmask0,tmpmask1;
1173     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1174     __m256d          one     = _mm256_set1_pd(1.0);
1175     __m256d          two     = _mm256_set1_pd(2.0);
1176     x                = xx[0];
1177     f                = ff[0];
1178
1179     nri              = nlist->nri;
1180     iinr             = nlist->iinr;
1181     jindex           = nlist->jindex;
1182     jjnr             = nlist->jjnr;
1183     shiftidx         = nlist->shift;
1184     gid              = nlist->gid;
1185     shiftvec         = fr->shift_vec[0];
1186     fshift           = fr->fshift[0];
1187     facel            = _mm256_set1_pd(fr->epsfac);
1188     charge           = mdatoms->chargeA;
1189     nvdwtype         = fr->ntype;
1190     vdwparam         = fr->nbfp;
1191     vdwtype          = mdatoms->typeA;
1192
1193     vftab            = kernel_data->table_vdw->data;
1194     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
1195
1196     /* Setup water-specific parameters */
1197     inr              = nlist->iinr[0];
1198     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1199     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1200     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
1201     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1202
1203     jq1              = _mm256_set1_pd(charge[inr+1]);
1204     jq2              = _mm256_set1_pd(charge[inr+2]);
1205     jq3              = _mm256_set1_pd(charge[inr+3]);
1206     vdwjidx0A        = 2*vdwtype[inr+0];
1207     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1208     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1209     qq11             = _mm256_mul_pd(iq1,jq1);
1210     qq12             = _mm256_mul_pd(iq1,jq2);
1211     qq13             = _mm256_mul_pd(iq1,jq3);
1212     qq21             = _mm256_mul_pd(iq2,jq1);
1213     qq22             = _mm256_mul_pd(iq2,jq2);
1214     qq23             = _mm256_mul_pd(iq2,jq3);
1215     qq31             = _mm256_mul_pd(iq3,jq1);
1216     qq32             = _mm256_mul_pd(iq3,jq2);
1217     qq33             = _mm256_mul_pd(iq3,jq3);
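    /* For the Water4 geometry (a four-site water such as TIP4P) the kernel assumes
     * that only site 0 carries Lennard-Jones parameters and only sites 1-3 carry
     * charge, so a single c6/c12 pair and the nine charge products above cover all
     * interactions of a j water.
     */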
1218
1219     /* Initialize to avoid spurious 'may be used uninitialized' compiler warnings */
1220     jnrA = jnrB = jnrC = jnrD = 0;
1221     j_coord_offsetA = 0;
1222     j_coord_offsetB = 0;
1223     j_coord_offsetC = 0;
1224     j_coord_offsetD = 0;
1225
1226     outeriter        = 0;
1227     inneriter        = 0;
1228
1229     for(iidx=0;iidx<4*DIM;iidx++)
1230     {
1231         scratch[iidx] = 0.0;
1232     }
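    /* The scratch buffer zeroed above serves as a dump target for forces belonging
     * to padded (dummy) j entries in the epilogue of the inner loop.
     */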
1233
1234     /* Start outer loop over neighborlists */
1235     for(iidx=0; iidx<nri; iidx++)
1236     {
1237         /* Load shift vector for this list */
1238         i_shift_offset   = DIM*shiftidx[iidx];
1239
1240         /* Load limits for loop over neighbors */
1241         j_index_start    = jindex[iidx];
1242         j_index_end      = jindex[iidx+1];
1243
1244         /* Get outer coordinate index */
1245         inr              = iinr[iidx];
1246         i_coord_offset   = DIM*inr;
1247
1248         /* Load i particle coords and add shift vector */
1249         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1250                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1251
1252         fix0             = _mm256_setzero_pd();
1253         fiy0             = _mm256_setzero_pd();
1254         fiz0             = _mm256_setzero_pd();
1255         fix1             = _mm256_setzero_pd();
1256         fiy1             = _mm256_setzero_pd();
1257         fiz1             = _mm256_setzero_pd();
1258         fix2             = _mm256_setzero_pd();
1259         fiy2             = _mm256_setzero_pd();
1260         fiz2             = _mm256_setzero_pd();
1261         fix3             = _mm256_setzero_pd();
1262         fiy3             = _mm256_setzero_pd();
1263         fiz3             = _mm256_setzero_pd();
1264
1265         /* Start inner kernel loop */
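        /* The loop below only processes complete quartets of real j atoms; the final,
         * possibly padded quartet (negative jjnr entries) is handled by the masked
         * epilogue that follows it.
         */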
1266         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1267         {
1268
1269             /* Get j neighbor index, and coordinate index */
1270             jnrA             = jjnr[jidx];
1271             jnrB             = jjnr[jidx+1];
1272             jnrC             = jjnr[jidx+2];
1273             jnrD             = jjnr[jidx+3];
1274             j_coord_offsetA  = DIM*jnrA;
1275             j_coord_offsetB  = DIM*jnrB;
1276             j_coord_offsetC  = DIM*jnrC;
1277             j_coord_offsetD  = DIM*jnrD;
1278
1279             /* load j atom coordinates */
1280             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1281                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1282                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1283                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
1284
1285             /* Calculate displacement vector */
1286             dx00             = _mm256_sub_pd(ix0,jx0);
1287             dy00             = _mm256_sub_pd(iy0,jy0);
1288             dz00             = _mm256_sub_pd(iz0,jz0);
1289             dx11             = _mm256_sub_pd(ix1,jx1);
1290             dy11             = _mm256_sub_pd(iy1,jy1);
1291             dz11             = _mm256_sub_pd(iz1,jz1);
1292             dx12             = _mm256_sub_pd(ix1,jx2);
1293             dy12             = _mm256_sub_pd(iy1,jy2);
1294             dz12             = _mm256_sub_pd(iz1,jz2);
1295             dx13             = _mm256_sub_pd(ix1,jx3);
1296             dy13             = _mm256_sub_pd(iy1,jy3);
1297             dz13             = _mm256_sub_pd(iz1,jz3);
1298             dx21             = _mm256_sub_pd(ix2,jx1);
1299             dy21             = _mm256_sub_pd(iy2,jy1);
1300             dz21             = _mm256_sub_pd(iz2,jz1);
1301             dx22             = _mm256_sub_pd(ix2,jx2);
1302             dy22             = _mm256_sub_pd(iy2,jy2);
1303             dz22             = _mm256_sub_pd(iz2,jz2);
1304             dx23             = _mm256_sub_pd(ix2,jx3);
1305             dy23             = _mm256_sub_pd(iy2,jy3);
1306             dz23             = _mm256_sub_pd(iz2,jz3);
1307             dx31             = _mm256_sub_pd(ix3,jx1);
1308             dy31             = _mm256_sub_pd(iy3,jy1);
1309             dz31             = _mm256_sub_pd(iz3,jz1);
1310             dx32             = _mm256_sub_pd(ix3,jx2);
1311             dy32             = _mm256_sub_pd(iy3,jy2);
1312             dz32             = _mm256_sub_pd(iz3,jz2);
1313             dx33             = _mm256_sub_pd(ix3,jx3);
1314             dy33             = _mm256_sub_pd(iy3,jy3);
1315             dz33             = _mm256_sub_pd(iz3,jz3);
1316
1317             /* Calculate squared distance and things based on it */
1318             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1319             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1320             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1321             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1322             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1323             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1324             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1325             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1326             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1327             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1328
1329             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1330             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1331             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1332             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
1333             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1334             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1335             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
1336             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
1337             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
1338             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
1339
1340             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1341             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1342             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
1343             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1344             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1345             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
1346             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
1347             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
1348             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
1349
1350             fjx0             = _mm256_setzero_pd();
1351             fjy0             = _mm256_setzero_pd();
1352             fjz0             = _mm256_setzero_pd();
1353             fjx1             = _mm256_setzero_pd();
1354             fjy1             = _mm256_setzero_pd();
1355             fjz1             = _mm256_setzero_pd();
1356             fjx2             = _mm256_setzero_pd();
1357             fjy2             = _mm256_setzero_pd();
1358             fjz2             = _mm256_setzero_pd();
1359             fjx3             = _mm256_setzero_pd();
1360             fjy3             = _mm256_setzero_pd();
1361             fjz3             = _mm256_setzero_pd();
1362
1363             /**************************
1364              * CALCULATE INTERACTIONS *
1365              **************************/
1366
1367             r00              = _mm256_mul_pd(rsq00,rinv00);
1368
1369             /* Calculate the table index by multiplying r with the table scale and truncating to integer */
1370             rt               = _mm256_mul_pd(r00,vftabscale);
1371             vfitab           = _mm256_cvttpd_epi32(rt);
1372             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1373             vfitab           = _mm_slli_epi32(vfitab,3);
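            /* Each table point holds eight doubles: Y,F,G,H for dispersion followed by
             * Y,F,G,H for repulsion, hence the shift by 3 (multiply by 8) here and the
             * offset of four elements before the repulsion lookup below.
             */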
1374
1375             /* CUBIC SPLINE TABLE DISPERSION */
1376             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1377             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1378             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1379             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1380             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1381             Heps             = _mm256_mul_pd(vfeps,H);
1382             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1383             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1384             fvdw6            = _mm256_mul_pd(c6_00,FF);
1385
1386             /* CUBIC SPLINE TABLE REPULSION */
1387             vfitab           = _mm_add_epi32(vfitab,ifour);
1388             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1389             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1390             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1391             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1392             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1393             Heps             = _mm256_mul_pd(vfeps,H);
1394             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1395             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1396             fvdw12           = _mm256_mul_pd(c12_00,FF);
1397             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
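            /* FF is the derivative of the cubic spline with respect to the fractional
             * table coordinate, so the scalar force factor applied to the displacement
             * vector is -(c6*FF_disp + c12*FF_rep)*tabscale/r; the xor with the sign
             * bit performs the negation.
             */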
1398
1399             fscal            = fvdw;
1400
1401             /* Calculate temporary vectorial force */
1402             tx               = _mm256_mul_pd(fscal,dx00);
1403             ty               = _mm256_mul_pd(fscal,dy00);
1404             tz               = _mm256_mul_pd(fscal,dz00);
1405
1406             /* Update vectorial force */
1407             fix0             = _mm256_add_pd(fix0,tx);
1408             fiy0             = _mm256_add_pd(fiy0,ty);
1409             fiz0             = _mm256_add_pd(fiz0,tz);
1410
1411             fjx0             = _mm256_add_pd(fjx0,tx);
1412             fjy0             = _mm256_add_pd(fjy0,ty);
1413             fjz0             = _mm256_add_pd(fjz0,tz);
1414
1415             /**************************
1416              * CALCULATE INTERACTIONS *
1417              **************************/
1418
1419             /* COULOMB ELECTROSTATICS */
1420             velec            = _mm256_mul_pd(qq11,rinv11);
1421             felec            = _mm256_mul_pd(velec,rinvsq11);
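            /* For plain Coulomb, velec = qq/r with qq = epsfac*q_i*q_j, and
             * felec = velec/r^2 = qq/r^3, so multiplying felec by the displacement
             * components yields the Cartesian force; the same pattern repeats for all
             * nine charge-charge pairs below.
             */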
1422
1423             fscal            = felec;
1424
1425             /* Calculate temporary vectorial force */
1426             tx               = _mm256_mul_pd(fscal,dx11);
1427             ty               = _mm256_mul_pd(fscal,dy11);
1428             tz               = _mm256_mul_pd(fscal,dz11);
1429
1430             /* Update vectorial force */
1431             fix1             = _mm256_add_pd(fix1,tx);
1432             fiy1             = _mm256_add_pd(fiy1,ty);
1433             fiz1             = _mm256_add_pd(fiz1,tz);
1434
1435             fjx1             = _mm256_add_pd(fjx1,tx);
1436             fjy1             = _mm256_add_pd(fjy1,ty);
1437             fjz1             = _mm256_add_pd(fjz1,tz);
1438
1439             /**************************
1440              * CALCULATE INTERACTIONS *
1441              **************************/
1442
1443             /* COULOMB ELECTROSTATICS */
1444             velec            = _mm256_mul_pd(qq12,rinv12);
1445             felec            = _mm256_mul_pd(velec,rinvsq12);
1446
1447             fscal            = felec;
1448
1449             /* Calculate temporary vectorial force */
1450             tx               = _mm256_mul_pd(fscal,dx12);
1451             ty               = _mm256_mul_pd(fscal,dy12);
1452             tz               = _mm256_mul_pd(fscal,dz12);
1453
1454             /* Update vectorial force */
1455             fix1             = _mm256_add_pd(fix1,tx);
1456             fiy1             = _mm256_add_pd(fiy1,ty);
1457             fiz1             = _mm256_add_pd(fiz1,tz);
1458
1459             fjx2             = _mm256_add_pd(fjx2,tx);
1460             fjy2             = _mm256_add_pd(fjy2,ty);
1461             fjz2             = _mm256_add_pd(fjz2,tz);
1462
1463             /**************************
1464              * CALCULATE INTERACTIONS *
1465              **************************/
1466
1467             /* COULOMB ELECTROSTATICS */
1468             velec            = _mm256_mul_pd(qq13,rinv13);
1469             felec            = _mm256_mul_pd(velec,rinvsq13);
1470
1471             fscal            = felec;
1472
1473             /* Calculate temporary vectorial force */
1474             tx               = _mm256_mul_pd(fscal,dx13);
1475             ty               = _mm256_mul_pd(fscal,dy13);
1476             tz               = _mm256_mul_pd(fscal,dz13);
1477
1478             /* Update vectorial force */
1479             fix1             = _mm256_add_pd(fix1,tx);
1480             fiy1             = _mm256_add_pd(fiy1,ty);
1481             fiz1             = _mm256_add_pd(fiz1,tz);
1482
1483             fjx3             = _mm256_add_pd(fjx3,tx);
1484             fjy3             = _mm256_add_pd(fjy3,ty);
1485             fjz3             = _mm256_add_pd(fjz3,tz);
1486
1487             /**************************
1488              * CALCULATE INTERACTIONS *
1489              **************************/
1490
1491             /* COULOMB ELECTROSTATICS */
1492             velec            = _mm256_mul_pd(qq21,rinv21);
1493             felec            = _mm256_mul_pd(velec,rinvsq21);
1494
1495             fscal            = felec;
1496
1497             /* Calculate temporary vectorial force */
1498             tx               = _mm256_mul_pd(fscal,dx21);
1499             ty               = _mm256_mul_pd(fscal,dy21);
1500             tz               = _mm256_mul_pd(fscal,dz21);
1501
1502             /* Update vectorial force */
1503             fix2             = _mm256_add_pd(fix2,tx);
1504             fiy2             = _mm256_add_pd(fiy2,ty);
1505             fiz2             = _mm256_add_pd(fiz2,tz);
1506
1507             fjx1             = _mm256_add_pd(fjx1,tx);
1508             fjy1             = _mm256_add_pd(fjy1,ty);
1509             fjz1             = _mm256_add_pd(fjz1,tz);
1510
1511             /**************************
1512              * CALCULATE INTERACTIONS *
1513              **************************/
1514
1515             /* COULOMB ELECTROSTATICS */
1516             velec            = _mm256_mul_pd(qq22,rinv22);
1517             felec            = _mm256_mul_pd(velec,rinvsq22);
1518
1519             fscal            = felec;
1520
1521             /* Calculate temporary vectorial force */
1522             tx               = _mm256_mul_pd(fscal,dx22);
1523             ty               = _mm256_mul_pd(fscal,dy22);
1524             tz               = _mm256_mul_pd(fscal,dz22);
1525
1526             /* Update vectorial force */
1527             fix2             = _mm256_add_pd(fix2,tx);
1528             fiy2             = _mm256_add_pd(fiy2,ty);
1529             fiz2             = _mm256_add_pd(fiz2,tz);
1530
1531             fjx2             = _mm256_add_pd(fjx2,tx);
1532             fjy2             = _mm256_add_pd(fjy2,ty);
1533             fjz2             = _mm256_add_pd(fjz2,tz);
1534
1535             /**************************
1536              * CALCULATE INTERACTIONS *
1537              **************************/
1538
1539             /* COULOMB ELECTROSTATICS */
1540             velec            = _mm256_mul_pd(qq23,rinv23);
1541             felec            = _mm256_mul_pd(velec,rinvsq23);
1542
1543             fscal            = felec;
1544
1545             /* Calculate temporary vectorial force */
1546             tx               = _mm256_mul_pd(fscal,dx23);
1547             ty               = _mm256_mul_pd(fscal,dy23);
1548             tz               = _mm256_mul_pd(fscal,dz23);
1549
1550             /* Update vectorial force */
1551             fix2             = _mm256_add_pd(fix2,tx);
1552             fiy2             = _mm256_add_pd(fiy2,ty);
1553             fiz2             = _mm256_add_pd(fiz2,tz);
1554
1555             fjx3             = _mm256_add_pd(fjx3,tx);
1556             fjy3             = _mm256_add_pd(fjy3,ty);
1557             fjz3             = _mm256_add_pd(fjz3,tz);
1558
1559             /**************************
1560              * CALCULATE INTERACTIONS *
1561              **************************/
1562
1563             /* COULOMB ELECTROSTATICS */
1564             velec            = _mm256_mul_pd(qq31,rinv31);
1565             felec            = _mm256_mul_pd(velec,rinvsq31);
1566
1567             fscal            = felec;
1568
1569             /* Calculate temporary vectorial force */
1570             tx               = _mm256_mul_pd(fscal,dx31);
1571             ty               = _mm256_mul_pd(fscal,dy31);
1572             tz               = _mm256_mul_pd(fscal,dz31);
1573
1574             /* Update vectorial force */
1575             fix3             = _mm256_add_pd(fix3,tx);
1576             fiy3             = _mm256_add_pd(fiy3,ty);
1577             fiz3             = _mm256_add_pd(fiz3,tz);
1578
1579             fjx1             = _mm256_add_pd(fjx1,tx);
1580             fjy1             = _mm256_add_pd(fjy1,ty);
1581             fjz1             = _mm256_add_pd(fjz1,tz);
1582
1583             /**************************
1584              * CALCULATE INTERACTIONS *
1585              **************************/
1586
1587             /* COULOMB ELECTROSTATICS */
1588             velec            = _mm256_mul_pd(qq32,rinv32);
1589             felec            = _mm256_mul_pd(velec,rinvsq32);
1590
1591             fscal            = felec;
1592
1593             /* Calculate temporary vectorial force */
1594             tx               = _mm256_mul_pd(fscal,dx32);
1595             ty               = _mm256_mul_pd(fscal,dy32);
1596             tz               = _mm256_mul_pd(fscal,dz32);
1597
1598             /* Update vectorial force */
1599             fix3             = _mm256_add_pd(fix3,tx);
1600             fiy3             = _mm256_add_pd(fiy3,ty);
1601             fiz3             = _mm256_add_pd(fiz3,tz);
1602
1603             fjx2             = _mm256_add_pd(fjx2,tx);
1604             fjy2             = _mm256_add_pd(fjy2,ty);
1605             fjz2             = _mm256_add_pd(fjz2,tz);
1606
1607             /**************************
1608              * CALCULATE INTERACTIONS *
1609              **************************/
1610
1611             /* COULOMB ELECTROSTATICS */
1612             velec            = _mm256_mul_pd(qq33,rinv33);
1613             felec            = _mm256_mul_pd(velec,rinvsq33);
1614
1615             fscal            = felec;
1616
1617             /* Calculate temporary vectorial force */
1618             tx               = _mm256_mul_pd(fscal,dx33);
1619             ty               = _mm256_mul_pd(fscal,dy33);
1620             tz               = _mm256_mul_pd(fscal,dz33);
1621
1622             /* Update vectorial force */
1623             fix3             = _mm256_add_pd(fix3,tx);
1624             fiy3             = _mm256_add_pd(fiy3,ty);
1625             fiz3             = _mm256_add_pd(fiz3,tz);
1626
1627             fjx3             = _mm256_add_pd(fjx3,tx);
1628             fjy3             = _mm256_add_pd(fjy3,ty);
1629             fjz3             = _mm256_add_pd(fjz3,tz);
1630
1631             fjptrA             = f+j_coord_offsetA;
1632             fjptrB             = f+j_coord_offsetB;
1633             fjptrC             = f+j_coord_offsetC;
1634             fjptrD             = f+j_coord_offsetD;
1635
1636             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1637                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1638                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1639
1640             /* Inner loop uses 285 flops */
1641         }
1642
1643         if(jidx<j_index_end)
1644         {
1645
1646             /* Get j neighbor index, and coordinate index */
1647             jnrlistA         = jjnr[jidx];
1648             jnrlistB         = jjnr[jidx+1];
1649             jnrlistC         = jjnr[jidx+2];
1650             jnrlistD         = jjnr[jidx+3];
1651             /* The j-list is padded with negative indices for non-real atoms; the mask
1652              * built below is all ones for dummy entries and all zeros for real ones,
1653              * so it can be used as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
1654              */
1655             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1656
1657             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1658             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1659             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
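            /* The two permutes duplicate each 32-bit comparison result so that, after
             * combining the 128-bit halves, every 64-bit lane of dummy_mask is either
             * all ones (dummy entry) or all zeros (real atom).
             */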
1660
1661             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1662             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1663             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1664             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1665             j_coord_offsetA  = DIM*jnrA;
1666             j_coord_offsetB  = DIM*jnrB;
1667             j_coord_offsetC  = DIM*jnrC;
1668             j_coord_offsetD  = DIM*jnrD;
1669
1670             /* load j atom coordinates */
1671             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1672                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1673                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1674                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
1675
1676             /* Calculate displacement vector */
1677             dx00             = _mm256_sub_pd(ix0,jx0);
1678             dy00             = _mm256_sub_pd(iy0,jy0);
1679             dz00             = _mm256_sub_pd(iz0,jz0);
1680             dx11             = _mm256_sub_pd(ix1,jx1);
1681             dy11             = _mm256_sub_pd(iy1,jy1);
1682             dz11             = _mm256_sub_pd(iz1,jz1);
1683             dx12             = _mm256_sub_pd(ix1,jx2);
1684             dy12             = _mm256_sub_pd(iy1,jy2);
1685             dz12             = _mm256_sub_pd(iz1,jz2);
1686             dx13             = _mm256_sub_pd(ix1,jx3);
1687             dy13             = _mm256_sub_pd(iy1,jy3);
1688             dz13             = _mm256_sub_pd(iz1,jz3);
1689             dx21             = _mm256_sub_pd(ix2,jx1);
1690             dy21             = _mm256_sub_pd(iy2,jy1);
1691             dz21             = _mm256_sub_pd(iz2,jz1);
1692             dx22             = _mm256_sub_pd(ix2,jx2);
1693             dy22             = _mm256_sub_pd(iy2,jy2);
1694             dz22             = _mm256_sub_pd(iz2,jz2);
1695             dx23             = _mm256_sub_pd(ix2,jx3);
1696             dy23             = _mm256_sub_pd(iy2,jy3);
1697             dz23             = _mm256_sub_pd(iz2,jz3);
1698             dx31             = _mm256_sub_pd(ix3,jx1);
1699             dy31             = _mm256_sub_pd(iy3,jy1);
1700             dz31             = _mm256_sub_pd(iz3,jz1);
1701             dx32             = _mm256_sub_pd(ix3,jx2);
1702             dy32             = _mm256_sub_pd(iy3,jy2);
1703             dz32             = _mm256_sub_pd(iz3,jz2);
1704             dx33             = _mm256_sub_pd(ix3,jx3);
1705             dy33             = _mm256_sub_pd(iy3,jy3);
1706             dz33             = _mm256_sub_pd(iz3,jz3);
1707
1708             /* Calculate squared distance and things based on it */
1709             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1710             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1711             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1712             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1713             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1714             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1715             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1716             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1717             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1718             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1719
1720             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1721             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1722             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1723             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
1724             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1725             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1726             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
1727             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
1728             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
1729             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
1730
1731             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1732             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1733             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
1734             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1735             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1736             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
1737             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
1738             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
1739             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
1740
1741             fjx0             = _mm256_setzero_pd();
1742             fjy0             = _mm256_setzero_pd();
1743             fjz0             = _mm256_setzero_pd();
1744             fjx1             = _mm256_setzero_pd();
1745             fjy1             = _mm256_setzero_pd();
1746             fjz1             = _mm256_setzero_pd();
1747             fjx2             = _mm256_setzero_pd();
1748             fjy2             = _mm256_setzero_pd();
1749             fjz2             = _mm256_setzero_pd();
1750             fjx3             = _mm256_setzero_pd();
1751             fjy3             = _mm256_setzero_pd();
1752             fjz3             = _mm256_setzero_pd();
1753
1754             /**************************
1755              * CALCULATE INTERACTIONS *
1756              **************************/
1757
1758             r00              = _mm256_mul_pd(rsq00,rinv00);
1759             r00              = _mm256_andnot_pd(dummy_mask,r00);
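            /* Clearing r for dummy lanes keeps the table index at zero for padded
             * entries, so the spline lookups below never read outside the table.
             */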
1760
1761             /* Calculate the table index by multiplying r with the table scale and truncating to integer */
1762             rt               = _mm256_mul_pd(r00,vftabscale);
1763             vfitab           = _mm256_cvttpd_epi32(rt);
1764             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1765             vfitab           = _mm_slli_epi32(vfitab,3);
1766
1767             /* CUBIC SPLINE TABLE DISPERSION */
1768             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1769             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1770             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1771             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1772             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1773             Heps             = _mm256_mul_pd(vfeps,H);
1774             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1775             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1776             fvdw6            = _mm256_mul_pd(c6_00,FF);
1777
1778             /* CUBIC SPLINE TABLE REPULSION */
1779             vfitab           = _mm_add_epi32(vfitab,ifour);
1780             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1781             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1782             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1783             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1784             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1785             Heps             = _mm256_mul_pd(vfeps,H);
1786             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1787             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1788             fvdw12           = _mm256_mul_pd(c12_00,FF);
1789             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1790
1791             fscal            = fvdw;
1792
1793             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
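            /* Masking the scalar force ensures that dummy lanes contribute nothing to
             * either the i or the j force accumulators.
             */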
1794
1795             /* Calculate temporary vectorial force */
1796             tx               = _mm256_mul_pd(fscal,dx00);
1797             ty               = _mm256_mul_pd(fscal,dy00);
1798             tz               = _mm256_mul_pd(fscal,dz00);
1799
1800             /* Update vectorial force */
1801             fix0             = _mm256_add_pd(fix0,tx);
1802             fiy0             = _mm256_add_pd(fiy0,ty);
1803             fiz0             = _mm256_add_pd(fiz0,tz);
1804
1805             fjx0             = _mm256_add_pd(fjx0,tx);
1806             fjy0             = _mm256_add_pd(fjy0,ty);
1807             fjz0             = _mm256_add_pd(fjz0,tz);
1808
1809             /**************************
1810              * CALCULATE INTERACTIONS *
1811              **************************/
1812
1813             /* COULOMB ELECTROSTATICS */
1814             velec            = _mm256_mul_pd(qq11,rinv11);
1815             felec            = _mm256_mul_pd(velec,rinvsq11);
1816
1817             fscal            = felec;
1818
1819             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1820
1821             /* Calculate temporary vectorial force */
1822             tx               = _mm256_mul_pd(fscal,dx11);
1823             ty               = _mm256_mul_pd(fscal,dy11);
1824             tz               = _mm256_mul_pd(fscal,dz11);
1825
1826             /* Update vectorial force */
1827             fix1             = _mm256_add_pd(fix1,tx);
1828             fiy1             = _mm256_add_pd(fiy1,ty);
1829             fiz1             = _mm256_add_pd(fiz1,tz);
1830
1831             fjx1             = _mm256_add_pd(fjx1,tx);
1832             fjy1             = _mm256_add_pd(fjy1,ty);
1833             fjz1             = _mm256_add_pd(fjz1,tz);
1834
1835             /**************************
1836              * CALCULATE INTERACTIONS *
1837              **************************/
1838
1839             /* COULOMB ELECTROSTATICS */
1840             velec            = _mm256_mul_pd(qq12,rinv12);
1841             felec            = _mm256_mul_pd(velec,rinvsq12);
1842
1843             fscal            = felec;
1844
1845             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1846
1847             /* Calculate temporary vectorial force */
1848             tx               = _mm256_mul_pd(fscal,dx12);
1849             ty               = _mm256_mul_pd(fscal,dy12);
1850             tz               = _mm256_mul_pd(fscal,dz12);
1851
1852             /* Update vectorial force */
1853             fix1             = _mm256_add_pd(fix1,tx);
1854             fiy1             = _mm256_add_pd(fiy1,ty);
1855             fiz1             = _mm256_add_pd(fiz1,tz);
1856
1857             fjx2             = _mm256_add_pd(fjx2,tx);
1858             fjy2             = _mm256_add_pd(fjy2,ty);
1859             fjz2             = _mm256_add_pd(fjz2,tz);
1860
1861             /**************************
1862              * CALCULATE INTERACTIONS *
1863              **************************/
1864
1865             /* COULOMB ELECTROSTATICS */
1866             velec            = _mm256_mul_pd(qq13,rinv13);
1867             felec            = _mm256_mul_pd(velec,rinvsq13);
1868
1869             fscal            = felec;
1870
1871             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1872
1873             /* Calculate temporary vectorial force */
1874             tx               = _mm256_mul_pd(fscal,dx13);
1875             ty               = _mm256_mul_pd(fscal,dy13);
1876             tz               = _mm256_mul_pd(fscal,dz13);
1877
1878             /* Update vectorial force */
1879             fix1             = _mm256_add_pd(fix1,tx);
1880             fiy1             = _mm256_add_pd(fiy1,ty);
1881             fiz1             = _mm256_add_pd(fiz1,tz);
1882
1883             fjx3             = _mm256_add_pd(fjx3,tx);
1884             fjy3             = _mm256_add_pd(fjy3,ty);
1885             fjz3             = _mm256_add_pd(fjz3,tz);
1886
1887             /**************************
1888              * CALCULATE INTERACTIONS *
1889              **************************/
1890
1891             /* COULOMB ELECTROSTATICS */
1892             velec            = _mm256_mul_pd(qq21,rinv21);
1893             felec            = _mm256_mul_pd(velec,rinvsq21);
1894
1895             fscal            = felec;
1896
1897             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1898
1899             /* Calculate temporary vectorial force */
1900             tx               = _mm256_mul_pd(fscal,dx21);
1901             ty               = _mm256_mul_pd(fscal,dy21);
1902             tz               = _mm256_mul_pd(fscal,dz21);
1903
1904             /* Update vectorial force */
1905             fix2             = _mm256_add_pd(fix2,tx);
1906             fiy2             = _mm256_add_pd(fiy2,ty);
1907             fiz2             = _mm256_add_pd(fiz2,tz);
1908
1909             fjx1             = _mm256_add_pd(fjx1,tx);
1910             fjy1             = _mm256_add_pd(fjy1,ty);
1911             fjz1             = _mm256_add_pd(fjz1,tz);
1912
1913             /**************************
1914              * CALCULATE INTERACTIONS *
1915              **************************/
1916
1917             /* COULOMB ELECTROSTATICS */
1918             velec            = _mm256_mul_pd(qq22,rinv22);
1919             felec            = _mm256_mul_pd(velec,rinvsq22);
1920
1921             fscal            = felec;
1922
1923             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1924
1925             /* Calculate temporary vectorial force */
1926             tx               = _mm256_mul_pd(fscal,dx22);
1927             ty               = _mm256_mul_pd(fscal,dy22);
1928             tz               = _mm256_mul_pd(fscal,dz22);
1929
1930             /* Update vectorial force */
1931             fix2             = _mm256_add_pd(fix2,tx);
1932             fiy2             = _mm256_add_pd(fiy2,ty);
1933             fiz2             = _mm256_add_pd(fiz2,tz);
1934
1935             fjx2             = _mm256_add_pd(fjx2,tx);
1936             fjy2             = _mm256_add_pd(fjy2,ty);
1937             fjz2             = _mm256_add_pd(fjz2,tz);
1938
1939             /**************************
1940              * CALCULATE INTERACTIONS *
1941              **************************/
1942
1943             /* COULOMB ELECTROSTATICS */
1944             velec            = _mm256_mul_pd(qq23,rinv23);
1945             felec            = _mm256_mul_pd(velec,rinvsq23);
1946
1947             fscal            = felec;
1948
1949             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1950
1951             /* Calculate temporary vectorial force */
1952             tx               = _mm256_mul_pd(fscal,dx23);
1953             ty               = _mm256_mul_pd(fscal,dy23);
1954             tz               = _mm256_mul_pd(fscal,dz23);
1955
1956             /* Update vectorial force */
1957             fix2             = _mm256_add_pd(fix2,tx);
1958             fiy2             = _mm256_add_pd(fiy2,ty);
1959             fiz2             = _mm256_add_pd(fiz2,tz);
1960
1961             fjx3             = _mm256_add_pd(fjx3,tx);
1962             fjy3             = _mm256_add_pd(fjy3,ty);
1963             fjz3             = _mm256_add_pd(fjz3,tz);
1964
1965             /**************************
1966              * CALCULATE INTERACTIONS *
1967              **************************/
1968
1969             /* COULOMB ELECTROSTATICS */
1970             velec            = _mm256_mul_pd(qq31,rinv31);
1971             felec            = _mm256_mul_pd(velec,rinvsq31);
1972
1973             fscal            = felec;
1974
1975             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1976
1977             /* Calculate temporary vectorial force */
1978             tx               = _mm256_mul_pd(fscal,dx31);
1979             ty               = _mm256_mul_pd(fscal,dy31);
1980             tz               = _mm256_mul_pd(fscal,dz31);
1981
1982             /* Update vectorial force */
1983             fix3             = _mm256_add_pd(fix3,tx);
1984             fiy3             = _mm256_add_pd(fiy3,ty);
1985             fiz3             = _mm256_add_pd(fiz3,tz);
1986
1987             fjx1             = _mm256_add_pd(fjx1,tx);
1988             fjy1             = _mm256_add_pd(fjy1,ty);
1989             fjz1             = _mm256_add_pd(fjz1,tz);
1990
1991             /**************************
1992              * CALCULATE INTERACTIONS *
1993              **************************/
1994
1995             /* COULOMB ELECTROSTATICS */
1996             velec            = _mm256_mul_pd(qq32,rinv32);
1997             felec            = _mm256_mul_pd(velec,rinvsq32);
1998
1999             fscal            = felec;
2000
2001             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2002
2003             /* Calculate temporary vectorial force */
2004             tx               = _mm256_mul_pd(fscal,dx32);
2005             ty               = _mm256_mul_pd(fscal,dy32);
2006             tz               = _mm256_mul_pd(fscal,dz32);
2007
2008             /* Update vectorial force */
2009             fix3             = _mm256_add_pd(fix3,tx);
2010             fiy3             = _mm256_add_pd(fiy3,ty);
2011             fiz3             = _mm256_add_pd(fiz3,tz);
2012
2013             fjx2             = _mm256_add_pd(fjx2,tx);
2014             fjy2             = _mm256_add_pd(fjy2,ty);
2015             fjz2             = _mm256_add_pd(fjz2,tz);
2016
2017             /**************************
2018              * CALCULATE INTERACTIONS *
2019              **************************/
2020
2021             /* COULOMB ELECTROSTATICS */
2022             velec            = _mm256_mul_pd(qq33,rinv33);
2023             felec            = _mm256_mul_pd(velec,rinvsq33);
2024
2025             fscal            = felec;
2026
2027             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2028
2029             /* Calculate temporary vectorial force */
2030             tx               = _mm256_mul_pd(fscal,dx33);
2031             ty               = _mm256_mul_pd(fscal,dy33);
2032             tz               = _mm256_mul_pd(fscal,dz33);
2033
2034             /* Update vectorial force */
2035             fix3             = _mm256_add_pd(fix3,tx);
2036             fiy3             = _mm256_add_pd(fiy3,ty);
2037             fiz3             = _mm256_add_pd(fiz3,tz);
2038
2039             fjx3             = _mm256_add_pd(fjx3,tx);
2040             fjy3             = _mm256_add_pd(fjy3,ty);
2041             fjz3             = _mm256_add_pd(fjz3,tz);
2042
2043             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2044             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2045             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2046             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
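            /* Forces for dummy lanes are routed to the local scratch buffer so the
             * swizzled store below never touches real atom data for padded entries.
             */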
2047
2048             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2049                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2050                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2051
2052             /* Inner loop uses 286 flops */
2053         }
2054
2055         /* End of innermost loop */
2056
2057         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
2058                                                  f+i_coord_offset,fshift+i_shift_offset);
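        /* gmx_mm256_update_iforce_4atom_swizzle_pd() reduces the per-lane i-particle
         * forces, adds them to f[] for this i water, and accumulates the same sums
         * into fshift[] for the shift-force (virial) bookkeeping.
         */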
2059
2060         /* Increment number of inner iterations */
2061         inneriter                  += j_index_end - j_index_start;
2062
2063         /* Outer loop uses 24 flops */
2064     }
2065
2066     /* Increment number of outer iterations */
2067     outeriter        += nri;
2068
2069     /* Update outer/inner flops */
2070
2071     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*286);
2072 }