1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
38 #include "gmxpre.h"
39
40 #include "config.h"
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "gromacs/legacyheaders/types/simple.h"
46 #include "gromacs/math/vec.h"
47 #include "gromacs/legacyheaders/nrnb.h"
48
49 #include "gromacs/simd/math_x86_avx_256_double.h"
50 #include "kernelutil_x86_avx_256_double.h"
51
52 /*
53  * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_double
54  * Electrostatics interaction: Coulomb
55  * VdW interaction:            CubicSplineTable
56  * Geometry:                   Water4-Water4
57  * Calculate force/pot:        PotentialAndForce
58  */
59 void
60 nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_VF_avx_256_double
61                     (t_nblist                    * gmx_restrict       nlist,
62                      rvec                        * gmx_restrict          xx,
63                      rvec                        * gmx_restrict          ff,
64                      t_forcerec                  * gmx_restrict          fr,
65                      t_mdatoms                   * gmx_restrict     mdatoms,
66                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67                      t_nrnb                      * gmx_restrict        nrnb)
68 {
69     /* Suffixes 0,1,2,3 refer to particle indices for water atoms in the inner or outer loop,
70      * or just 0 for non-water atoms.
71      * Suffixes A,B,C,D refer to the j-loop unrolling done with AVX, i.e. the four different
72      * jnr indices whose data occupy the four lanes of the SIMD register.
73      */
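    /* Example of the naming scheme: after the swizzled load in the inner loop, jz1 is a __m256d
     * whose four lanes hold the z coordinate of atom 1 of the j waters A, B, C and D, and dx12
     * is the x component of the displacement between i atom 1 and j atom 2 for those four pairs.
     */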
74     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
75     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76     int              jnrA,jnrB,jnrC,jnrD;
77     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
79     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
84     real             scratch[4*DIM];
85     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88     real *           vdwioffsetptr1;
89     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90     real *           vdwioffsetptr2;
91     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92     real *           vdwioffsetptr3;
93     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
94     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
95     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
96     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
97     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
98     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
99     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
100     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
101     __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
102     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
103     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
104     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
105     __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
106     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
107     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
108     __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
109     __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
110     __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
111     __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
112     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
113     real             *charge;
114     int              nvdwtype;
115     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
116     int              *vdwtype;
117     real             *vdwparam;
118     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
119     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
120     __m128i          vfitab;
121     __m128i          ifour       = _mm_set1_epi32(4);
122     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
123     real             *vftab;
124     __m256d          dummy_mask,cutoff_mask;
125     __m128           tmpmask0,tmpmask1;
126     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi64x(0x8000000000000000LL) ); /* sign bit of each double */
127     __m256d          one     = _mm256_set1_pd(1.0);
128     __m256d          two     = _mm256_set1_pd(2.0);
129     x                = xx[0];
130     f                = ff[0];
131
132     nri              = nlist->nri;
133     iinr             = nlist->iinr;
134     jindex           = nlist->jindex;
135     jjnr             = nlist->jjnr;
136     shiftidx         = nlist->shift;
137     gid              = nlist->gid;
138     shiftvec         = fr->shift_vec[0];
139     fshift           = fr->fshift[0];
140     facel            = _mm256_set1_pd(fr->epsfac);
141     charge           = mdatoms->chargeA;
142     nvdwtype         = fr->ntype;
143     vdwparam         = fr->nbfp;
144     vdwtype          = mdatoms->typeA;
145
146     vftab            = kernel_data->table_vdw->data;
147     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
148
149     /* Setup water-specific parameters */
150     inr              = nlist->iinr[0];
151     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
152     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
153     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
154     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
155
156     jq1              = _mm256_set1_pd(charge[inr+1]);
157     jq2              = _mm256_set1_pd(charge[inr+2]);
158     jq3              = _mm256_set1_pd(charge[inr+3]);
159     vdwjidx0A        = 2*vdwtype[inr+0];
160     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
161     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
162     qq11             = _mm256_mul_pd(iq1,jq1);
163     qq12             = _mm256_mul_pd(iq1,jq2);
164     qq13             = _mm256_mul_pd(iq1,jq3);
165     qq21             = _mm256_mul_pd(iq2,jq1);
166     qq22             = _mm256_mul_pd(iq2,jq2);
167     qq23             = _mm256_mul_pd(iq2,jq3);
168     qq31             = _mm256_mul_pd(iq3,jq1);
169     qq32             = _mm256_mul_pd(iq3,jq2);
170     qq33             = _mm256_mul_pd(iq3,jq3);
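    /* All waters are identical, so the charge products and the single LJ pair (atom 0 - atom 0)
     * are precomputed once outside the loops: qqXY = epsfac*q[i atom X]*q[j atom Y], and
     * c6_00/c12_00 are read from fr->nbfp, which stores the (c6,c12) pair for a type combination
     * at element offset 2*(ntype*type[i]+type[j]).
     */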
171
172     /* Avoid compiler warnings about possibly uninitialized variables */
173     jnrA = jnrB = jnrC = jnrD = 0;
174     j_coord_offsetA = 0;
175     j_coord_offsetB = 0;
176     j_coord_offsetC = 0;
177     j_coord_offsetD = 0;
178
179     outeriter        = 0;
180     inneriter        = 0;
181
182     for(iidx=0;iidx<4*DIM;iidx++)
183     {
184         scratch[iidx] = 0.0;
185     }
186
187     /* Start outer loop over neighborlists */
188     for(iidx=0; iidx<nri; iidx++)
189     {
190         /* Load shift vector for this list */
191         i_shift_offset   = DIM*shiftidx[iidx];
192
193         /* Load limits for loop over neighbors */
194         j_index_start    = jindex[iidx];
195         j_index_end      = jindex[iidx+1];
196
197         /* Get outer coordinate index */
198         inr              = iinr[iidx];
199         i_coord_offset   = DIM*inr;
200
201         /* Load i particle coords and add shift vector */
202         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
203                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
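        /* The helper loads the four atoms of the i water, adds the periodic shift vector, and
         * broadcasts each coordinate to all four SIMD lanes, since one i water is paired with
         * four j waters at a time.
         */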
204
205         fix0             = _mm256_setzero_pd();
206         fiy0             = _mm256_setzero_pd();
207         fiz0             = _mm256_setzero_pd();
208         fix1             = _mm256_setzero_pd();
209         fiy1             = _mm256_setzero_pd();
210         fiz1             = _mm256_setzero_pd();
211         fix2             = _mm256_setzero_pd();
212         fiy2             = _mm256_setzero_pd();
213         fiz2             = _mm256_setzero_pd();
214         fix3             = _mm256_setzero_pd();
215         fiy3             = _mm256_setzero_pd();
216         fiz3             = _mm256_setzero_pd();
217
218         /* Reset potential sums */
219         velecsum         = _mm256_setzero_pd();
220         vvdwsum          = _mm256_setzero_pd();
221
222         /* Start inner kernel loop */
223         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
224         {
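            /* The loop condition jjnr[jidx+3]>=0 guarantees that all four lanes hold real j atoms;
             * the neighbor list is padded with negative indices, so any partially filled quad is
             * handled by the masked epilogue after this loop.
             */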
225
226             /* Get j neighbor index, and coordinate index */
227             jnrA             = jjnr[jidx];
228             jnrB             = jjnr[jidx+1];
229             jnrC             = jjnr[jidx+2];
230             jnrD             = jjnr[jidx+3];
231             j_coord_offsetA  = DIM*jnrA;
232             j_coord_offsetB  = DIM*jnrB;
233             j_coord_offsetC  = DIM*jnrC;
234             j_coord_offsetD  = DIM*jnrD;
235
236             /* load j atom coordinates */
237             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
238                                                  x+j_coord_offsetC,x+j_coord_offsetD,
239                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
240                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
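            /* Gather the coordinates of the four atoms of j waters A-D and transpose them so that
             * each __m256d (jx0..jz3) holds one coordinate of one atom across the four lanes.
             */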
241
242             /* Calculate displacement vector */
243             dx00             = _mm256_sub_pd(ix0,jx0);
244             dy00             = _mm256_sub_pd(iy0,jy0);
245             dz00             = _mm256_sub_pd(iz0,jz0);
246             dx11             = _mm256_sub_pd(ix1,jx1);
247             dy11             = _mm256_sub_pd(iy1,jy1);
248             dz11             = _mm256_sub_pd(iz1,jz1);
249             dx12             = _mm256_sub_pd(ix1,jx2);
250             dy12             = _mm256_sub_pd(iy1,jy2);
251             dz12             = _mm256_sub_pd(iz1,jz2);
252             dx13             = _mm256_sub_pd(ix1,jx3);
253             dy13             = _mm256_sub_pd(iy1,jy3);
254             dz13             = _mm256_sub_pd(iz1,jz3);
255             dx21             = _mm256_sub_pd(ix2,jx1);
256             dy21             = _mm256_sub_pd(iy2,jy1);
257             dz21             = _mm256_sub_pd(iz2,jz1);
258             dx22             = _mm256_sub_pd(ix2,jx2);
259             dy22             = _mm256_sub_pd(iy2,jy2);
260             dz22             = _mm256_sub_pd(iz2,jz2);
261             dx23             = _mm256_sub_pd(ix2,jx3);
262             dy23             = _mm256_sub_pd(iy2,jy3);
263             dz23             = _mm256_sub_pd(iz2,jz3);
264             dx31             = _mm256_sub_pd(ix3,jx1);
265             dy31             = _mm256_sub_pd(iy3,jy1);
266             dz31             = _mm256_sub_pd(iz3,jz1);
267             dx32             = _mm256_sub_pd(ix3,jx2);
268             dy32             = _mm256_sub_pd(iy3,jy2);
269             dz32             = _mm256_sub_pd(iz3,jz2);
270             dx33             = _mm256_sub_pd(ix3,jx3);
271             dy33             = _mm256_sub_pd(iy3,jy3);
272             dz33             = _mm256_sub_pd(iz3,jz3);
273
274             /* Calculate squared distance and things based on it */
275             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
276             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
277             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
278             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
279             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
280             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
281             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
282             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
283             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
284             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
285
286             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
287             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
288             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
289             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
290             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
291             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
292             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
293             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
294             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
295             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
296
297             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
298             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
299             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
300             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
301             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
302             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
303             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
304             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
305             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
306
307             fjx0             = _mm256_setzero_pd();
308             fjy0             = _mm256_setzero_pd();
309             fjz0             = _mm256_setzero_pd();
310             fjx1             = _mm256_setzero_pd();
311             fjy1             = _mm256_setzero_pd();
312             fjz1             = _mm256_setzero_pd();
313             fjx2             = _mm256_setzero_pd();
314             fjy2             = _mm256_setzero_pd();
315             fjz2             = _mm256_setzero_pd();
316             fjx3             = _mm256_setzero_pd();
317             fjy3             = _mm256_setzero_pd();
318             fjz3             = _mm256_setzero_pd();
319
320             /**************************
321              * CALCULATE INTERACTIONS *
322              **************************/
323
324             r00              = _mm256_mul_pd(rsq00,rinv00);
325
326             /* Calculate table index by multiplying r with table scale and truncate to integer */
327             rt               = _mm256_mul_pd(r00,vftabscale);
328             vfitab           = _mm256_cvttpd_epi32(rt);
329             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
330             vfitab           = _mm_slli_epi32(vfitab,3);
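            /* Equivalent scalar indexing (sketch):
             *   rt  = r*tabscale;
             *   n   = (int)rt;    eps = rt - n;
             *   ofs = 8*n;        // 8 doubles per table point: Y,F,G,H (dispersion) + Y,F,G,H (repulsion)
             */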
331
332             /* CUBIC SPLINE TABLE DISPERSION */
333             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
334             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
335             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
336             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
337             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
338             Heps             = _mm256_mul_pd(vfeps,H);
339             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
340             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
341             vvdw6            = _mm256_mul_pd(c6_00,VV);
342             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
343             fvdw6            = _mm256_mul_pd(c6_00,FF);
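            /* Cubic spline evaluation with Heps = eps*H and Fp = F + eps*(G + Heps):
             *   VV = Y + eps*(F + eps*(G + eps*H))        (table potential)
             *   FF = F + eps*(2*G + 3*eps*H) = dVV/deps   (used for the force below)
             */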
344
345             /* CUBIC SPLINE TABLE REPULSION */
346             vfitab           = _mm_add_epi32(vfitab,ifour);
347             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
348             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
349             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
350             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
351             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
352             Heps             = _mm256_mul_pd(vfeps,H);
353             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
354             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
355             vvdw12           = _mm256_mul_pd(c12_00,VV);
356             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
357             fvdw12           = _mm256_mul_pd(c12_00,FF);
358             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
359             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
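            /* fscal is the scalar prefactor of the force along the connecting vector:
             *   F_i = fscal*(r_i - r_j),   fscal = -(fvdw6 + fvdw12)*tabscale/r,
             * since dV/dr = (c6*FF_disp + c12*FF_rep)*tabscale and the force is -dV/dr*(r_i - r_j)/r.
             * The XOR with signbit implements the negation.
             */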
360
361             /* Update potential sum for this i atom from the interaction with this j atom. */
362             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
363
364             fscal            = fvdw;
365
366             /* Calculate temporary vectorial force */
367             tx               = _mm256_mul_pd(fscal,dx00);
368             ty               = _mm256_mul_pd(fscal,dy00);
369             tz               = _mm256_mul_pd(fscal,dz00);
370
371             /* Update vectorial force */
372             fix0             = _mm256_add_pd(fix0,tx);
373             fiy0             = _mm256_add_pd(fiy0,ty);
374             fiz0             = _mm256_add_pd(fiz0,tz);
375
376             fjx0             = _mm256_add_pd(fjx0,tx);
377             fjy0             = _mm256_add_pd(fjy0,ty);
378             fjz0             = _mm256_add_pd(fjz0,tz);
379
380             /**************************
381              * CALCULATE INTERACTIONS *
382              **************************/
383
384             /* COULOMB ELECTROSTATICS */
385             velec            = _mm256_mul_pd(qq11,rinv11);
386             felec            = _mm256_mul_pd(velec,rinvsq11);
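            /* Plain Coulomb in scalar form: velec = qq/r and felec = qq/r^3 (qq already contains
             * the epsfac prefactor), so the force contribution is felec*(r_i - r_j). The same
             * pattern repeats for the remaining eight charged atom pairs below.
             */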
387
388             /* Update potential sum for this i atom from the interaction with this j atom. */
389             velecsum         = _mm256_add_pd(velecsum,velec);
390
391             fscal            = felec;
392
393             /* Calculate temporary vectorial force */
394             tx               = _mm256_mul_pd(fscal,dx11);
395             ty               = _mm256_mul_pd(fscal,dy11);
396             tz               = _mm256_mul_pd(fscal,dz11);
397
398             /* Update vectorial force */
399             fix1             = _mm256_add_pd(fix1,tx);
400             fiy1             = _mm256_add_pd(fiy1,ty);
401             fiz1             = _mm256_add_pd(fiz1,tz);
402
403             fjx1             = _mm256_add_pd(fjx1,tx);
404             fjy1             = _mm256_add_pd(fjy1,ty);
405             fjz1             = _mm256_add_pd(fjz1,tz);
406
407             /**************************
408              * CALCULATE INTERACTIONS *
409              **************************/
410
411             /* COULOMB ELECTROSTATICS */
412             velec            = _mm256_mul_pd(qq12,rinv12);
413             felec            = _mm256_mul_pd(velec,rinvsq12);
414
415             /* Update potential sum for this i atom from the interaction with this j atom. */
416             velecsum         = _mm256_add_pd(velecsum,velec);
417
418             fscal            = felec;
419
420             /* Calculate temporary vectorial force */
421             tx               = _mm256_mul_pd(fscal,dx12);
422             ty               = _mm256_mul_pd(fscal,dy12);
423             tz               = _mm256_mul_pd(fscal,dz12);
424
425             /* Update vectorial force */
426             fix1             = _mm256_add_pd(fix1,tx);
427             fiy1             = _mm256_add_pd(fiy1,ty);
428             fiz1             = _mm256_add_pd(fiz1,tz);
429
430             fjx2             = _mm256_add_pd(fjx2,tx);
431             fjy2             = _mm256_add_pd(fjy2,ty);
432             fjz2             = _mm256_add_pd(fjz2,tz);
433
434             /**************************
435              * CALCULATE INTERACTIONS *
436              **************************/
437
438             /* COULOMB ELECTROSTATICS */
439             velec            = _mm256_mul_pd(qq13,rinv13);
440             felec            = _mm256_mul_pd(velec,rinvsq13);
441
442             /* Update potential sum for this i atom from the interaction with this j atom. */
443             velecsum         = _mm256_add_pd(velecsum,velec);
444
445             fscal            = felec;
446
447             /* Calculate temporary vectorial force */
448             tx               = _mm256_mul_pd(fscal,dx13);
449             ty               = _mm256_mul_pd(fscal,dy13);
450             tz               = _mm256_mul_pd(fscal,dz13);
451
452             /* Update vectorial force */
453             fix1             = _mm256_add_pd(fix1,tx);
454             fiy1             = _mm256_add_pd(fiy1,ty);
455             fiz1             = _mm256_add_pd(fiz1,tz);
456
457             fjx3             = _mm256_add_pd(fjx3,tx);
458             fjy3             = _mm256_add_pd(fjy3,ty);
459             fjz3             = _mm256_add_pd(fjz3,tz);
460
461             /**************************
462              * CALCULATE INTERACTIONS *
463              **************************/
464
465             /* COULOMB ELECTROSTATICS */
466             velec            = _mm256_mul_pd(qq21,rinv21);
467             felec            = _mm256_mul_pd(velec,rinvsq21);
468
469             /* Update potential sum for this i atom from the interaction with this j atom. */
470             velecsum         = _mm256_add_pd(velecsum,velec);
471
472             fscal            = felec;
473
474             /* Calculate temporary vectorial force */
475             tx               = _mm256_mul_pd(fscal,dx21);
476             ty               = _mm256_mul_pd(fscal,dy21);
477             tz               = _mm256_mul_pd(fscal,dz21);
478
479             /* Update vectorial force */
480             fix2             = _mm256_add_pd(fix2,tx);
481             fiy2             = _mm256_add_pd(fiy2,ty);
482             fiz2             = _mm256_add_pd(fiz2,tz);
483
484             fjx1             = _mm256_add_pd(fjx1,tx);
485             fjy1             = _mm256_add_pd(fjy1,ty);
486             fjz1             = _mm256_add_pd(fjz1,tz);
487
488             /**************************
489              * CALCULATE INTERACTIONS *
490              **************************/
491
492             /* COULOMB ELECTROSTATICS */
493             velec            = _mm256_mul_pd(qq22,rinv22);
494             felec            = _mm256_mul_pd(velec,rinvsq22);
495
496             /* Update potential sum for this i atom from the interaction with this j atom. */
497             velecsum         = _mm256_add_pd(velecsum,velec);
498
499             fscal            = felec;
500
501             /* Calculate temporary vectorial force */
502             tx               = _mm256_mul_pd(fscal,dx22);
503             ty               = _mm256_mul_pd(fscal,dy22);
504             tz               = _mm256_mul_pd(fscal,dz22);
505
506             /* Update vectorial force */
507             fix2             = _mm256_add_pd(fix2,tx);
508             fiy2             = _mm256_add_pd(fiy2,ty);
509             fiz2             = _mm256_add_pd(fiz2,tz);
510
511             fjx2             = _mm256_add_pd(fjx2,tx);
512             fjy2             = _mm256_add_pd(fjy2,ty);
513             fjz2             = _mm256_add_pd(fjz2,tz);
514
515             /**************************
516              * CALCULATE INTERACTIONS *
517              **************************/
518
519             /* COULOMB ELECTROSTATICS */
520             velec            = _mm256_mul_pd(qq23,rinv23);
521             felec            = _mm256_mul_pd(velec,rinvsq23);
522
523             /* Update potential sum for this i atom from the interaction with this j atom. */
524             velecsum         = _mm256_add_pd(velecsum,velec);
525
526             fscal            = felec;
527
528             /* Calculate temporary vectorial force */
529             tx               = _mm256_mul_pd(fscal,dx23);
530             ty               = _mm256_mul_pd(fscal,dy23);
531             tz               = _mm256_mul_pd(fscal,dz23);
532
533             /* Update vectorial force */
534             fix2             = _mm256_add_pd(fix2,tx);
535             fiy2             = _mm256_add_pd(fiy2,ty);
536             fiz2             = _mm256_add_pd(fiz2,tz);
537
538             fjx3             = _mm256_add_pd(fjx3,tx);
539             fjy3             = _mm256_add_pd(fjy3,ty);
540             fjz3             = _mm256_add_pd(fjz3,tz);
541
542             /**************************
543              * CALCULATE INTERACTIONS *
544              **************************/
545
546             /* COULOMB ELECTROSTATICS */
547             velec            = _mm256_mul_pd(qq31,rinv31);
548             felec            = _mm256_mul_pd(velec,rinvsq31);
549
550             /* Update potential sum for this i atom from the interaction with this j atom. */
551             velecsum         = _mm256_add_pd(velecsum,velec);
552
553             fscal            = felec;
554
555             /* Calculate temporary vectorial force */
556             tx               = _mm256_mul_pd(fscal,dx31);
557             ty               = _mm256_mul_pd(fscal,dy31);
558             tz               = _mm256_mul_pd(fscal,dz31);
559
560             /* Update vectorial force */
561             fix3             = _mm256_add_pd(fix3,tx);
562             fiy3             = _mm256_add_pd(fiy3,ty);
563             fiz3             = _mm256_add_pd(fiz3,tz);
564
565             fjx1             = _mm256_add_pd(fjx1,tx);
566             fjy1             = _mm256_add_pd(fjy1,ty);
567             fjz1             = _mm256_add_pd(fjz1,tz);
568
569             /**************************
570              * CALCULATE INTERACTIONS *
571              **************************/
572
573             /* COULOMB ELECTROSTATICS */
574             velec            = _mm256_mul_pd(qq32,rinv32);
575             felec            = _mm256_mul_pd(velec,rinvsq32);
576
577             /* Update potential sum for this i atom from the interaction with this j atom. */
578             velecsum         = _mm256_add_pd(velecsum,velec);
579
580             fscal            = felec;
581
582             /* Calculate temporary vectorial force */
583             tx               = _mm256_mul_pd(fscal,dx32);
584             ty               = _mm256_mul_pd(fscal,dy32);
585             tz               = _mm256_mul_pd(fscal,dz32);
586
587             /* Update vectorial force */
588             fix3             = _mm256_add_pd(fix3,tx);
589             fiy3             = _mm256_add_pd(fiy3,ty);
590             fiz3             = _mm256_add_pd(fiz3,tz);
591
592             fjx2             = _mm256_add_pd(fjx2,tx);
593             fjy2             = _mm256_add_pd(fjy2,ty);
594             fjz2             = _mm256_add_pd(fjz2,tz);
595
596             /**************************
597              * CALCULATE INTERACTIONS *
598              **************************/
599
600             /* COULOMB ELECTROSTATICS */
601             velec            = _mm256_mul_pd(qq33,rinv33);
602             felec            = _mm256_mul_pd(velec,rinvsq33);
603
604             /* Update potential sum for this i atom from the interaction with this j atom. */
605             velecsum         = _mm256_add_pd(velecsum,velec);
606
607             fscal            = felec;
608
609             /* Calculate temporary vectorial force */
610             tx               = _mm256_mul_pd(fscal,dx33);
611             ty               = _mm256_mul_pd(fscal,dy33);
612             tz               = _mm256_mul_pd(fscal,dz33);
613
614             /* Update vectorial force */
615             fix3             = _mm256_add_pd(fix3,tx);
616             fiy3             = _mm256_add_pd(fiy3,ty);
617             fiz3             = _mm256_add_pd(fiz3,tz);
618
619             fjx3             = _mm256_add_pd(fjx3,tx);
620             fjy3             = _mm256_add_pd(fjy3,ty);
621             fjz3             = _mm256_add_pd(fjz3,tz);
622
623             fjptrA             = f+j_coord_offsetA;
624             fjptrB             = f+j_coord_offsetB;
625             fjptrC             = f+j_coord_offsetC;
626             fjptrD             = f+j_coord_offsetD;
627
628             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
629                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
630                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
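            /* Newton's third law: the accumulated fjx/fjy/fjz are subtracted from the stored
             * j forces by the decrement helper, mirroring the additions made to the i-atom
             * accumulators above.
             */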
631
632             /* Inner loop uses 302 flops */
633         }
634
635         if(jidx<j_index_end)
636         {
637
638             /* Get j neighbor index, and coordinate index */
639             jnrlistA         = jjnr[jidx];
640             jnrlistB         = jjnr[jidx+1];
641             jnrlistC         = jjnr[jidx+2];
642             jnrlistD         = jjnr[jidx+3];
643             /* Sign of each element will be negative for non-real atoms.
644              * This mask will have all bits set in each 64-bit lane for dummy entries and 0x0 for real ones,
645              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
646              */
647             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
648
649             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
650             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
651             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
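            /* Each 32-bit comparison result (jnr<0) is duplicated by the two permutes so that it
             * fills the corresponding 64-bit double lane of dummy_mask: lanes A,B in the low
             * 128-bit half, lanes C,D in the high half.
             */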
652
653             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
654             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
655             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
656             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
657             j_coord_offsetA  = DIM*jnrA;
658             j_coord_offsetB  = DIM*jnrB;
659             j_coord_offsetC  = DIM*jnrC;
660             j_coord_offsetD  = DIM*jnrD;
661
662             /* load j atom coordinates */
663             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
664                                                  x+j_coord_offsetC,x+j_coord_offsetD,
665                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
666                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
667
668             /* Calculate displacement vector */
669             dx00             = _mm256_sub_pd(ix0,jx0);
670             dy00             = _mm256_sub_pd(iy0,jy0);
671             dz00             = _mm256_sub_pd(iz0,jz0);
672             dx11             = _mm256_sub_pd(ix1,jx1);
673             dy11             = _mm256_sub_pd(iy1,jy1);
674             dz11             = _mm256_sub_pd(iz1,jz1);
675             dx12             = _mm256_sub_pd(ix1,jx2);
676             dy12             = _mm256_sub_pd(iy1,jy2);
677             dz12             = _mm256_sub_pd(iz1,jz2);
678             dx13             = _mm256_sub_pd(ix1,jx3);
679             dy13             = _mm256_sub_pd(iy1,jy3);
680             dz13             = _mm256_sub_pd(iz1,jz3);
681             dx21             = _mm256_sub_pd(ix2,jx1);
682             dy21             = _mm256_sub_pd(iy2,jy1);
683             dz21             = _mm256_sub_pd(iz2,jz1);
684             dx22             = _mm256_sub_pd(ix2,jx2);
685             dy22             = _mm256_sub_pd(iy2,jy2);
686             dz22             = _mm256_sub_pd(iz2,jz2);
687             dx23             = _mm256_sub_pd(ix2,jx3);
688             dy23             = _mm256_sub_pd(iy2,jy3);
689             dz23             = _mm256_sub_pd(iz2,jz3);
690             dx31             = _mm256_sub_pd(ix3,jx1);
691             dy31             = _mm256_sub_pd(iy3,jy1);
692             dz31             = _mm256_sub_pd(iz3,jz1);
693             dx32             = _mm256_sub_pd(ix3,jx2);
694             dy32             = _mm256_sub_pd(iy3,jy2);
695             dz32             = _mm256_sub_pd(iz3,jz2);
696             dx33             = _mm256_sub_pd(ix3,jx3);
697             dy33             = _mm256_sub_pd(iy3,jy3);
698             dz33             = _mm256_sub_pd(iz3,jz3);
699
700             /* Calculate squared distance and things based on it */
701             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
702             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
703             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
704             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
705             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
706             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
707             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
708             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
709             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
710             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
711
712             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
713             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
714             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
715             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
716             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
717             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
718             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
719             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
720             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
721             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
722
723             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
724             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
725             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
726             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
727             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
728             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
729             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
730             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
731             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
732
733             fjx0             = _mm256_setzero_pd();
734             fjy0             = _mm256_setzero_pd();
735             fjz0             = _mm256_setzero_pd();
736             fjx1             = _mm256_setzero_pd();
737             fjy1             = _mm256_setzero_pd();
738             fjz1             = _mm256_setzero_pd();
739             fjx2             = _mm256_setzero_pd();
740             fjy2             = _mm256_setzero_pd();
741             fjz2             = _mm256_setzero_pd();
742             fjx3             = _mm256_setzero_pd();
743             fjy3             = _mm256_setzero_pd();
744             fjz3             = _mm256_setzero_pd();
745
746             /**************************
747              * CALCULATE INTERACTIONS *
748              **************************/
749
750             r00              = _mm256_mul_pd(rsq00,rinv00);
751             r00              = _mm256_andnot_pd(dummy_mask,r00);
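            /* Zeroing r for the dummy lanes keeps their table index at 0, so the gather below never
             * reads outside vftab; their contributions are removed again via dummy_mask before
             * being accumulated.
             */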
752
753             /* Calculate table index by multiplying r with table scale and truncate to integer */
754             rt               = _mm256_mul_pd(r00,vftabscale);
755             vfitab           = _mm256_cvttpd_epi32(rt);
756             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
757             vfitab           = _mm_slli_epi32(vfitab,3);
758
759             /* CUBIC SPLINE TABLE DISPERSION */
760             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
761             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
762             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
763             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
764             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
765             Heps             = _mm256_mul_pd(vfeps,H);
766             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
767             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
768             vvdw6            = _mm256_mul_pd(c6_00,VV);
769             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
770             fvdw6            = _mm256_mul_pd(c6_00,FF);
771
772             /* CUBIC SPLINE TABLE REPULSION */
773             vfitab           = _mm_add_epi32(vfitab,ifour);
774             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
775             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
776             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
777             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
778             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
779             Heps             = _mm256_mul_pd(vfeps,H);
780             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
781             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
782             vvdw12           = _mm256_mul_pd(c12_00,VV);
783             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
784             fvdw12           = _mm256_mul_pd(c12_00,FF);
785             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
786             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
787
788             /* Update potential sum for this i atom from the interaction with this j atom. */
789             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
790             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
791
792             fscal            = fvdw;
793
794             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
795
796             /* Calculate temporary vectorial force */
797             tx               = _mm256_mul_pd(fscal,dx00);
798             ty               = _mm256_mul_pd(fscal,dy00);
799             tz               = _mm256_mul_pd(fscal,dz00);
800
801             /* Update vectorial force */
802             fix0             = _mm256_add_pd(fix0,tx);
803             fiy0             = _mm256_add_pd(fiy0,ty);
804             fiz0             = _mm256_add_pd(fiz0,tz);
805
806             fjx0             = _mm256_add_pd(fjx0,tx);
807             fjy0             = _mm256_add_pd(fjy0,ty);
808             fjz0             = _mm256_add_pd(fjz0,tz);
809
810             /**************************
811              * CALCULATE INTERACTIONS *
812              **************************/
813
814             /* COULOMB ELECTROSTATICS */
815             velec            = _mm256_mul_pd(qq11,rinv11);
816             felec            = _mm256_mul_pd(velec,rinvsq11);
817
818             /* Update potential sum for this i atom from the interaction with this j atom. */
819             velec            = _mm256_andnot_pd(dummy_mask,velec);
820             velecsum         = _mm256_add_pd(velecsum,velec);
821
822             fscal            = felec;
823
824             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
825
826             /* Calculate temporary vectorial force */
827             tx               = _mm256_mul_pd(fscal,dx11);
828             ty               = _mm256_mul_pd(fscal,dy11);
829             tz               = _mm256_mul_pd(fscal,dz11);
830
831             /* Update vectorial force */
832             fix1             = _mm256_add_pd(fix1,tx);
833             fiy1             = _mm256_add_pd(fiy1,ty);
834             fiz1             = _mm256_add_pd(fiz1,tz);
835
836             fjx1             = _mm256_add_pd(fjx1,tx);
837             fjy1             = _mm256_add_pd(fjy1,ty);
838             fjz1             = _mm256_add_pd(fjz1,tz);
839
840             /**************************
841              * CALCULATE INTERACTIONS *
842              **************************/
843
844             /* COULOMB ELECTROSTATICS */
845             velec            = _mm256_mul_pd(qq12,rinv12);
846             felec            = _mm256_mul_pd(velec,rinvsq12);
847
848             /* Update potential sum for this i atom from the interaction with this j atom. */
849             velec            = _mm256_andnot_pd(dummy_mask,velec);
850             velecsum         = _mm256_add_pd(velecsum,velec);
851
852             fscal            = felec;
853
854             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
855
856             /* Calculate temporary vectorial force */
857             tx               = _mm256_mul_pd(fscal,dx12);
858             ty               = _mm256_mul_pd(fscal,dy12);
859             tz               = _mm256_mul_pd(fscal,dz12);
860
861             /* Update vectorial force */
862             fix1             = _mm256_add_pd(fix1,tx);
863             fiy1             = _mm256_add_pd(fiy1,ty);
864             fiz1             = _mm256_add_pd(fiz1,tz);
865
866             fjx2             = _mm256_add_pd(fjx2,tx);
867             fjy2             = _mm256_add_pd(fjy2,ty);
868             fjz2             = _mm256_add_pd(fjz2,tz);
869
870             /**************************
871              * CALCULATE INTERACTIONS *
872              **************************/
873
874             /* COULOMB ELECTROSTATICS */
875             velec            = _mm256_mul_pd(qq13,rinv13);
876             felec            = _mm256_mul_pd(velec,rinvsq13);
877
878             /* Update potential sum for this i atom from the interaction with this j atom. */
879             velec            = _mm256_andnot_pd(dummy_mask,velec);
880             velecsum         = _mm256_add_pd(velecsum,velec);
881
882             fscal            = felec;
883
884             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
885
886             /* Calculate temporary vectorial force */
887             tx               = _mm256_mul_pd(fscal,dx13);
888             ty               = _mm256_mul_pd(fscal,dy13);
889             tz               = _mm256_mul_pd(fscal,dz13);
890
891             /* Update vectorial force */
892             fix1             = _mm256_add_pd(fix1,tx);
893             fiy1             = _mm256_add_pd(fiy1,ty);
894             fiz1             = _mm256_add_pd(fiz1,tz);
895
896             fjx3             = _mm256_add_pd(fjx3,tx);
897             fjy3             = _mm256_add_pd(fjy3,ty);
898             fjz3             = _mm256_add_pd(fjz3,tz);
899
900             /**************************
901              * CALCULATE INTERACTIONS *
902              **************************/
903
904             /* COULOMB ELECTROSTATICS */
905             velec            = _mm256_mul_pd(qq21,rinv21);
906             felec            = _mm256_mul_pd(velec,rinvsq21);
907
908             /* Update potential sum for this i atom from the interaction with this j atom. */
909             velec            = _mm256_andnot_pd(dummy_mask,velec);
910             velecsum         = _mm256_add_pd(velecsum,velec);
911
912             fscal            = felec;
913
914             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
915
916             /* Calculate temporary vectorial force */
917             tx               = _mm256_mul_pd(fscal,dx21);
918             ty               = _mm256_mul_pd(fscal,dy21);
919             tz               = _mm256_mul_pd(fscal,dz21);
920
921             /* Update vectorial force */
922             fix2             = _mm256_add_pd(fix2,tx);
923             fiy2             = _mm256_add_pd(fiy2,ty);
924             fiz2             = _mm256_add_pd(fiz2,tz);
925
926             fjx1             = _mm256_add_pd(fjx1,tx);
927             fjy1             = _mm256_add_pd(fjy1,ty);
928             fjz1             = _mm256_add_pd(fjz1,tz);
929
930             /**************************
931              * CALCULATE INTERACTIONS *
932              **************************/
933
934             /* COULOMB ELECTROSTATICS */
935             velec            = _mm256_mul_pd(qq22,rinv22);
936             felec            = _mm256_mul_pd(velec,rinvsq22);
937
938             /* Update potential sum for this i atom from the interaction with this j atom. */
939             velec            = _mm256_andnot_pd(dummy_mask,velec);
940             velecsum         = _mm256_add_pd(velecsum,velec);
941
942             fscal            = felec;
943
944             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
945
946             /* Calculate temporary vectorial force */
947             tx               = _mm256_mul_pd(fscal,dx22);
948             ty               = _mm256_mul_pd(fscal,dy22);
949             tz               = _mm256_mul_pd(fscal,dz22);
950
951             /* Update vectorial force */
952             fix2             = _mm256_add_pd(fix2,tx);
953             fiy2             = _mm256_add_pd(fiy2,ty);
954             fiz2             = _mm256_add_pd(fiz2,tz);
955
956             fjx2             = _mm256_add_pd(fjx2,tx);
957             fjy2             = _mm256_add_pd(fjy2,ty);
958             fjz2             = _mm256_add_pd(fjz2,tz);
959
960             /**************************
961              * CALCULATE INTERACTIONS *
962              **************************/
963
964             /* COULOMB ELECTROSTATICS */
965             velec            = _mm256_mul_pd(qq23,rinv23);
966             felec            = _mm256_mul_pd(velec,rinvsq23);
967
968             /* Update potential sum for this i atom from the interaction with this j atom. */
969             velec            = _mm256_andnot_pd(dummy_mask,velec);
970             velecsum         = _mm256_add_pd(velecsum,velec);
971
972             fscal            = felec;
973
974             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
975
976             /* Calculate temporary vectorial force */
977             tx               = _mm256_mul_pd(fscal,dx23);
978             ty               = _mm256_mul_pd(fscal,dy23);
979             tz               = _mm256_mul_pd(fscal,dz23);
980
981             /* Update vectorial force */
982             fix2             = _mm256_add_pd(fix2,tx);
983             fiy2             = _mm256_add_pd(fiy2,ty);
984             fiz2             = _mm256_add_pd(fiz2,tz);
985
986             fjx3             = _mm256_add_pd(fjx3,tx);
987             fjy3             = _mm256_add_pd(fjy3,ty);
988             fjz3             = _mm256_add_pd(fjz3,tz);
989
990             /**************************
991              * CALCULATE INTERACTIONS *
992              **************************/
993
994             /* COULOMB ELECTROSTATICS */
995             velec            = _mm256_mul_pd(qq31,rinv31);
996             felec            = _mm256_mul_pd(velec,rinvsq31);
997
998             /* Update potential sum for this i atom from the interaction with this j atom. */
999             velec            = _mm256_andnot_pd(dummy_mask,velec);
1000             velecsum         = _mm256_add_pd(velecsum,velec);
1001
1002             fscal            = felec;
1003
1004             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1005
1006             /* Calculate temporary vectorial force */
1007             tx               = _mm256_mul_pd(fscal,dx31);
1008             ty               = _mm256_mul_pd(fscal,dy31);
1009             tz               = _mm256_mul_pd(fscal,dz31);
1010
1011             /* Update vectorial force */
1012             fix3             = _mm256_add_pd(fix3,tx);
1013             fiy3             = _mm256_add_pd(fiy3,ty);
1014             fiz3             = _mm256_add_pd(fiz3,tz);
1015
1016             fjx1             = _mm256_add_pd(fjx1,tx);
1017             fjy1             = _mm256_add_pd(fjy1,ty);
1018             fjz1             = _mm256_add_pd(fjz1,tz);
1019
1020             /**************************
1021              * CALCULATE INTERACTIONS *
1022              **************************/
1023
1024             /* COULOMB ELECTROSTATICS */
1025             velec            = _mm256_mul_pd(qq32,rinv32);
1026             felec            = _mm256_mul_pd(velec,rinvsq32);
1027
1028             /* Update potential sum for this i atom from the interaction with this j atom. */
1029             velec            = _mm256_andnot_pd(dummy_mask,velec);
1030             velecsum         = _mm256_add_pd(velecsum,velec);
1031
1032             fscal            = felec;
1033
1034             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1035
1036             /* Calculate temporary vectorial force */
1037             tx               = _mm256_mul_pd(fscal,dx32);
1038             ty               = _mm256_mul_pd(fscal,dy32);
1039             tz               = _mm256_mul_pd(fscal,dz32);
1040
1041             /* Update vectorial force */
1042             fix3             = _mm256_add_pd(fix3,tx);
1043             fiy3             = _mm256_add_pd(fiy3,ty);
1044             fiz3             = _mm256_add_pd(fiz3,tz);
1045
1046             fjx2             = _mm256_add_pd(fjx2,tx);
1047             fjy2             = _mm256_add_pd(fjy2,ty);
1048             fjz2             = _mm256_add_pd(fjz2,tz);
1049
1050             /**************************
1051              * CALCULATE INTERACTIONS *
1052              **************************/
1053
1054             /* COULOMB ELECTROSTATICS */
1055             velec            = _mm256_mul_pd(qq33,rinv33);
1056             felec            = _mm256_mul_pd(velec,rinvsq33);
1057
1058             /* Update potential sum for this i atom from the interaction with this j atom. */
1059             velec            = _mm256_andnot_pd(dummy_mask,velec);
1060             velecsum         = _mm256_add_pd(velecsum,velec);
1061
1062             fscal            = felec;
1063
1064             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1065
1066             /* Calculate temporary vectorial force */
1067             tx               = _mm256_mul_pd(fscal,dx33);
1068             ty               = _mm256_mul_pd(fscal,dy33);
1069             tz               = _mm256_mul_pd(fscal,dz33);
1070
1071             /* Update vectorial force */
1072             fix3             = _mm256_add_pd(fix3,tx);
1073             fiy3             = _mm256_add_pd(fiy3,ty);
1074             fiz3             = _mm256_add_pd(fiz3,tz);
1075
1076             fjx3             = _mm256_add_pd(fjx3,tx);
1077             fjy3             = _mm256_add_pd(fjy3,ty);
1078             fjz3             = _mm256_add_pd(fjz3,tz);
1079
1080             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1081             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1082             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1083             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
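            /* Dummy lanes point at the local scratch buffer, so the force decrement below never
             * touches a real atom for padding entries.
             */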
1084
1085             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1086                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1087                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1088
1089             /* Inner loop uses 303 flops */
1090         }
1091
1092         /* End of innermost loop */
1093
1094         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1095                                                  f+i_coord_offset,fshift+i_shift_offset);
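        /* Reduce the per-lane i-atom force accumulators, add them to f[] for this water, and add
         * the same sums to fshift[] for the current shift vector (used for the virial).
         */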
1096
1097         ggid                        = gid[iidx];
1098         /* Update potential energies */
1099         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1100         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
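        /* Sum the four lanes of each potential accumulator and add the result to the
         * energy-group entry selected by ggid.
         */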
1101
1102         /* Increment number of inner iterations */
1103         inneriter                  += j_index_end - j_index_start;
1104
1105         /* Outer loop uses 26 flops */
1106     }
1107
1108     /* Increment number of outer iterations */
1109     outeriter        += nri;
1110
1111     /* Update outer/inner flops */
1112
1113     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*303);
1114 }
1115 /*
1116  * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_double
1117  * Electrostatics interaction: Coulomb
1118  * VdW interaction:            CubicSplineTable
1119  * Geometry:                   Water4-Water4
1120  * Calculate force/pot:        Force
1121  */
1122 void
1123 nb_kernel_ElecCoul_VdwCSTab_GeomW4W4_F_avx_256_double
1124                     (t_nblist                    * gmx_restrict       nlist,
1125                      rvec                        * gmx_restrict          xx,
1126                      rvec                        * gmx_restrict          ff,
1127                      t_forcerec                  * gmx_restrict          fr,
1128                      t_mdatoms                   * gmx_restrict     mdatoms,
1129                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1130                      t_nrnb                      * gmx_restrict        nrnb)
1131 {
1132     /* Suffixes 0,1,2,3 refer to particle indices for water atoms in the inner or outer loop,
1133      * or just 0 for non-water atoms.
1134      * Suffixes A,B,C,D refer to the j-loop unrolling done with AVX, i.e. the four different
1135      * jnr indices whose data occupy the four lanes of the SIMD register.
1136      */
1137     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1138     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1139     int              jnrA,jnrB,jnrC,jnrD;
1140     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1141     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1142     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1143     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1144     real             rcutoff_scalar;
1145     real             *shiftvec,*fshift,*x,*f;
1146     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1147     real             scratch[4*DIM];
1148     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1149     real *           vdwioffsetptr0;
1150     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1151     real *           vdwioffsetptr1;
1152     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1153     real *           vdwioffsetptr2;
1154     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1155     real *           vdwioffsetptr3;
1156     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1157     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1158     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1159     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1160     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1161     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1162     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1163     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
1164     __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1165     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1166     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1167     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1168     __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1169     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1170     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1171     __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1172     __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1173     __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1174     __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1175     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
1176     real             *charge;
1177     int              nvdwtype;
1178     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1179     int              *vdwtype;
1180     real             *vdwparam;
1181     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
1182     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
1183     __m128i          vfitab;
1184     __m128i          ifour       = _mm_set1_epi32(4);
1185     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1186     real             *vftab;
1187     __m256d          dummy_mask,cutoff_mask;
1188     __m128           tmpmask0,tmpmask1;
1189     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1190     __m256d          one     = _mm256_set1_pd(1.0);
1191     __m256d          two     = _mm256_set1_pd(2.0);
1192     x                = xx[0];
1193     f                = ff[0];
1194
1195     nri              = nlist->nri;
1196     iinr             = nlist->iinr;
1197     jindex           = nlist->jindex;
1198     jjnr             = nlist->jjnr;
1199     shiftidx         = nlist->shift;
1200     gid              = nlist->gid;
1201     shiftvec         = fr->shift_vec[0];
1202     fshift           = fr->fshift[0];
1203     facel            = _mm256_set1_pd(fr->epsfac);
1204     charge           = mdatoms->chargeA;
1205     nvdwtype         = fr->ntype;
1206     vdwparam         = fr->nbfp;
1207     vdwtype          = mdatoms->typeA;
1208
1209     vftab            = kernel_data->table_vdw->data;
1210     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
1211
1212     /* Setup water-specific parameters */
1213     inr              = nlist->iinr[0];
1214     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1215     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1216     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
1217     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1218
1219     jq1              = _mm256_set1_pd(charge[inr+1]);
1220     jq2              = _mm256_set1_pd(charge[inr+2]);
1221     jq3              = _mm256_set1_pd(charge[inr+3]);
1222     vdwjidx0A        = 2*vdwtype[inr+0];
1223     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1224     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1225     qq11             = _mm256_mul_pd(iq1,jq1);
1226     qq12             = _mm256_mul_pd(iq1,jq2);
1227     qq13             = _mm256_mul_pd(iq1,jq3);
1228     qq21             = _mm256_mul_pd(iq2,jq1);
1229     qq22             = _mm256_mul_pd(iq2,jq2);
1230     qq23             = _mm256_mul_pd(iq2,jq3);
1231     qq31             = _mm256_mul_pd(iq3,jq1);
1232     qq32             = _mm256_mul_pd(iq3,jq2);
1233     qq33             = _mm256_mul_pd(iq3,jq3);
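         /* For a rigid 4-site water the charged sites 1-3 and the single LJ
          * site 0 carry identical parameters in every molecule, so the nine
          * charge products (scaled once by epsfac on the i side) and the one
          * c6/c12 pair are formed here, outside both loops.  In scalar form,
          * with q = mdatoms->chargeA:
          *
          *     qq11 = fr->epsfac * q[inr+1] * q[inr+1];
          *     qq12 = fr->epsfac * q[inr+1] * q[inr+2];
          *     ...
          *     qq33 = fr->epsfac * q[inr+3] * q[inr+3];
          */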
1234
1235     /* Avoid spurious compiler warnings about possibly uninitialized variables */
1236     jnrA = jnrB = jnrC = jnrD = 0;
1237     j_coord_offsetA = 0;
1238     j_coord_offsetB = 0;
1239     j_coord_offsetC = 0;
1240     j_coord_offsetD = 0;
1241
1242     outeriter        = 0;
1243     inneriter        = 0;
1244
1245     for(iidx=0;iidx<4*DIM;iidx++)
1246     {
1247         scratch[iidx] = 0.0;
1248     }
1249
1250     /* Start outer loop over neighborlists */
1251     for(iidx=0; iidx<nri; iidx++)
1252     {
1253         /* Load shift vector for this list */
1254         i_shift_offset   = DIM*shiftidx[iidx];
1255
1256         /* Load limits for loop over neighbors */
1257         j_index_start    = jindex[iidx];
1258         j_index_end      = jindex[iidx+1];
1259
1260         /* Get outer coordinate index */
1261         inr              = iinr[iidx];
1262         i_coord_offset   = DIM*inr;
1263
1264         /* Load i particle coords and add shift vector */
1265         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1266                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
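             /* The shift vector selects the periodic image used when this list
              * was built; adding it to the i coordinates once here makes all i-j
              * displacements below refer to that image.  Scalar sketch for the
              * first site (sites 1-3 are analogous):
              *
              *     ix0 = x[i_coord_offset+0] + shiftvec[i_shift_offset+0];
              *     iy0 = x[i_coord_offset+1] + shiftvec[i_shift_offset+1];
              *     iz0 = x[i_coord_offset+2] + shiftvec[i_shift_offset+2];
              */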
1267
1268         fix0             = _mm256_setzero_pd();
1269         fiy0             = _mm256_setzero_pd();
1270         fiz0             = _mm256_setzero_pd();
1271         fix1             = _mm256_setzero_pd();
1272         fiy1             = _mm256_setzero_pd();
1273         fiz1             = _mm256_setzero_pd();
1274         fix2             = _mm256_setzero_pd();
1275         fiy2             = _mm256_setzero_pd();
1276         fiz2             = _mm256_setzero_pd();
1277         fix3             = _mm256_setzero_pd();
1278         fiy3             = _mm256_setzero_pd();
1279         fiz3             = _mm256_setzero_pd();
1280
1281         /* Start inner kernel loop */
1282         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1283         {
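                 /* The main loop handles four real j atoms per iteration and stops
                  * as soon as the next quadruplet would contain a padding (negative)
                  * jjnr entry; that final, partially filled quadruplet is handled by
                  * the masked epilogue after this loop.
                  */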
1284
1285             /* Get j neighbor index, and coordinate index */
1286             jnrA             = jjnr[jidx];
1287             jnrB             = jjnr[jidx+1];
1288             jnrC             = jjnr[jidx+2];
1289             jnrD             = jjnr[jidx+3];
1290             j_coord_offsetA  = DIM*jnrA;
1291             j_coord_offsetB  = DIM*jnrB;
1292             j_coord_offsetC  = DIM*jnrC;
1293             j_coord_offsetD  = DIM*jnrD;
1294
1295             /* load j atom coordinates */
1296             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1297                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1298                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1299                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
1300
1301             /* Calculate displacement vector */
1302             dx00             = _mm256_sub_pd(ix0,jx0);
1303             dy00             = _mm256_sub_pd(iy0,jy0);
1304             dz00             = _mm256_sub_pd(iz0,jz0);
1305             dx11             = _mm256_sub_pd(ix1,jx1);
1306             dy11             = _mm256_sub_pd(iy1,jy1);
1307             dz11             = _mm256_sub_pd(iz1,jz1);
1308             dx12             = _mm256_sub_pd(ix1,jx2);
1309             dy12             = _mm256_sub_pd(iy1,jy2);
1310             dz12             = _mm256_sub_pd(iz1,jz2);
1311             dx13             = _mm256_sub_pd(ix1,jx3);
1312             dy13             = _mm256_sub_pd(iy1,jy3);
1313             dz13             = _mm256_sub_pd(iz1,jz3);
1314             dx21             = _mm256_sub_pd(ix2,jx1);
1315             dy21             = _mm256_sub_pd(iy2,jy1);
1316             dz21             = _mm256_sub_pd(iz2,jz1);
1317             dx22             = _mm256_sub_pd(ix2,jx2);
1318             dy22             = _mm256_sub_pd(iy2,jy2);
1319             dz22             = _mm256_sub_pd(iz2,jz2);
1320             dx23             = _mm256_sub_pd(ix2,jx3);
1321             dy23             = _mm256_sub_pd(iy2,jy3);
1322             dz23             = _mm256_sub_pd(iz2,jz3);
1323             dx31             = _mm256_sub_pd(ix3,jx1);
1324             dy31             = _mm256_sub_pd(iy3,jy1);
1325             dz31             = _mm256_sub_pd(iz3,jz1);
1326             dx32             = _mm256_sub_pd(ix3,jx2);
1327             dy32             = _mm256_sub_pd(iy3,jy2);
1328             dz32             = _mm256_sub_pd(iz3,jz2);
1329             dx33             = _mm256_sub_pd(ix3,jx3);
1330             dy33             = _mm256_sub_pd(iy3,jy3);
1331             dz33             = _mm256_sub_pd(iz3,jz3);
1332
1333             /* Calculate squared distances and the quantities derived from them */
1334             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1335             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1336             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1337             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1338             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1339             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1340             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1341             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1342             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1343             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1344
1345             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1346             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1347             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1348             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
1349             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1350             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1351             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
1352             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
1353             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
1354             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
1355
1356             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1357             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1358             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
1359             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1360             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1361             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
1362             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
1363             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
1364             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
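                 /* Per pair this is the usual rsq = dx*dx + dy*dy + dz*dz followed
                  * by the SIMD inverse square root; rinvsq is then just rinv*rinv.
                  * Scalar sketch for the 1-1 pair:
                  *
                  *     rsq11    = dx11*dx11 + dy11*dy11 + dz11*dz11;
                  *     rinv11   = 1.0/sqrt(rsq11);
                  *     rinvsq11 = rinv11*rinv11;
                  *
                  * The 0-0 (LJ-only) pair uses the table and needs r itself, so no
                  * rinvsq00 is formed.
                  */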
1365
1366             fjx0             = _mm256_setzero_pd();
1367             fjy0             = _mm256_setzero_pd();
1368             fjz0             = _mm256_setzero_pd();
1369             fjx1             = _mm256_setzero_pd();
1370             fjy1             = _mm256_setzero_pd();
1371             fjz1             = _mm256_setzero_pd();
1372             fjx2             = _mm256_setzero_pd();
1373             fjy2             = _mm256_setzero_pd();
1374             fjz2             = _mm256_setzero_pd();
1375             fjx3             = _mm256_setzero_pd();
1376             fjy3             = _mm256_setzero_pd();
1377             fjz3             = _mm256_setzero_pd();
1378
1379             /**************************
1380              * CALCULATE INTERACTIONS *
1381              **************************/
1382
1383             r00              = _mm256_mul_pd(rsq00,rinv00);
1384
1385             /* Calculate the table index by multiplying r with the table scale and truncating to an integer */
1386             rt               = _mm256_mul_pd(r00,vftabscale);
1387             vfitab           = _mm256_cvttpd_epi32(rt);
1388             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1389             vfitab           = _mm_slli_epi32(vfitab,3);
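                 /* Table indexing sketch (scalar names illustrative): the lookup
                  * point is idx = (int)(r*scale) and eps is the fraction within
                  * that interval; each point stores 8 doubles (Y,F,G,H for
                  * dispersion followed by Y,F,G,H for repulsion), hence the shift
                  * left by 3:
                  *
                  *     double rt  = r00*tabscale;
                  *     int    idx = (int)rt;
                  *     double eps = rt - idx;
                  *     idx       *= 8;
                  */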
1390
1391             /* CUBIC SPLINE TABLE DISPERSION */
1392             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1393             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1394             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1395             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1396             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1397             Heps             = _mm256_mul_pd(vfeps,H);
1398             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1399             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1400             fvdw6            = _mm256_mul_pd(c6_00,FF);
1401
1402             /* CUBIC SPLINE TABLE REPULSION */
1403             vfitab           = _mm_add_epi32(vfitab,ifour);
1404             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1405             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1406             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1407             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1408             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1409             Heps             = _mm256_mul_pd(vfeps,H);
1410             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1411             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1412             fvdw12           = _mm256_mul_pd(c12_00,FF);
1413             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
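                 /* With spline coefficients Y,F,G,H and fraction eps the tabulated
                  * derivative is FF = F + 2*G*eps + 3*H*eps*eps (built here through
                  * Heps and Fp), so in scalar form the force factor is
                  *
                  *     fvdw = -(c6*FF_disp + c12*FF_rep)*tabscale*rinv00;
                  *
                  * The xor with signbit is a cheap negation.  Only the force is
                  * needed in this kernel, so the potential VV is never evaluated.
                  */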
1414
1415             fscal            = fvdw;
1416
1417             /* Calculate temporary vectorial force */
1418             tx               = _mm256_mul_pd(fscal,dx00);
1419             ty               = _mm256_mul_pd(fscal,dy00);
1420             tz               = _mm256_mul_pd(fscal,dz00);
1421
1422             /* Update vectorial force */
1423             fix0             = _mm256_add_pd(fix0,tx);
1424             fiy0             = _mm256_add_pd(fiy0,ty);
1425             fiz0             = _mm256_add_pd(fiz0,tz);
1426
1427             fjx0             = _mm256_add_pd(fjx0,tx);
1428             fjy0             = _mm256_add_pd(fjy0,ty);
1429             fjz0             = _mm256_add_pd(fjz0,tz);
1430
1431             /**************************
1432              * CALCULATE INTERACTIONS *
1433              **************************/
1434
1435             /* COULOMB ELECTROSTATICS */
1436             velec            = _mm256_mul_pd(qq11,rinv11);
1437             felec            = _mm256_mul_pd(velec,rinvsq11);
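                 /* Plain Coulomb between charged sites: velec = qq/r and the scalar
                  * force factor is qq/r^3, applied along the displacement vector.
                  * Scalar sketch for the 1-1 pair (the remaining eight charge-charge
                  * pairs below follow the same pattern):
                  *
                  *     velec = qq11*rinv11;
                  *     felec = velec*rinvsq11;      (= qq11/r^3)
                  *     fx    = felec*dx11;
                  */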
1438
1439             fscal            = felec;
1440
1441             /* Calculate temporary vectorial force */
1442             tx               = _mm256_mul_pd(fscal,dx11);
1443             ty               = _mm256_mul_pd(fscal,dy11);
1444             tz               = _mm256_mul_pd(fscal,dz11);
1445
1446             /* Update vectorial force */
1447             fix1             = _mm256_add_pd(fix1,tx);
1448             fiy1             = _mm256_add_pd(fiy1,ty);
1449             fiz1             = _mm256_add_pd(fiz1,tz);
1450
1451             fjx1             = _mm256_add_pd(fjx1,tx);
1452             fjy1             = _mm256_add_pd(fjy1,ty);
1453             fjz1             = _mm256_add_pd(fjz1,tz);
1454
1455             /**************************
1456              * CALCULATE INTERACTIONS *
1457              **************************/
1458
1459             /* COULOMB ELECTROSTATICS */
1460             velec            = _mm256_mul_pd(qq12,rinv12);
1461             felec            = _mm256_mul_pd(velec,rinvsq12);
1462
1463             fscal            = felec;
1464
1465             /* Calculate temporary vectorial force */
1466             tx               = _mm256_mul_pd(fscal,dx12);
1467             ty               = _mm256_mul_pd(fscal,dy12);
1468             tz               = _mm256_mul_pd(fscal,dz12);
1469
1470             /* Update vectorial force */
1471             fix1             = _mm256_add_pd(fix1,tx);
1472             fiy1             = _mm256_add_pd(fiy1,ty);
1473             fiz1             = _mm256_add_pd(fiz1,tz);
1474
1475             fjx2             = _mm256_add_pd(fjx2,tx);
1476             fjy2             = _mm256_add_pd(fjy2,ty);
1477             fjz2             = _mm256_add_pd(fjz2,tz);
1478
1479             /**************************
1480              * CALCULATE INTERACTIONS *
1481              **************************/
1482
1483             /* COULOMB ELECTROSTATICS */
1484             velec            = _mm256_mul_pd(qq13,rinv13);
1485             felec            = _mm256_mul_pd(velec,rinvsq13);
1486
1487             fscal            = felec;
1488
1489             /* Calculate temporary vectorial force */
1490             tx               = _mm256_mul_pd(fscal,dx13);
1491             ty               = _mm256_mul_pd(fscal,dy13);
1492             tz               = _mm256_mul_pd(fscal,dz13);
1493
1494             /* Update vectorial force */
1495             fix1             = _mm256_add_pd(fix1,tx);
1496             fiy1             = _mm256_add_pd(fiy1,ty);
1497             fiz1             = _mm256_add_pd(fiz1,tz);
1498
1499             fjx3             = _mm256_add_pd(fjx3,tx);
1500             fjy3             = _mm256_add_pd(fjy3,ty);
1501             fjz3             = _mm256_add_pd(fjz3,tz);
1502
1503             /**************************
1504              * CALCULATE INTERACTIONS *
1505              **************************/
1506
1507             /* COULOMB ELECTROSTATICS */
1508             velec            = _mm256_mul_pd(qq21,rinv21);
1509             felec            = _mm256_mul_pd(velec,rinvsq21);
1510
1511             fscal            = felec;
1512
1513             /* Calculate temporary vectorial force */
1514             tx               = _mm256_mul_pd(fscal,dx21);
1515             ty               = _mm256_mul_pd(fscal,dy21);
1516             tz               = _mm256_mul_pd(fscal,dz21);
1517
1518             /* Update vectorial force */
1519             fix2             = _mm256_add_pd(fix2,tx);
1520             fiy2             = _mm256_add_pd(fiy2,ty);
1521             fiz2             = _mm256_add_pd(fiz2,tz);
1522
1523             fjx1             = _mm256_add_pd(fjx1,tx);
1524             fjy1             = _mm256_add_pd(fjy1,ty);
1525             fjz1             = _mm256_add_pd(fjz1,tz);
1526
1527             /**************************
1528              * CALCULATE INTERACTIONS *
1529              **************************/
1530
1531             /* COULOMB ELECTROSTATICS */
1532             velec            = _mm256_mul_pd(qq22,rinv22);
1533             felec            = _mm256_mul_pd(velec,rinvsq22);
1534
1535             fscal            = felec;
1536
1537             /* Calculate temporary vectorial force */
1538             tx               = _mm256_mul_pd(fscal,dx22);
1539             ty               = _mm256_mul_pd(fscal,dy22);
1540             tz               = _mm256_mul_pd(fscal,dz22);
1541
1542             /* Update vectorial force */
1543             fix2             = _mm256_add_pd(fix2,tx);
1544             fiy2             = _mm256_add_pd(fiy2,ty);
1545             fiz2             = _mm256_add_pd(fiz2,tz);
1546
1547             fjx2             = _mm256_add_pd(fjx2,tx);
1548             fjy2             = _mm256_add_pd(fjy2,ty);
1549             fjz2             = _mm256_add_pd(fjz2,tz);
1550
1551             /**************************
1552              * CALCULATE INTERACTIONS *
1553              **************************/
1554
1555             /* COULOMB ELECTROSTATICS */
1556             velec            = _mm256_mul_pd(qq23,rinv23);
1557             felec            = _mm256_mul_pd(velec,rinvsq23);
1558
1559             fscal            = felec;
1560
1561             /* Calculate temporary vectorial force */
1562             tx               = _mm256_mul_pd(fscal,dx23);
1563             ty               = _mm256_mul_pd(fscal,dy23);
1564             tz               = _mm256_mul_pd(fscal,dz23);
1565
1566             /* Update vectorial force */
1567             fix2             = _mm256_add_pd(fix2,tx);
1568             fiy2             = _mm256_add_pd(fiy2,ty);
1569             fiz2             = _mm256_add_pd(fiz2,tz);
1570
1571             fjx3             = _mm256_add_pd(fjx3,tx);
1572             fjy3             = _mm256_add_pd(fjy3,ty);
1573             fjz3             = _mm256_add_pd(fjz3,tz);
1574
1575             /**************************
1576              * CALCULATE INTERACTIONS *
1577              **************************/
1578
1579             /* COULOMB ELECTROSTATICS */
1580             velec            = _mm256_mul_pd(qq31,rinv31);
1581             felec            = _mm256_mul_pd(velec,rinvsq31);
1582
1583             fscal            = felec;
1584
1585             /* Calculate temporary vectorial force */
1586             tx               = _mm256_mul_pd(fscal,dx31);
1587             ty               = _mm256_mul_pd(fscal,dy31);
1588             tz               = _mm256_mul_pd(fscal,dz31);
1589
1590             /* Update vectorial force */
1591             fix3             = _mm256_add_pd(fix3,tx);
1592             fiy3             = _mm256_add_pd(fiy3,ty);
1593             fiz3             = _mm256_add_pd(fiz3,tz);
1594
1595             fjx1             = _mm256_add_pd(fjx1,tx);
1596             fjy1             = _mm256_add_pd(fjy1,ty);
1597             fjz1             = _mm256_add_pd(fjz1,tz);
1598
1599             /**************************
1600              * CALCULATE INTERACTIONS *
1601              **************************/
1602
1603             /* COULOMB ELECTROSTATICS */
1604             velec            = _mm256_mul_pd(qq32,rinv32);
1605             felec            = _mm256_mul_pd(velec,rinvsq32);
1606
1607             fscal            = felec;
1608
1609             /* Calculate temporary vectorial force */
1610             tx               = _mm256_mul_pd(fscal,dx32);
1611             ty               = _mm256_mul_pd(fscal,dy32);
1612             tz               = _mm256_mul_pd(fscal,dz32);
1613
1614             /* Update vectorial force */
1615             fix3             = _mm256_add_pd(fix3,tx);
1616             fiy3             = _mm256_add_pd(fiy3,ty);
1617             fiz3             = _mm256_add_pd(fiz3,tz);
1618
1619             fjx2             = _mm256_add_pd(fjx2,tx);
1620             fjy2             = _mm256_add_pd(fjy2,ty);
1621             fjz2             = _mm256_add_pd(fjz2,tz);
1622
1623             /**************************
1624              * CALCULATE INTERACTIONS *
1625              **************************/
1626
1627             /* COULOMB ELECTROSTATICS */
1628             velec            = _mm256_mul_pd(qq33,rinv33);
1629             felec            = _mm256_mul_pd(velec,rinvsq33);
1630
1631             fscal            = felec;
1632
1633             /* Calculate temporary vectorial force */
1634             tx               = _mm256_mul_pd(fscal,dx33);
1635             ty               = _mm256_mul_pd(fscal,dy33);
1636             tz               = _mm256_mul_pd(fscal,dz33);
1637
1638             /* Update vectorial force */
1639             fix3             = _mm256_add_pd(fix3,tx);
1640             fiy3             = _mm256_add_pd(fiy3,ty);
1641             fiz3             = _mm256_add_pd(fiz3,tz);
1642
1643             fjx3             = _mm256_add_pd(fjx3,tx);
1644             fjy3             = _mm256_add_pd(fjy3,ty);
1645             fjz3             = _mm256_add_pd(fjz3,tz);
1646
1647             fjptrA             = f+j_coord_offsetA;
1648             fjptrB             = f+j_coord_offsetB;
1649             fjptrC             = f+j_coord_offsetC;
1650             fjptrD             = f+j_coord_offsetD;
1651
1652             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1653                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1654                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1655
1656             /* Inner loop uses 285 flops */
1657         }
1658
1659         if(jidx<j_index_end)
1660         {
1661
1662             /* Get j neighbor index, and coordinate index */
1663             jnrlistA         = jjnr[jidx];
1664             jnrlistB         = jjnr[jidx+1];
1665             jnrlistC         = jjnr[jidx+2];
1666             jnrlistD         = jjnr[jidx+3];
1667             /* The sign of each jjnr element is negative for non-real (padding) atoms.
1668              * The resulting mask is 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1669              * so it can be used as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
1670              */
1671             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1672
1673             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1674             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1675             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
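                 /* Mask construction sketch: the 32-bit compare gives all-ones for
                  * negative (padding) jjnr entries, and the two permutes duplicate
                  * each 32-bit result into both halves of a 64-bit lane so the mask
                  * matches double precision.  Conceptually, per lane:
                  *
                  *     maskA = (jnrlistA < 0) ? ~0 : 0;
                  *     fscal = maskA ? 0.0 : fscal;     (done via _mm256_andnot_pd)
                  */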
1676
1677             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1678             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1679             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1680             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1681             j_coord_offsetA  = DIM*jnrA;
1682             j_coord_offsetB  = DIM*jnrB;
1683             j_coord_offsetC  = DIM*jnrC;
1684             j_coord_offsetD  = DIM*jnrD;
1685
1686             /* load j atom coordinates */
1687             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1688                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1689                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1690                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
1691
1692             /* Calculate displacement vector */
1693             dx00             = _mm256_sub_pd(ix0,jx0);
1694             dy00             = _mm256_sub_pd(iy0,jy0);
1695             dz00             = _mm256_sub_pd(iz0,jz0);
1696             dx11             = _mm256_sub_pd(ix1,jx1);
1697             dy11             = _mm256_sub_pd(iy1,jy1);
1698             dz11             = _mm256_sub_pd(iz1,jz1);
1699             dx12             = _mm256_sub_pd(ix1,jx2);
1700             dy12             = _mm256_sub_pd(iy1,jy2);
1701             dz12             = _mm256_sub_pd(iz1,jz2);
1702             dx13             = _mm256_sub_pd(ix1,jx3);
1703             dy13             = _mm256_sub_pd(iy1,jy3);
1704             dz13             = _mm256_sub_pd(iz1,jz3);
1705             dx21             = _mm256_sub_pd(ix2,jx1);
1706             dy21             = _mm256_sub_pd(iy2,jy1);
1707             dz21             = _mm256_sub_pd(iz2,jz1);
1708             dx22             = _mm256_sub_pd(ix2,jx2);
1709             dy22             = _mm256_sub_pd(iy2,jy2);
1710             dz22             = _mm256_sub_pd(iz2,jz2);
1711             dx23             = _mm256_sub_pd(ix2,jx3);
1712             dy23             = _mm256_sub_pd(iy2,jy3);
1713             dz23             = _mm256_sub_pd(iz2,jz3);
1714             dx31             = _mm256_sub_pd(ix3,jx1);
1715             dy31             = _mm256_sub_pd(iy3,jy1);
1716             dz31             = _mm256_sub_pd(iz3,jz1);
1717             dx32             = _mm256_sub_pd(ix3,jx2);
1718             dy32             = _mm256_sub_pd(iy3,jy2);
1719             dz32             = _mm256_sub_pd(iz3,jz2);
1720             dx33             = _mm256_sub_pd(ix3,jx3);
1721             dy33             = _mm256_sub_pd(iy3,jy3);
1722             dz33             = _mm256_sub_pd(iz3,jz3);
1723
1724             /* Calculate squared distances and the quantities derived from them */
1725             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1726             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1727             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1728             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1729             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1730             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1731             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1732             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1733             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1734             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1735
1736             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1737             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1738             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1739             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
1740             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1741             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1742             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
1743             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
1744             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
1745             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
1746
1747             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1748             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1749             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
1750             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1751             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1752             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
1753             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
1754             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
1755             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
1756
1757             fjx0             = _mm256_setzero_pd();
1758             fjy0             = _mm256_setzero_pd();
1759             fjz0             = _mm256_setzero_pd();
1760             fjx1             = _mm256_setzero_pd();
1761             fjy1             = _mm256_setzero_pd();
1762             fjz1             = _mm256_setzero_pd();
1763             fjx2             = _mm256_setzero_pd();
1764             fjy2             = _mm256_setzero_pd();
1765             fjz2             = _mm256_setzero_pd();
1766             fjx3             = _mm256_setzero_pd();
1767             fjy3             = _mm256_setzero_pd();
1768             fjz3             = _mm256_setzero_pd();
1769
1770             /**************************
1771              * CALCULATE INTERACTIONS *
1772              **************************/
1773
1774             r00              = _mm256_mul_pd(rsq00,rinv00);
1775             r00              = _mm256_andnot_pd(dummy_mask,r00);
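                 /* Zeroing r00 in padding lanes keeps the table index in range for
                  * those lanes; whatever they fetch is harmless because fscal is
                  * masked to zero further down.
                  */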
1776
1777             /* Calculate the table index by multiplying r with the table scale and truncating to an integer */
1778             rt               = _mm256_mul_pd(r00,vftabscale);
1779             vfitab           = _mm256_cvttpd_epi32(rt);
1780             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1781             vfitab           = _mm_slli_epi32(vfitab,3);
1782
1783             /* CUBIC SPLINE TABLE DISPERSION */
1784             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1785             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1786             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1787             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1788             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1789             Heps             = _mm256_mul_pd(vfeps,H);
1790             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1791             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1792             fvdw6            = _mm256_mul_pd(c6_00,FF);
1793
1794             /* CUBIC SPLINE TABLE REPULSION */
1795             vfitab           = _mm_add_epi32(vfitab,ifour);
1796             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1797             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1798             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1799             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1800             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1801             Heps             = _mm256_mul_pd(vfeps,H);
1802             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1803             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1804             fvdw12           = _mm256_mul_pd(c12_00,FF);
1805             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1806
1807             fscal            = fvdw;
1808
1809             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1810
1811             /* Calculate temporary vectorial force */
1812             tx               = _mm256_mul_pd(fscal,dx00);
1813             ty               = _mm256_mul_pd(fscal,dy00);
1814             tz               = _mm256_mul_pd(fscal,dz00);
1815
1816             /* Update vectorial force */
1817             fix0             = _mm256_add_pd(fix0,tx);
1818             fiy0             = _mm256_add_pd(fiy0,ty);
1819             fiz0             = _mm256_add_pd(fiz0,tz);
1820
1821             fjx0             = _mm256_add_pd(fjx0,tx);
1822             fjy0             = _mm256_add_pd(fjy0,ty);
1823             fjz0             = _mm256_add_pd(fjz0,tz);
1824
1825             /**************************
1826              * CALCULATE INTERACTIONS *
1827              **************************/
1828
1829             /* COULOMB ELECTROSTATICS */
1830             velec            = _mm256_mul_pd(qq11,rinv11);
1831             felec            = _mm256_mul_pd(velec,rinvsq11);
1832
1833             fscal            = felec;
1834
1835             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1836
1837             /* Calculate temporary vectorial force */
1838             tx               = _mm256_mul_pd(fscal,dx11);
1839             ty               = _mm256_mul_pd(fscal,dy11);
1840             tz               = _mm256_mul_pd(fscal,dz11);
1841
1842             /* Update vectorial force */
1843             fix1             = _mm256_add_pd(fix1,tx);
1844             fiy1             = _mm256_add_pd(fiy1,ty);
1845             fiz1             = _mm256_add_pd(fiz1,tz);
1846
1847             fjx1             = _mm256_add_pd(fjx1,tx);
1848             fjy1             = _mm256_add_pd(fjy1,ty);
1849             fjz1             = _mm256_add_pd(fjz1,tz);
1850
1851             /**************************
1852              * CALCULATE INTERACTIONS *
1853              **************************/
1854
1855             /* COULOMB ELECTROSTATICS */
1856             velec            = _mm256_mul_pd(qq12,rinv12);
1857             felec            = _mm256_mul_pd(velec,rinvsq12);
1858
1859             fscal            = felec;
1860
1861             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1862
1863             /* Calculate temporary vectorial force */
1864             tx               = _mm256_mul_pd(fscal,dx12);
1865             ty               = _mm256_mul_pd(fscal,dy12);
1866             tz               = _mm256_mul_pd(fscal,dz12);
1867
1868             /* Update vectorial force */
1869             fix1             = _mm256_add_pd(fix1,tx);
1870             fiy1             = _mm256_add_pd(fiy1,ty);
1871             fiz1             = _mm256_add_pd(fiz1,tz);
1872
1873             fjx2             = _mm256_add_pd(fjx2,tx);
1874             fjy2             = _mm256_add_pd(fjy2,ty);
1875             fjz2             = _mm256_add_pd(fjz2,tz);
1876
1877             /**************************
1878              * CALCULATE INTERACTIONS *
1879              **************************/
1880
1881             /* COULOMB ELECTROSTATICS */
1882             velec            = _mm256_mul_pd(qq13,rinv13);
1883             felec            = _mm256_mul_pd(velec,rinvsq13);
1884
1885             fscal            = felec;
1886
1887             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1888
1889             /* Calculate temporary vectorial force */
1890             tx               = _mm256_mul_pd(fscal,dx13);
1891             ty               = _mm256_mul_pd(fscal,dy13);
1892             tz               = _mm256_mul_pd(fscal,dz13);
1893
1894             /* Update vectorial force */
1895             fix1             = _mm256_add_pd(fix1,tx);
1896             fiy1             = _mm256_add_pd(fiy1,ty);
1897             fiz1             = _mm256_add_pd(fiz1,tz);
1898
1899             fjx3             = _mm256_add_pd(fjx3,tx);
1900             fjy3             = _mm256_add_pd(fjy3,ty);
1901             fjz3             = _mm256_add_pd(fjz3,tz);
1902
1903             /**************************
1904              * CALCULATE INTERACTIONS *
1905              **************************/
1906
1907             /* COULOMB ELECTROSTATICS */
1908             velec            = _mm256_mul_pd(qq21,rinv21);
1909             felec            = _mm256_mul_pd(velec,rinvsq21);
1910
1911             fscal            = felec;
1912
1913             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1914
1915             /* Calculate temporary vectorial force */
1916             tx               = _mm256_mul_pd(fscal,dx21);
1917             ty               = _mm256_mul_pd(fscal,dy21);
1918             tz               = _mm256_mul_pd(fscal,dz21);
1919
1920             /* Update vectorial force */
1921             fix2             = _mm256_add_pd(fix2,tx);
1922             fiy2             = _mm256_add_pd(fiy2,ty);
1923             fiz2             = _mm256_add_pd(fiz2,tz);
1924
1925             fjx1             = _mm256_add_pd(fjx1,tx);
1926             fjy1             = _mm256_add_pd(fjy1,ty);
1927             fjz1             = _mm256_add_pd(fjz1,tz);
1928
1929             /**************************
1930              * CALCULATE INTERACTIONS *
1931              **************************/
1932
1933             /* COULOMB ELECTROSTATICS */
1934             velec            = _mm256_mul_pd(qq22,rinv22);
1935             felec            = _mm256_mul_pd(velec,rinvsq22);
1936
1937             fscal            = felec;
1938
1939             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1940
1941             /* Calculate temporary vectorial force */
1942             tx               = _mm256_mul_pd(fscal,dx22);
1943             ty               = _mm256_mul_pd(fscal,dy22);
1944             tz               = _mm256_mul_pd(fscal,dz22);
1945
1946             /* Update vectorial force */
1947             fix2             = _mm256_add_pd(fix2,tx);
1948             fiy2             = _mm256_add_pd(fiy2,ty);
1949             fiz2             = _mm256_add_pd(fiz2,tz);
1950
1951             fjx2             = _mm256_add_pd(fjx2,tx);
1952             fjy2             = _mm256_add_pd(fjy2,ty);
1953             fjz2             = _mm256_add_pd(fjz2,tz);
1954
1955             /**************************
1956              * CALCULATE INTERACTIONS *
1957              **************************/
1958
1959             /* COULOMB ELECTROSTATICS */
1960             velec            = _mm256_mul_pd(qq23,rinv23);
1961             felec            = _mm256_mul_pd(velec,rinvsq23);
1962
1963             fscal            = felec;
1964
1965             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1966
1967             /* Calculate temporary vectorial force */
1968             tx               = _mm256_mul_pd(fscal,dx23);
1969             ty               = _mm256_mul_pd(fscal,dy23);
1970             tz               = _mm256_mul_pd(fscal,dz23);
1971
1972             /* Update vectorial force */
1973             fix2             = _mm256_add_pd(fix2,tx);
1974             fiy2             = _mm256_add_pd(fiy2,ty);
1975             fiz2             = _mm256_add_pd(fiz2,tz);
1976
1977             fjx3             = _mm256_add_pd(fjx3,tx);
1978             fjy3             = _mm256_add_pd(fjy3,ty);
1979             fjz3             = _mm256_add_pd(fjz3,tz);
1980
1981             /**************************
1982              * CALCULATE INTERACTIONS *
1983              **************************/
1984
1985             /* COULOMB ELECTROSTATICS */
1986             velec            = _mm256_mul_pd(qq31,rinv31);
1987             felec            = _mm256_mul_pd(velec,rinvsq31);
1988
1989             fscal            = felec;
1990
1991             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1992
1993             /* Calculate temporary vectorial force */
1994             tx               = _mm256_mul_pd(fscal,dx31);
1995             ty               = _mm256_mul_pd(fscal,dy31);
1996             tz               = _mm256_mul_pd(fscal,dz31);
1997
1998             /* Update vectorial force */
1999             fix3             = _mm256_add_pd(fix3,tx);
2000             fiy3             = _mm256_add_pd(fiy3,ty);
2001             fiz3             = _mm256_add_pd(fiz3,tz);
2002
2003             fjx1             = _mm256_add_pd(fjx1,tx);
2004             fjy1             = _mm256_add_pd(fjy1,ty);
2005             fjz1             = _mm256_add_pd(fjz1,tz);
2006
2007             /**************************
2008              * CALCULATE INTERACTIONS *
2009              **************************/
2010
2011             /* COULOMB ELECTROSTATICS */
2012             velec            = _mm256_mul_pd(qq32,rinv32);
2013             felec            = _mm256_mul_pd(velec,rinvsq32);
2014
2015             fscal            = felec;
2016
2017             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2018
2019             /* Calculate temporary vectorial force */
2020             tx               = _mm256_mul_pd(fscal,dx32);
2021             ty               = _mm256_mul_pd(fscal,dy32);
2022             tz               = _mm256_mul_pd(fscal,dz32);
2023
2024             /* Update vectorial force */
2025             fix3             = _mm256_add_pd(fix3,tx);
2026             fiy3             = _mm256_add_pd(fiy3,ty);
2027             fiz3             = _mm256_add_pd(fiz3,tz);
2028
2029             fjx2             = _mm256_add_pd(fjx2,tx);
2030             fjy2             = _mm256_add_pd(fjy2,ty);
2031             fjz2             = _mm256_add_pd(fjz2,tz);
2032
2033             /**************************
2034              * CALCULATE INTERACTIONS *
2035              **************************/
2036
2037             /* COULOMB ELECTROSTATICS */
2038             velec            = _mm256_mul_pd(qq33,rinv33);
2039             felec            = _mm256_mul_pd(velec,rinvsq33);
2040
2041             fscal            = felec;
2042
2043             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2044
2045             /* Calculate temporary vectorial force */
2046             tx               = _mm256_mul_pd(fscal,dx33);
2047             ty               = _mm256_mul_pd(fscal,dy33);
2048             tz               = _mm256_mul_pd(fscal,dz33);
2049
2050             /* Update vectorial force */
2051             fix3             = _mm256_add_pd(fix3,tx);
2052             fiy3             = _mm256_add_pd(fiy3,ty);
2053             fiz3             = _mm256_add_pd(fiz3,tz);
2054
2055             fjx3             = _mm256_add_pd(fjx3,tx);
2056             fjy3             = _mm256_add_pd(fjy3,ty);
2057             fjz3             = _mm256_add_pd(fjz3,tz);
2058
2059             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2060             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2061             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2062             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2063
2064             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2065                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2066                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2067
2068             /* Inner loop uses 286 flops */
2069         }
2070
2071         /* End of innermost loop */
2072
2073         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
2074                                                  f+i_coord_offset,fshift+i_shift_offset);
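             /* The i-side accumulators are reduced across the four SIMD lanes and
              * added both to the force array and to the per-shift force array used
              * for the virial.  Per dimension d this is roughly
              *
              *     f[i_coord_offset+3*a+d] += fi[a][d];   for a = 0..3
              *     fshift[i_shift_offset+d] += fi[0][d]+fi[1][d]+fi[2][d]+fi[3][d];
              *
              * with fi[a][d] denoting the lane-summed force on i-site a.
              */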
2075
2076         /* Increment number of inner iterations */
2077         inneriter                  += j_index_end - j_index_start;
2078
2079         /* Outer loop uses 24 flops */
2080     }
2081
2082     /* Increment number of outer iterations */
2083     outeriter        += nri;
2084
2085     /* Update outer/inner flops */
2086
2087     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*286);
2088 }