1 /*
2  * Note: this file was generated by the Gromacs avx_256_single kernel generator.
3  *
4  *                This source code is part of
5  *
6  *                 G   R   O   M   A   C   S
7  *
8  * Copyright (c) 2001-2012, The GROMACS Development Team
9  *
10  * Gromacs is a library for molecular simulation and trajectory analysis,
11  * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
12  * a full list of developers and information, check out http://www.gromacs.org
13  *
14  * This program is free software; you can redistribute it and/or modify it under
15  * the terms of the GNU Lesser General Public License as published by the Free
16  * Software Foundation; either version 2 of the License, or (at your option) any
17  * later version.
18  *
19  * To help fund GROMACS development, we humbly ask that you cite
20  * the papers people have written on it - you can find them on the website.
21  */
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <math.h>
27
28 #include "../nb_kernel.h"
29 #include "types/simple.h"
30 #include "vec.h"
31 #include "nrnb.h"
32
33 #include "gmx_math_x86_avx_256_single.h"
34 #include "kernelutil_x86_avx_256_single.h"
35
36 /*
37  * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_VF_avx_256_single
38  * Electrostatics interaction: ReactionField
39  * VdW interaction:            CubicSplineTable
40  * Geometry:                   Water4-Water4
41  * Calculate force/pot:        PotentialAndForce
42  */
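/*
 * For reference: a scalar sketch of the reaction-field terms the SIMD code below
 * evaluates for the charged sites (the helper name below is illustrative only).
 * k_rf and c_rf come from fr->ic and everything is truncated at fr->rcoulomb; the
 * O-O Lennard-Jones term is instead read from a cubic spline table.
 */
#if 0   /* illustrative sketch, never compiled */
static void
reference_rf_pair(real qq, real r, real k_rf, real c_rf, real *velec, real *fscal)
{
    real rinv = 1.0/r;

    *velec = qq*(rinv + k_rf*r*r - c_rf);     /* matches velec below          */
    *fscal = qq*(rinv*rinv*rinv - 2.0*k_rf);  /* force divided by r (felec)   */
}
#endif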
43 void
44 nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_VF_avx_256_single
45                     (t_nblist * gmx_restrict                nlist,
46                      rvec * gmx_restrict                    xx,
47                      rvec * gmx_restrict                    ff,
48                      t_forcerec * gmx_restrict              fr,
49                      t_mdatoms * gmx_restrict               mdatoms,
50                      nb_kernel_data_t * gmx_restrict        kernel_data,
51                      t_nrnb * gmx_restrict                  nrnb)
52 {
53     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
54      * just 0 for non-waters.
55      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
56      * jnr indices corresponding to data put in the eight positions in the SIMD register.
57      */
58     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
59     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
60     int              jnrA,jnrB,jnrC,jnrD;
61     int              jnrE,jnrF,jnrG,jnrH;
62     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
63     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
64     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
65     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
66     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
67     real             rcutoff_scalar;
68     real             *shiftvec,*fshift,*x,*f;
69     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
70     real             scratch[4*DIM];
71     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
72     real *           vdwioffsetptr0;
73     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
74     real *           vdwioffsetptr1;
75     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
76     real *           vdwioffsetptr2;
77     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
78     real *           vdwioffsetptr3;
79     __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
80     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
81     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
82     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
83     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
84     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
85     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
86     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
87     __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
88     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
89     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
90     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
91     __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
92     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
93     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
94     __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
95     __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
96     __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
97     __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
98     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
99     real             *charge;
100     int              nvdwtype;
101     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
102     int              *vdwtype;
103     real             *vdwparam;
104     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
105     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
106     __m256i          vfitab;
107     __m128i          vfitab_lo,vfitab_hi;
108     __m128i          ifour       = _mm_set1_epi32(4);
109     __m256           rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
110     real             *vftab;
111     __m256           dummy_mask,cutoff_mask;
112     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
113     __m256           one     = _mm256_set1_ps(1.0);
114     __m256           two     = _mm256_set1_ps(2.0);
115     x                = xx[0];
116     f                = ff[0];
117
118     nri              = nlist->nri;
119     iinr             = nlist->iinr;
120     jindex           = nlist->jindex;
121     jjnr             = nlist->jjnr;
122     shiftidx         = nlist->shift;
123     gid              = nlist->gid;
124     shiftvec         = fr->shift_vec[0];
125     fshift           = fr->fshift[0];
126     facel            = _mm256_set1_ps(fr->epsfac);
127     charge           = mdatoms->chargeA;
128     krf              = _mm256_set1_ps(fr->ic->k_rf);
129     krf2             = _mm256_set1_ps(fr->ic->k_rf*2.0);
130     crf              = _mm256_set1_ps(fr->ic->c_rf);
131     nvdwtype         = fr->ntype;
132     vdwparam         = fr->nbfp;
133     vdwtype          = mdatoms->typeA;
134
135     vftab            = kernel_data->table_vdw->data;
136     vftabscale       = _mm256_set1_ps(kernel_data->table_vdw->scale);
137
138     /* Setup water-specific parameters */
139     inr              = nlist->iinr[0];
140     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
141     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
142     iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
143     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
144
145     jq1              = _mm256_set1_ps(charge[inr+1]);
146     jq2              = _mm256_set1_ps(charge[inr+2]);
147     jq3              = _mm256_set1_ps(charge[inr+3]);
148     vdwjidx0A        = 2*vdwtype[inr+0];
149     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
150     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
151     qq11             = _mm256_mul_ps(iq1,jq1);
152     qq12             = _mm256_mul_ps(iq1,jq2);
153     qq13             = _mm256_mul_ps(iq1,jq3);
154     qq21             = _mm256_mul_ps(iq2,jq1);
155     qq22             = _mm256_mul_ps(iq2,jq2);
156     qq23             = _mm256_mul_ps(iq2,jq3);
157     qq31             = _mm256_mul_ps(iq3,jq1);
158     qq32             = _mm256_mul_ps(iq3,jq2);
159     qq33             = _mm256_mul_ps(iq3,jq3);
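    /* This water-water kernel assumes all molecules are identical 4-site waters
     * (TIP4P-like): site 0 interacts only through Lennard-Jones and sites 1-3 only
     * through charge, so the single O-O c6/c12 pair and the nine charge products can
     * be broadcast once from the first i water and reused for every molecule.
     */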
160
161     /* When we use explicit cutoffs, the value must be identical for elec and VdW, so we use the elec value as an arbitrary choice */
162     rcutoff_scalar   = fr->rcoulomb;
163     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
164     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
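    /* Each interaction below is accumulated only where rsq < rcutoff2, enforced with a
     * per-lane comparison mask instead of a branch.
     */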
165
166     /* Avoid stupid compiler warnings */
167     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
168     j_coord_offsetA = 0;
169     j_coord_offsetB = 0;
170     j_coord_offsetC = 0;
171     j_coord_offsetD = 0;
172     j_coord_offsetE = 0;
173     j_coord_offsetF = 0;
174     j_coord_offsetG = 0;
175     j_coord_offsetH = 0;
176
177     outeriter        = 0;
178     inneriter        = 0;
179
180     for(iidx=0;iidx<4*DIM;iidx++)
181     {
182         scratch[iidx] = 0.0;
183     }
184
185     /* Start outer loop over neighborlists */
186     for(iidx=0; iidx<nri; iidx++)
187     {
188         /* Load shift vector for this list */
189         i_shift_offset   = DIM*shiftidx[iidx];
190
191         /* Load limits for loop over neighbors */
192         j_index_start    = jindex[iidx];
193         j_index_end      = jindex[iidx+1];
194
195         /* Get outer coordinate index */
196         inr              = iinr[iidx];
197         i_coord_offset   = DIM*inr;
198
199         /* Load i particle coords and add shift vector */
200         gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
201                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
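        /* Adding the shift vector places this i water in the periodic image used for the
         * current list, so minimum-image distances to the j atoms can be formed by plain
         * subtraction further down.
         */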
202
203         fix0             = _mm256_setzero_ps();
204         fiy0             = _mm256_setzero_ps();
205         fiz0             = _mm256_setzero_ps();
206         fix1             = _mm256_setzero_ps();
207         fiy1             = _mm256_setzero_ps();
208         fiz1             = _mm256_setzero_ps();
209         fix2             = _mm256_setzero_ps();
210         fiy2             = _mm256_setzero_ps();
211         fiz2             = _mm256_setzero_ps();
212         fix3             = _mm256_setzero_ps();
213         fiy3             = _mm256_setzero_ps();
214         fiz3             = _mm256_setzero_ps();
215
216         /* Reset potential sums */
217         velecsum         = _mm256_setzero_ps();
218         vvdwsum          = _mm256_setzero_ps();
219
220         /* Start inner kernel loop */
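        /* The loop below handles full blocks of eight j waters; the final partial block,
         * padded with negative j indices, is processed separately afterwards with a dummy
         * mask so the padding lanes contribute nothing.
         */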
221         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
222         {
223
224             /* Get j neighbor index, and coordinate index */
225             jnrA             = jjnr[jidx];
226             jnrB             = jjnr[jidx+1];
227             jnrC             = jjnr[jidx+2];
228             jnrD             = jjnr[jidx+3];
229             jnrE             = jjnr[jidx+4];
230             jnrF             = jjnr[jidx+5];
231             jnrG             = jjnr[jidx+6];
232             jnrH             = jjnr[jidx+7];
233             j_coord_offsetA  = DIM*jnrA;
234             j_coord_offsetB  = DIM*jnrB;
235             j_coord_offsetC  = DIM*jnrC;
236             j_coord_offsetD  = DIM*jnrD;
237             j_coord_offsetE  = DIM*jnrE;
238             j_coord_offsetF  = DIM*jnrF;
239             j_coord_offsetG  = DIM*jnrG;
240             j_coord_offsetH  = DIM*jnrH;
241
242             /* load j atom coordinates */
243             gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
244                                                  x+j_coord_offsetC,x+j_coord_offsetD,
245                                                  x+j_coord_offsetE,x+j_coord_offsetF,
246                                                  x+j_coord_offsetG,x+j_coord_offsetH,
247                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
248                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
249
250             /* Calculate displacement vector */
251             dx00             = _mm256_sub_ps(ix0,jx0);
252             dy00             = _mm256_sub_ps(iy0,jy0);
253             dz00             = _mm256_sub_ps(iz0,jz0);
254             dx11             = _mm256_sub_ps(ix1,jx1);
255             dy11             = _mm256_sub_ps(iy1,jy1);
256             dz11             = _mm256_sub_ps(iz1,jz1);
257             dx12             = _mm256_sub_ps(ix1,jx2);
258             dy12             = _mm256_sub_ps(iy1,jy2);
259             dz12             = _mm256_sub_ps(iz1,jz2);
260             dx13             = _mm256_sub_ps(ix1,jx3);
261             dy13             = _mm256_sub_ps(iy1,jy3);
262             dz13             = _mm256_sub_ps(iz1,jz3);
263             dx21             = _mm256_sub_ps(ix2,jx1);
264             dy21             = _mm256_sub_ps(iy2,jy1);
265             dz21             = _mm256_sub_ps(iz2,jz1);
266             dx22             = _mm256_sub_ps(ix2,jx2);
267             dy22             = _mm256_sub_ps(iy2,jy2);
268             dz22             = _mm256_sub_ps(iz2,jz2);
269             dx23             = _mm256_sub_ps(ix2,jx3);
270             dy23             = _mm256_sub_ps(iy2,jy3);
271             dz23             = _mm256_sub_ps(iz2,jz3);
272             dx31             = _mm256_sub_ps(ix3,jx1);
273             dy31             = _mm256_sub_ps(iy3,jy1);
274             dz31             = _mm256_sub_ps(iz3,jz1);
275             dx32             = _mm256_sub_ps(ix3,jx2);
276             dy32             = _mm256_sub_ps(iy3,jy2);
277             dz32             = _mm256_sub_ps(iz3,jz2);
278             dx33             = _mm256_sub_ps(ix3,jx3);
279             dy33             = _mm256_sub_ps(iy3,jy3);
280             dz33             = _mm256_sub_ps(iz3,jz3);
281
282             /* Calculate squared distance and things based on it */
283             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
284             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
285             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
286             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
287             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
288             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
289             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
290             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
291             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
292             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
293
294             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
295             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
296             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
297             rinv13           = gmx_mm256_invsqrt_ps(rsq13);
298             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
299             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
300             rinv23           = gmx_mm256_invsqrt_ps(rsq23);
301             rinv31           = gmx_mm256_invsqrt_ps(rsq31);
302             rinv32           = gmx_mm256_invsqrt_ps(rsq32);
303             rinv33           = gmx_mm256_invsqrt_ps(rsq33);
304
305             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
306             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
307             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
308             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
309             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
310             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
311             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
312             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
313             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
314
315             fjx0             = _mm256_setzero_ps();
316             fjy0             = _mm256_setzero_ps();
317             fjz0             = _mm256_setzero_ps();
318             fjx1             = _mm256_setzero_ps();
319             fjy1             = _mm256_setzero_ps();
320             fjz1             = _mm256_setzero_ps();
321             fjx2             = _mm256_setzero_ps();
322             fjy2             = _mm256_setzero_ps();
323             fjz2             = _mm256_setzero_ps();
324             fjx3             = _mm256_setzero_ps();
325             fjy3             = _mm256_setzero_ps();
326             fjz3             = _mm256_setzero_ps();
327
328             /**************************
329              * CALCULATE INTERACTIONS *
330              **************************/
331
332             if (gmx_mm256_any_lt(rsq00,rcutoff2))
333             {
334
335             r00              = _mm256_mul_ps(rsq00,rinv00);
336
337             /* Calculate table index by multiplying r with table scale and truncate to integer */
338             rt               = _mm256_mul_ps(r00,vftabscale);
339             vfitab           = _mm256_cvttps_epi32(rt);
340             vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
341             /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
342             vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
343             vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
344             vfitab_lo        = _mm_slli_epi32(vfitab_lo,3);
345             vfitab_hi        = _mm_slli_epi32(vfitab_hi,3);
346
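            /* Each table point stores eight floats: Y,F,G,H for dispersion followed by
             * Y,F,G,H for repulsion, hence the shift by 3 above and the +4 offset below.
             * With eps the fractional part of r*tabscale, the spline is evaluated as
             *     VV = Y + eps*(F + eps*(G + eps*H))
             *     FF = F + eps*(2*G + 3*eps*H)        (= dVV/deps)
             * and the force below picks up the extra factor tabscale/r plus a sign flip.
             */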
347             /* CUBIC SPLINE TABLE DISPERSION */
348             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
349                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
350             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
351                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
352             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
353                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
354             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
355                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
356             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
357             Heps             = _mm256_mul_ps(vfeps,H);
358             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
359             VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
360             vvdw6            = _mm256_mul_ps(c6_00,VV);
361             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
362             fvdw6            = _mm256_mul_ps(c6_00,FF);
363
364             /* CUBIC SPLINE TABLE REPULSION */
365             vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
366             vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
367             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
368                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
369             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
370                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
371             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
372                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
373             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
374                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
375             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
376             Heps             = _mm256_mul_ps(vfeps,H);
377             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
378             VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
379             vvdw12           = _mm256_mul_ps(c12_00,VV);
380             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
381             fvdw12           = _mm256_mul_ps(c12_00,FF);
382             vvdw             = _mm256_add_ps(vvdw12,vvdw6);
383             fvdw             = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
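            /* XOR-ing with the sign bit negates the packed floats, so fvdw is
             * -(c6*FF_disp + c12*FF_rep)*tabscale/r, i.e. the scalar force already
             * divided by r and ready to be multiplied by the displacement components.
             */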
384
385             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
386
387             /* Update potential sum for this i atom from the interaction with this j atom. */
388             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
389             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
390
391             fscal            = fvdw;
392
393             fscal            = _mm256_and_ps(fscal,cutoff_mask);
394
395             /* Calculate temporary vectorial force */
396             tx               = _mm256_mul_ps(fscal,dx00);
397             ty               = _mm256_mul_ps(fscal,dy00);
398             tz               = _mm256_mul_ps(fscal,dz00);
399
400             /* Update vectorial force */
401             fix0             = _mm256_add_ps(fix0,tx);
402             fiy0             = _mm256_add_ps(fiy0,ty);
403             fiz0             = _mm256_add_ps(fiz0,tz);
404
405             fjx0             = _mm256_add_ps(fjx0,tx);
406             fjy0             = _mm256_add_ps(fjy0,ty);
407             fjz0             = _mm256_add_ps(fjz0,tz);
408
409             }
410
411             /**************************
412              * CALCULATE INTERACTIONS *
413              **************************/
414
415             if (gmx_mm256_any_lt(rsq11,rcutoff2))
416             {
417
418             /* REACTION-FIELD ELECTROSTATICS */
419             velec            = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
420             felec            = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
421
422             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
423
424             /* Update potential sum for this i atom from the interaction with this j atom. */
425             velec            = _mm256_and_ps(velec,cutoff_mask);
426             velecsum         = _mm256_add_ps(velecsum,velec);
427
428             fscal            = felec;
429
430             fscal            = _mm256_and_ps(fscal,cutoff_mask);
431
432             /* Calculate temporary vectorial force */
433             tx               = _mm256_mul_ps(fscal,dx11);
434             ty               = _mm256_mul_ps(fscal,dy11);
435             tz               = _mm256_mul_ps(fscal,dz11);
436
437             /* Update vectorial force */
438             fix1             = _mm256_add_ps(fix1,tx);
439             fiy1             = _mm256_add_ps(fiy1,ty);
440             fiz1             = _mm256_add_ps(fiz1,tz);
441
442             fjx1             = _mm256_add_ps(fjx1,tx);
443             fjy1             = _mm256_add_ps(fjy1,ty);
444             fjz1             = _mm256_add_ps(fjz1,tz);
445
446             }
447
448             /**************************
449              * CALCULATE INTERACTIONS *
450              **************************/
451
452             if (gmx_mm256_any_lt(rsq12,rcutoff2))
453             {
454
455             /* REACTION-FIELD ELECTROSTATICS */
456             velec            = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
457             felec            = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
458
459             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
460
461             /* Update potential sum for this i atom from the interaction with this j atom. */
462             velec            = _mm256_and_ps(velec,cutoff_mask);
463             velecsum         = _mm256_add_ps(velecsum,velec);
464
465             fscal            = felec;
466
467             fscal            = _mm256_and_ps(fscal,cutoff_mask);
468
469             /* Calculate temporary vectorial force */
470             tx               = _mm256_mul_ps(fscal,dx12);
471             ty               = _mm256_mul_ps(fscal,dy12);
472             tz               = _mm256_mul_ps(fscal,dz12);
473
474             /* Update vectorial force */
475             fix1             = _mm256_add_ps(fix1,tx);
476             fiy1             = _mm256_add_ps(fiy1,ty);
477             fiz1             = _mm256_add_ps(fiz1,tz);
478
479             fjx2             = _mm256_add_ps(fjx2,tx);
480             fjy2             = _mm256_add_ps(fjy2,ty);
481             fjz2             = _mm256_add_ps(fjz2,tz);
482
483             }
484
485             /**************************
486              * CALCULATE INTERACTIONS *
487              **************************/
488
489             if (gmx_mm256_any_lt(rsq13,rcutoff2))
490             {
491
492             /* REACTION-FIELD ELECTROSTATICS */
493             velec            = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
494             felec            = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
495
496             cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
497
498             /* Update potential sum for this i atom from the interaction with this j atom. */
499             velec            = _mm256_and_ps(velec,cutoff_mask);
500             velecsum         = _mm256_add_ps(velecsum,velec);
501
502             fscal            = felec;
503
504             fscal            = _mm256_and_ps(fscal,cutoff_mask);
505
506             /* Calculate temporary vectorial force */
507             tx               = _mm256_mul_ps(fscal,dx13);
508             ty               = _mm256_mul_ps(fscal,dy13);
509             tz               = _mm256_mul_ps(fscal,dz13);
510
511             /* Update vectorial force */
512             fix1             = _mm256_add_ps(fix1,tx);
513             fiy1             = _mm256_add_ps(fiy1,ty);
514             fiz1             = _mm256_add_ps(fiz1,tz);
515
516             fjx3             = _mm256_add_ps(fjx3,tx);
517             fjy3             = _mm256_add_ps(fjy3,ty);
518             fjz3             = _mm256_add_ps(fjz3,tz);
519
520             }
521
522             /**************************
523              * CALCULATE INTERACTIONS *
524              **************************/
525
526             if (gmx_mm256_any_lt(rsq21,rcutoff2))
527             {
528
529             /* REACTION-FIELD ELECTROSTATICS */
530             velec            = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
531             felec            = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
532
533             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
534
535             /* Update potential sum for this i atom from the interaction with this j atom. */
536             velec            = _mm256_and_ps(velec,cutoff_mask);
537             velecsum         = _mm256_add_ps(velecsum,velec);
538
539             fscal            = felec;
540
541             fscal            = _mm256_and_ps(fscal,cutoff_mask);
542
543             /* Calculate temporary vectorial force */
544             tx               = _mm256_mul_ps(fscal,dx21);
545             ty               = _mm256_mul_ps(fscal,dy21);
546             tz               = _mm256_mul_ps(fscal,dz21);
547
548             /* Update vectorial force */
549             fix2             = _mm256_add_ps(fix2,tx);
550             fiy2             = _mm256_add_ps(fiy2,ty);
551             fiz2             = _mm256_add_ps(fiz2,tz);
552
553             fjx1             = _mm256_add_ps(fjx1,tx);
554             fjy1             = _mm256_add_ps(fjy1,ty);
555             fjz1             = _mm256_add_ps(fjz1,tz);
556
557             }
558
559             /**************************
560              * CALCULATE INTERACTIONS *
561              **************************/
562
563             if (gmx_mm256_any_lt(rsq22,rcutoff2))
564             {
565
566             /* REACTION-FIELD ELECTROSTATICS */
567             velec            = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
568             felec            = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
569
570             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
571
572             /* Update potential sum for this i atom from the interaction with this j atom. */
573             velec            = _mm256_and_ps(velec,cutoff_mask);
574             velecsum         = _mm256_add_ps(velecsum,velec);
575
576             fscal            = felec;
577
578             fscal            = _mm256_and_ps(fscal,cutoff_mask);
579
580             /* Calculate temporary vectorial force */
581             tx               = _mm256_mul_ps(fscal,dx22);
582             ty               = _mm256_mul_ps(fscal,dy22);
583             tz               = _mm256_mul_ps(fscal,dz22);
584
585             /* Update vectorial force */
586             fix2             = _mm256_add_ps(fix2,tx);
587             fiy2             = _mm256_add_ps(fiy2,ty);
588             fiz2             = _mm256_add_ps(fiz2,tz);
589
590             fjx2             = _mm256_add_ps(fjx2,tx);
591             fjy2             = _mm256_add_ps(fjy2,ty);
592             fjz2             = _mm256_add_ps(fjz2,tz);
593
594             }
595
596             /**************************
597              * CALCULATE INTERACTIONS *
598              **************************/
599
600             if (gmx_mm256_any_lt(rsq23,rcutoff2))
601             {
602
603             /* REACTION-FIELD ELECTROSTATICS */
604             velec            = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
605             felec            = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
606
607             cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
608
609             /* Update potential sum for this i atom from the interaction with this j atom. */
610             velec            = _mm256_and_ps(velec,cutoff_mask);
611             velecsum         = _mm256_add_ps(velecsum,velec);
612
613             fscal            = felec;
614
615             fscal            = _mm256_and_ps(fscal,cutoff_mask);
616
617             /* Calculate temporary vectorial force */
618             tx               = _mm256_mul_ps(fscal,dx23);
619             ty               = _mm256_mul_ps(fscal,dy23);
620             tz               = _mm256_mul_ps(fscal,dz23);
621
622             /* Update vectorial force */
623             fix2             = _mm256_add_ps(fix2,tx);
624             fiy2             = _mm256_add_ps(fiy2,ty);
625             fiz2             = _mm256_add_ps(fiz2,tz);
626
627             fjx3             = _mm256_add_ps(fjx3,tx);
628             fjy3             = _mm256_add_ps(fjy3,ty);
629             fjz3             = _mm256_add_ps(fjz3,tz);
630
631             }
632
633             /**************************
634              * CALCULATE INTERACTIONS *
635              **************************/
636
637             if (gmx_mm256_any_lt(rsq31,rcutoff2))
638             {
639
640             /* REACTION-FIELD ELECTROSTATICS */
641             velec            = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
642             felec            = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
643
644             cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
645
646             /* Update potential sum for this i atom from the interaction with this j atom. */
647             velec            = _mm256_and_ps(velec,cutoff_mask);
648             velecsum         = _mm256_add_ps(velecsum,velec);
649
650             fscal            = felec;
651
652             fscal            = _mm256_and_ps(fscal,cutoff_mask);
653
654             /* Calculate temporary vectorial force */
655             tx               = _mm256_mul_ps(fscal,dx31);
656             ty               = _mm256_mul_ps(fscal,dy31);
657             tz               = _mm256_mul_ps(fscal,dz31);
658
659             /* Update vectorial force */
660             fix3             = _mm256_add_ps(fix3,tx);
661             fiy3             = _mm256_add_ps(fiy3,ty);
662             fiz3             = _mm256_add_ps(fiz3,tz);
663
664             fjx1             = _mm256_add_ps(fjx1,tx);
665             fjy1             = _mm256_add_ps(fjy1,ty);
666             fjz1             = _mm256_add_ps(fjz1,tz);
667
668             }
669
670             /**************************
671              * CALCULATE INTERACTIONS *
672              **************************/
673
674             if (gmx_mm256_any_lt(rsq32,rcutoff2))
675             {
676
677             /* REACTION-FIELD ELECTROSTATICS */
678             velec            = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
679             felec            = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
680
681             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
682
683             /* Update potential sum for this i atom from the interaction with this j atom. */
684             velec            = _mm256_and_ps(velec,cutoff_mask);
685             velecsum         = _mm256_add_ps(velecsum,velec);
686
687             fscal            = felec;
688
689             fscal            = _mm256_and_ps(fscal,cutoff_mask);
690
691             /* Calculate temporary vectorial force */
692             tx               = _mm256_mul_ps(fscal,dx32);
693             ty               = _mm256_mul_ps(fscal,dy32);
694             tz               = _mm256_mul_ps(fscal,dz32);
695
696             /* Update vectorial force */
697             fix3             = _mm256_add_ps(fix3,tx);
698             fiy3             = _mm256_add_ps(fiy3,ty);
699             fiz3             = _mm256_add_ps(fiz3,tz);
700
701             fjx2             = _mm256_add_ps(fjx2,tx);
702             fjy2             = _mm256_add_ps(fjy2,ty);
703             fjz2             = _mm256_add_ps(fjz2,tz);
704
705             }
706
707             /**************************
708              * CALCULATE INTERACTIONS *
709              **************************/
710
711             if (gmx_mm256_any_lt(rsq33,rcutoff2))
712             {
713
714             /* REACTION-FIELD ELECTROSTATICS */
715             velec            = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
716             felec            = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
717
718             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
719
720             /* Update potential sum for this i atom from the interaction with this j atom. */
721             velec            = _mm256_and_ps(velec,cutoff_mask);
722             velecsum         = _mm256_add_ps(velecsum,velec);
723
724             fscal            = felec;
725
726             fscal            = _mm256_and_ps(fscal,cutoff_mask);
727
728             /* Calculate temporary vectorial force */
729             tx               = _mm256_mul_ps(fscal,dx33);
730             ty               = _mm256_mul_ps(fscal,dy33);
731             tz               = _mm256_mul_ps(fscal,dz33);
732
733             /* Update vectorial force */
734             fix3             = _mm256_add_ps(fix3,tx);
735             fiy3             = _mm256_add_ps(fiy3,ty);
736             fiz3             = _mm256_add_ps(fiz3,tz);
737
738             fjx3             = _mm256_add_ps(fjx3,tx);
739             fjy3             = _mm256_add_ps(fjy3,ty);
740             fjz3             = _mm256_add_ps(fjz3,tz);
741
742             }
743
744             fjptrA             = f+j_coord_offsetA;
745             fjptrB             = f+j_coord_offsetB;
746             fjptrC             = f+j_coord_offsetC;
747             fjptrD             = f+j_coord_offsetD;
748             fjptrE             = f+j_coord_offsetE;
749             fjptrF             = f+j_coord_offsetF;
750             fjptrG             = f+j_coord_offsetG;
751             fjptrH             = f+j_coord_offsetH;
752
753             gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
754                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
755                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
756
757             /* Inner loop uses 387 flops */
758         }
759
760         if(jidx<j_index_end)
761         {
762
763             /* Get j neighbor index, and coordinate index */
764             jnrlistA         = jjnr[jidx];
765             jnrlistB         = jjnr[jidx+1];
766             jnrlistC         = jjnr[jidx+2];
767             jnrlistD         = jjnr[jidx+3];
768             jnrlistE         = jjnr[jidx+4];
769             jnrlistF         = jjnr[jidx+5];
770             jnrlistG         = jjnr[jidx+6];
771             jnrlistH         = jjnr[jidx+7];
772             /* Sign of each element will be negative for non-real atoms.
773              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
774              * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
775              */
776             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
777                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
778                                             
779             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
780             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
781             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
782             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
783             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
784             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
785             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
786             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
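            /* The negative padding indices were clamped to 0 above only so that the
             * coordinate gathers below stay in bounds; the dummy_mask lanes are zeroed
             * again with _mm256_andnot_ps() before anything is accumulated.
             */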
787             j_coord_offsetA  = DIM*jnrA;
788             j_coord_offsetB  = DIM*jnrB;
789             j_coord_offsetC  = DIM*jnrC;
790             j_coord_offsetD  = DIM*jnrD;
791             j_coord_offsetE  = DIM*jnrE;
792             j_coord_offsetF  = DIM*jnrF;
793             j_coord_offsetG  = DIM*jnrG;
794             j_coord_offsetH  = DIM*jnrH;
795
796             /* load j atom coordinates */
797             gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
798                                                  x+j_coord_offsetC,x+j_coord_offsetD,
799                                                  x+j_coord_offsetE,x+j_coord_offsetF,
800                                                  x+j_coord_offsetG,x+j_coord_offsetH,
801                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
802                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
803
804             /* Calculate displacement vector */
805             dx00             = _mm256_sub_ps(ix0,jx0);
806             dy00             = _mm256_sub_ps(iy0,jy0);
807             dz00             = _mm256_sub_ps(iz0,jz0);
808             dx11             = _mm256_sub_ps(ix1,jx1);
809             dy11             = _mm256_sub_ps(iy1,jy1);
810             dz11             = _mm256_sub_ps(iz1,jz1);
811             dx12             = _mm256_sub_ps(ix1,jx2);
812             dy12             = _mm256_sub_ps(iy1,jy2);
813             dz12             = _mm256_sub_ps(iz1,jz2);
814             dx13             = _mm256_sub_ps(ix1,jx3);
815             dy13             = _mm256_sub_ps(iy1,jy3);
816             dz13             = _mm256_sub_ps(iz1,jz3);
817             dx21             = _mm256_sub_ps(ix2,jx1);
818             dy21             = _mm256_sub_ps(iy2,jy1);
819             dz21             = _mm256_sub_ps(iz2,jz1);
820             dx22             = _mm256_sub_ps(ix2,jx2);
821             dy22             = _mm256_sub_ps(iy2,jy2);
822             dz22             = _mm256_sub_ps(iz2,jz2);
823             dx23             = _mm256_sub_ps(ix2,jx3);
824             dy23             = _mm256_sub_ps(iy2,jy3);
825             dz23             = _mm256_sub_ps(iz2,jz3);
826             dx31             = _mm256_sub_ps(ix3,jx1);
827             dy31             = _mm256_sub_ps(iy3,jy1);
828             dz31             = _mm256_sub_ps(iz3,jz1);
829             dx32             = _mm256_sub_ps(ix3,jx2);
830             dy32             = _mm256_sub_ps(iy3,jy2);
831             dz32             = _mm256_sub_ps(iz3,jz2);
832             dx33             = _mm256_sub_ps(ix3,jx3);
833             dy33             = _mm256_sub_ps(iy3,jy3);
834             dz33             = _mm256_sub_ps(iz3,jz3);
835
836             /* Calculate squared distance and things based on it */
837             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
838             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
839             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
840             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
841             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
842             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
843             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
844             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
845             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
846             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
847
848             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
849             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
850             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
851             rinv13           = gmx_mm256_invsqrt_ps(rsq13);
852             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
853             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
854             rinv23           = gmx_mm256_invsqrt_ps(rsq23);
855             rinv31           = gmx_mm256_invsqrt_ps(rsq31);
856             rinv32           = gmx_mm256_invsqrt_ps(rsq32);
857             rinv33           = gmx_mm256_invsqrt_ps(rsq33);
858
859             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
860             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
861             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
862             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
863             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
864             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
865             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
866             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
867             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
868
869             fjx0             = _mm256_setzero_ps();
870             fjy0             = _mm256_setzero_ps();
871             fjz0             = _mm256_setzero_ps();
872             fjx1             = _mm256_setzero_ps();
873             fjy1             = _mm256_setzero_ps();
874             fjz1             = _mm256_setzero_ps();
875             fjx2             = _mm256_setzero_ps();
876             fjy2             = _mm256_setzero_ps();
877             fjz2             = _mm256_setzero_ps();
878             fjx3             = _mm256_setzero_ps();
879             fjy3             = _mm256_setzero_ps();
880             fjz3             = _mm256_setzero_ps();
881
882             /**************************
883              * CALCULATE INTERACTIONS *
884              **************************/
885
886             if (gmx_mm256_any_lt(rsq00,rcutoff2))
887             {
888
889             r00              = _mm256_mul_ps(rsq00,rinv00);
890             r00              = _mm256_andnot_ps(dummy_mask,r00);
891
892             /* Calculate table index by multiplying r with table scale and truncate to integer */
893             rt               = _mm256_mul_ps(r00,vftabscale);
894             vfitab           = _mm256_cvttps_epi32(rt);
895             vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
896             /* AVX1 does not support 256-bit integer operations, so now we go to 128-bit mode... */
897             vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
898             vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
899             vfitab_lo        = _mm_slli_epi32(vfitab_lo,3);
900             vfitab_hi        = _mm_slli_epi32(vfitab_hi,3);
901
902             /* CUBIC SPLINE TABLE DISPERSION */
903             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
904                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
905             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
906                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
907             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
908                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
909             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
910                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
911             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
912             Heps             = _mm256_mul_ps(vfeps,H);
913             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
914             VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
915             vvdw6            = _mm256_mul_ps(c6_00,VV);
916             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
917             fvdw6            = _mm256_mul_ps(c6_00,FF);
918
919             /* CUBIC SPLINE TABLE REPULSION */
920             vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
921             vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
922             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
923                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
924             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
925                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
926             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
927                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
928             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
929                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
930             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
931             Heps             = _mm256_mul_ps(vfeps,H);
932             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
933             VV               = _mm256_add_ps(Y,_mm256_mul_ps(vfeps,Fp));
934             vvdw12           = _mm256_mul_ps(c12_00,VV);
935             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
936             fvdw12           = _mm256_mul_ps(c12_00,FF);
937             vvdw             = _mm256_add_ps(vvdw12,vvdw6);
938             fvdw             = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
939
940             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
941
942             /* Update potential sum for this i atom from the interaction with this j atom. */
943             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
944             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
945             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
946
947             fscal            = fvdw;
948
949             fscal            = _mm256_and_ps(fscal,cutoff_mask);
950
951             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
952
953             /* Calculate temporary vectorial force */
954             tx               = _mm256_mul_ps(fscal,dx00);
955             ty               = _mm256_mul_ps(fscal,dy00);
956             tz               = _mm256_mul_ps(fscal,dz00);
957
958             /* Update vectorial force */
959             fix0             = _mm256_add_ps(fix0,tx);
960             fiy0             = _mm256_add_ps(fiy0,ty);
961             fiz0             = _mm256_add_ps(fiz0,tz);
962
963             fjx0             = _mm256_add_ps(fjx0,tx);
964             fjy0             = _mm256_add_ps(fjy0,ty);
965             fjz0             = _mm256_add_ps(fjz0,tz);
966
967             }
968
969             /**************************
970              * CALCULATE INTERACTIONS *
971              **************************/
972
973             if (gmx_mm256_any_lt(rsq11,rcutoff2))
974             {
975
976             /* REACTION-FIELD ELECTROSTATICS */
977             velec            = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_add_ps(rinv11,_mm256_mul_ps(krf,rsq11)),crf));
978             felec            = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
979
980             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
981
982             /* Update potential sum for this i atom from the interaction with this j atom. */
983             velec            = _mm256_and_ps(velec,cutoff_mask);
984             velec            = _mm256_andnot_ps(dummy_mask,velec);
985             velecsum         = _mm256_add_ps(velecsum,velec);
986
987             fscal            = felec;
988
989             fscal            = _mm256_and_ps(fscal,cutoff_mask);
990
991             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
992
993             /* Calculate temporary vectorial force */
994             tx               = _mm256_mul_ps(fscal,dx11);
995             ty               = _mm256_mul_ps(fscal,dy11);
996             tz               = _mm256_mul_ps(fscal,dz11);
997
998             /* Update vectorial force */
999             fix1             = _mm256_add_ps(fix1,tx);
1000             fiy1             = _mm256_add_ps(fiy1,ty);
1001             fiz1             = _mm256_add_ps(fiz1,tz);
1002
1003             fjx1             = _mm256_add_ps(fjx1,tx);
1004             fjy1             = _mm256_add_ps(fjy1,ty);
1005             fjz1             = _mm256_add_ps(fjz1,tz);
1006
1007             }
1008
1009             /**************************
1010              * CALCULATE INTERACTIONS *
1011              **************************/
1012
1013             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1014             {
1015
1016             /* REACTION-FIELD ELECTROSTATICS */
1017             velec            = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_add_ps(rinv12,_mm256_mul_ps(krf,rsq12)),crf));
1018             felec            = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
1019
1020             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1021
1022             /* Update potential sum for this i atom from the interaction with this j atom. */
1023             velec            = _mm256_and_ps(velec,cutoff_mask);
1024             velec            = _mm256_andnot_ps(dummy_mask,velec);
1025             velecsum         = _mm256_add_ps(velecsum,velec);
1026
1027             fscal            = felec;
1028
1029             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1030
1031             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1032
1033             /* Calculate temporary vectorial force */
1034             tx               = _mm256_mul_ps(fscal,dx12);
1035             ty               = _mm256_mul_ps(fscal,dy12);
1036             tz               = _mm256_mul_ps(fscal,dz12);
1037
1038             /* Update vectorial force */
1039             fix1             = _mm256_add_ps(fix1,tx);
1040             fiy1             = _mm256_add_ps(fiy1,ty);
1041             fiz1             = _mm256_add_ps(fiz1,tz);
1042
1043             fjx2             = _mm256_add_ps(fjx2,tx);
1044             fjy2             = _mm256_add_ps(fjy2,ty);
1045             fjz2             = _mm256_add_ps(fjz2,tz);
1046
1047             }
1048
1049             /**************************
1050              * CALCULATE INTERACTIONS *
1051              **************************/
1052
1053             if (gmx_mm256_any_lt(rsq13,rcutoff2))
1054             {
1055
1056             /* REACTION-FIELD ELECTROSTATICS */
1057             velec            = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_add_ps(rinv13,_mm256_mul_ps(krf,rsq13)),crf));
1058             felec            = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
1059
1060             cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
1061
1062             /* Update potential sum for this i atom from the interaction with this j atom. */
1063             velec            = _mm256_and_ps(velec,cutoff_mask);
1064             velec            = _mm256_andnot_ps(dummy_mask,velec);
1065             velecsum         = _mm256_add_ps(velecsum,velec);
1066
1067             fscal            = felec;
1068
1069             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1070
1071             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1072
1073             /* Calculate temporary vectorial force */
1074             tx               = _mm256_mul_ps(fscal,dx13);
1075             ty               = _mm256_mul_ps(fscal,dy13);
1076             tz               = _mm256_mul_ps(fscal,dz13);
1077
1078             /* Update vectorial force */
1079             fix1             = _mm256_add_ps(fix1,tx);
1080             fiy1             = _mm256_add_ps(fiy1,ty);
1081             fiz1             = _mm256_add_ps(fiz1,tz);
1082
1083             fjx3             = _mm256_add_ps(fjx3,tx);
1084             fjy3             = _mm256_add_ps(fjy3,ty);
1085             fjz3             = _mm256_add_ps(fjz3,tz);
1086
1087             }
1088
1089             /**************************
1090              * CALCULATE INTERACTIONS *
1091              **************************/
1092
1093             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1094             {
1095
1096             /* REACTION-FIELD ELECTROSTATICS */
1097             velec            = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_add_ps(rinv21,_mm256_mul_ps(krf,rsq21)),crf));
1098             felec            = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
1099
1100             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1101
1102             /* Update potential sum for this i atom from the interaction with this j atom. */
1103             velec            = _mm256_and_ps(velec,cutoff_mask);
1104             velec            = _mm256_andnot_ps(dummy_mask,velec);
1105             velecsum         = _mm256_add_ps(velecsum,velec);
1106
1107             fscal            = felec;
1108
1109             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1110
1111             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1112
1113             /* Calculate temporary vectorial force */
1114             tx               = _mm256_mul_ps(fscal,dx21);
1115             ty               = _mm256_mul_ps(fscal,dy21);
1116             tz               = _mm256_mul_ps(fscal,dz21);
1117
1118             /* Update vectorial force */
1119             fix2             = _mm256_add_ps(fix2,tx);
1120             fiy2             = _mm256_add_ps(fiy2,ty);
1121             fiz2             = _mm256_add_ps(fiz2,tz);
1122
1123             fjx1             = _mm256_add_ps(fjx1,tx);
1124             fjy1             = _mm256_add_ps(fjy1,ty);
1125             fjz1             = _mm256_add_ps(fjz1,tz);
1126
1127             }
1128
1129             /**************************
1130              * CALCULATE INTERACTIONS *
1131              **************************/
1132
1133             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1134             {
1135
1136             /* REACTION-FIELD ELECTROSTATICS */
1137             velec            = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_add_ps(rinv22,_mm256_mul_ps(krf,rsq22)),crf));
1138             felec            = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
1139
1140             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1141
1142             /* Update potential sum for this i atom from the interaction with this j atom. */
1143             velec            = _mm256_and_ps(velec,cutoff_mask);
1144             velec            = _mm256_andnot_ps(dummy_mask,velec);
1145             velecsum         = _mm256_add_ps(velecsum,velec);
1146
1147             fscal            = felec;
1148
1149             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1150
1151             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1152
1153             /* Calculate temporary vectorial force */
1154             tx               = _mm256_mul_ps(fscal,dx22);
1155             ty               = _mm256_mul_ps(fscal,dy22);
1156             tz               = _mm256_mul_ps(fscal,dz22);
1157
1158             /* Update vectorial force */
1159             fix2             = _mm256_add_ps(fix2,tx);
1160             fiy2             = _mm256_add_ps(fiy2,ty);
1161             fiz2             = _mm256_add_ps(fiz2,tz);
1162
1163             fjx2             = _mm256_add_ps(fjx2,tx);
1164             fjy2             = _mm256_add_ps(fjy2,ty);
1165             fjz2             = _mm256_add_ps(fjz2,tz);
1166
1167             }
1168
1169             /**************************
1170              * CALCULATE INTERACTIONS *
1171              **************************/
1172
1173             if (gmx_mm256_any_lt(rsq23,rcutoff2))
1174             {
1175
1176             /* REACTION-FIELD ELECTROSTATICS */
1177             velec            = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_add_ps(rinv23,_mm256_mul_ps(krf,rsq23)),crf));
1178             felec            = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
1179
1180             cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
1181
1182             /* Update potential sum for this i atom from the interaction with this j atom. */
1183             velec            = _mm256_and_ps(velec,cutoff_mask);
1184             velec            = _mm256_andnot_ps(dummy_mask,velec);
1185             velecsum         = _mm256_add_ps(velecsum,velec);
1186
1187             fscal            = felec;
1188
1189             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1190
1191             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1192
1193             /* Calculate temporary vectorial force */
1194             tx               = _mm256_mul_ps(fscal,dx23);
1195             ty               = _mm256_mul_ps(fscal,dy23);
1196             tz               = _mm256_mul_ps(fscal,dz23);
1197
1198             /* Update vectorial force */
1199             fix2             = _mm256_add_ps(fix2,tx);
1200             fiy2             = _mm256_add_ps(fiy2,ty);
1201             fiz2             = _mm256_add_ps(fiz2,tz);
1202
1203             fjx3             = _mm256_add_ps(fjx3,tx);
1204             fjy3             = _mm256_add_ps(fjy3,ty);
1205             fjz3             = _mm256_add_ps(fjz3,tz);
1206
1207             }
1208
1209             /**************************
1210              * CALCULATE INTERACTIONS *
1211              **************************/
1212
1213             if (gmx_mm256_any_lt(rsq31,rcutoff2))
1214             {
1215
1216             /* REACTION-FIELD ELECTROSTATICS */
1217             velec            = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_add_ps(rinv31,_mm256_mul_ps(krf,rsq31)),crf));
1218             felec            = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
1219
1220             cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
1221
1222             /* Update potential sum for this i atom from the interaction with this j atom. */
1223             velec            = _mm256_and_ps(velec,cutoff_mask);
1224             velec            = _mm256_andnot_ps(dummy_mask,velec);
1225             velecsum         = _mm256_add_ps(velecsum,velec);
1226
1227             fscal            = felec;
1228
1229             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1230
1231             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1232
1233             /* Calculate temporary vectorial force */
1234             tx               = _mm256_mul_ps(fscal,dx31);
1235             ty               = _mm256_mul_ps(fscal,dy31);
1236             tz               = _mm256_mul_ps(fscal,dz31);
1237
1238             /* Update vectorial force */
1239             fix3             = _mm256_add_ps(fix3,tx);
1240             fiy3             = _mm256_add_ps(fiy3,ty);
1241             fiz3             = _mm256_add_ps(fiz3,tz);
1242
1243             fjx1             = _mm256_add_ps(fjx1,tx);
1244             fjy1             = _mm256_add_ps(fjy1,ty);
1245             fjz1             = _mm256_add_ps(fjz1,tz);
1246
1247             }
1248
1249             /**************************
1250              * CALCULATE INTERACTIONS *
1251              **************************/
1252
1253             if (gmx_mm256_any_lt(rsq32,rcutoff2))
1254             {
1255
1256             /* REACTION-FIELD ELECTROSTATICS */
1257             velec            = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_add_ps(rinv32,_mm256_mul_ps(krf,rsq32)),crf));
1258             felec            = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
1259
1260             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
1261
1262             /* Update potential sum for this i atom from the interaction with this j atom. */
1263             velec            = _mm256_and_ps(velec,cutoff_mask);
1264             velec            = _mm256_andnot_ps(dummy_mask,velec);
1265             velecsum         = _mm256_add_ps(velecsum,velec);
1266
1267             fscal            = felec;
1268
1269             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1270
1271             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1272
1273             /* Calculate temporary vectorial force */
1274             tx               = _mm256_mul_ps(fscal,dx32);
1275             ty               = _mm256_mul_ps(fscal,dy32);
1276             tz               = _mm256_mul_ps(fscal,dz32);
1277
1278             /* Update vectorial force */
1279             fix3             = _mm256_add_ps(fix3,tx);
1280             fiy3             = _mm256_add_ps(fiy3,ty);
1281             fiz3             = _mm256_add_ps(fiz3,tz);
1282
1283             fjx2             = _mm256_add_ps(fjx2,tx);
1284             fjy2             = _mm256_add_ps(fjy2,ty);
1285             fjz2             = _mm256_add_ps(fjz2,tz);
1286
1287             }
1288
1289             /**************************
1290              * CALCULATE INTERACTIONS *
1291              **************************/
1292
1293             if (gmx_mm256_any_lt(rsq33,rcutoff2))
1294             {
1295
1296             /* REACTION-FIELD ELECTROSTATICS */
1297             velec            = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_add_ps(rinv33,_mm256_mul_ps(krf,rsq33)),crf));
1298             felec            = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
1299
1300             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
1301
1302             /* Update potential sum for this i atom from the interaction with this j atom. */
1303             velec            = _mm256_and_ps(velec,cutoff_mask);
1304             velec            = _mm256_andnot_ps(dummy_mask,velec);
1305             velecsum         = _mm256_add_ps(velecsum,velec);
1306
1307             fscal            = felec;
1308
1309             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1310
1311             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1312
1313             /* Calculate temporary vectorial force */
1314             tx               = _mm256_mul_ps(fscal,dx33);
1315             ty               = _mm256_mul_ps(fscal,dy33);
1316             tz               = _mm256_mul_ps(fscal,dz33);
1317
1318             /* Update vectorial force */
1319             fix3             = _mm256_add_ps(fix3,tx);
1320             fiy3             = _mm256_add_ps(fiy3,ty);
1321             fiz3             = _mm256_add_ps(fiz3,tz);
1322
1323             fjx3             = _mm256_add_ps(fjx3,tx);
1324             fjy3             = _mm256_add_ps(fjy3,ty);
1325             fjz3             = _mm256_add_ps(fjz3,tz);
1326
1327             }
1328
1329             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1330             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1331             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1332             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1333             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1334             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1335             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1336             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1337
1338             gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1339                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1340                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
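            /* Scatter the accumulated j forces back to memory; by Newton's third law the j atoms
             * receive the negative of the i contributions, hence a decrement rather than an add.
             * Padding lanes were zeroed through dummy_mask and their pointers target the local
             * scratch buffer, so they never touch the real force array.
             */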
1341
1342             /* Inner loop uses 388 flops */
1343         }
1344
1345         /* End of innermost loop */
1346
1347         gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1348                                                  f+i_coord_offset,fshift+i_shift_offset);
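        /* Reduce the eight lanes of each i-atom force accumulator and add the totals to the
         * force array and to the shift-force array used for the virial.
         */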
1349
1350         ggid                        = gid[iidx];
1351         /* Update potential energies */
1352         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1353         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
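        /* The potential accumulators are likewise reduced across lanes and added to the
         * electrostatic and VdW energy buffers of the energy group this i entry belongs to.
         */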
1354
1355         /* Increment number of inner iterations */
1356         inneriter                  += j_index_end - j_index_start;
1357
1358         /* Outer loop uses 26 flops */
1359     }
1360
1361     /* Increment number of outer iterations */
1362     outeriter        += nri;
1363
1364     /* Update outer/inner flops */
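    /* The constants below mirror the per-iteration flop comments in the loops above and feed
     * the nrnb counters used for the flop accounting reported at the end of the run.
     */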
1365
1366     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*388);
1367 }
1368 /*
1369  * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_F_avx_256_single
1370  * Electrostatics interaction: ReactionField
1371  * VdW interaction:            CubicSplineTable
1372  * Geometry:                   Water4-Water4
1373  * Calculate force/pot:        Force
1374  */
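/* Force-only variant: same loop structure as the VF kernel above, but the potential sums and
 * energy-group updates are skipped, which lowers the per-iteration flop count.
 */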
1375 void
1376 nb_kernel_ElecRFCut_VdwCSTab_GeomW4W4_F_avx_256_single
1377                     (t_nblist * gmx_restrict                nlist,
1378                      rvec * gmx_restrict                    xx,
1379                      rvec * gmx_restrict                    ff,
1380                      t_forcerec * gmx_restrict              fr,
1381                      t_mdatoms * gmx_restrict               mdatoms,
1382                      nb_kernel_data_t * gmx_restrict        kernel_data,
1383                      t_nrnb * gmx_restrict                  nrnb)
1384 {
1385     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1386      * just 0 for non-waters.
1387      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
1388      * jnr indices whose data occupy the eight positions of the 256-bit SIMD register.
1389      */
1390     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1391     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1392     int              jnrA,jnrB,jnrC,jnrD;
1393     int              jnrE,jnrF,jnrG,jnrH;
1394     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1395     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1396     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1397     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1398     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1399     real             rcutoff_scalar;
1400     real             *shiftvec,*fshift,*x,*f;
1401     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1402     real             scratch[4*DIM];
1403     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1404     real *           vdwioffsetptr0;
1405     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1406     real *           vdwioffsetptr1;
1407     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1408     real *           vdwioffsetptr2;
1409     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1410     real *           vdwioffsetptr3;
1411     __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1412     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1413     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1414     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1415     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1416     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1417     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1418     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
1419     __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1420     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1421     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1422     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1423     __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1424     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1425     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1426     __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1427     __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1428     __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1429     __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1430     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1431     real             *charge;
1432     int              nvdwtype;
1433     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1434     int              *vdwtype;
1435     real             *vdwparam;
1436     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
1437     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
1438     __m256i          vfitab;
1439     __m128i          vfitab_lo,vfitab_hi;
1440     __m128i          ifour       = _mm_set1_epi32(4);
1441     __m256           rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1442     real             *vftab;
1443     __m256           dummy_mask,cutoff_mask;
1444     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1445     __m256           one     = _mm256_set1_ps(1.0);
1446     __m256           two     = _mm256_set1_ps(2.0);
1447     x                = xx[0];
1448     f                = ff[0];
1449
1450     nri              = nlist->nri;
1451     iinr             = nlist->iinr;
1452     jindex           = nlist->jindex;
1453     jjnr             = nlist->jjnr;
1454     shiftidx         = nlist->shift;
1455     gid              = nlist->gid;
1456     shiftvec         = fr->shift_vec[0];
1457     fshift           = fr->fshift[0];
1458     facel            = _mm256_set1_ps(fr->epsfac);
1459     charge           = mdatoms->chargeA;
1460     krf              = _mm256_set1_ps(fr->ic->k_rf);
1461     krf2             = _mm256_set1_ps(fr->ic->k_rf*2.0);
1462     crf              = _mm256_set1_ps(fr->ic->c_rf);
1463     nvdwtype         = fr->ntype;
1464     vdwparam         = fr->nbfp;
1465     vdwtype          = mdatoms->typeA;
1466
1467     vftab            = kernel_data->table_vdw->data;
1468     vftabscale       = _mm256_set1_ps(kernel_data->table_vdw->scale);
1469
1470     /* Setup water-specific parameters */
1471     inr              = nlist->iinr[0];
1472     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1473     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1474     iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1475     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1476
1477     jq1              = _mm256_set1_ps(charge[inr+1]);
1478     jq2              = _mm256_set1_ps(charge[inr+2]);
1479     jq3              = _mm256_set1_ps(charge[inr+3]);
1480     vdwjidx0A        = 2*vdwtype[inr+0];
1481     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1482     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1483     qq11             = _mm256_mul_ps(iq1,jq1);
1484     qq12             = _mm256_mul_ps(iq1,jq2);
1485     qq13             = _mm256_mul_ps(iq1,jq3);
1486     qq21             = _mm256_mul_ps(iq2,jq1);
1487     qq22             = _mm256_mul_ps(iq2,jq2);
1488     qq23             = _mm256_mul_ps(iq2,jq3);
1489     qq31             = _mm256_mul_ps(iq3,jq1);
1490     qq32             = _mm256_mul_ps(iq3,jq2);
1491     qq33             = _mm256_mul_ps(iq3,jq3);
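    /* In the Water4-Water4 kernel every molecule shares the same four-site topology, so charges
     * and LJ parameters are read once from the first i molecule: site 0 carries only the tabulated
     * VdW interaction and sites 1-3 carry only charges. This matches the layout of a typical
     * four-site water model such as TIP4P, but the code itself only relies on the indices.
     */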
1492
1493     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1494     rcutoff_scalar   = fr->rcoulomb;
1495     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
1496     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
1497
1498     /* Avoid stupid compiler warnings */
1499     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1500     j_coord_offsetA = 0;
1501     j_coord_offsetB = 0;
1502     j_coord_offsetC = 0;
1503     j_coord_offsetD = 0;
1504     j_coord_offsetE = 0;
1505     j_coord_offsetF = 0;
1506     j_coord_offsetG = 0;
1507     j_coord_offsetH = 0;
1508
1509     outeriter        = 0;
1510     inneriter        = 0;
1511
1512     for(iidx=0;iidx<4*DIM;iidx++)
1513     {
1514         scratch[iidx] = 0.0;
1515     }
1516
1517     /* Start outer loop over neighborlists */
1518     for(iidx=0; iidx<nri; iidx++)
1519     {
1520         /* Load shift vector for this list */
1521         i_shift_offset   = DIM*shiftidx[iidx];
1522
1523         /* Load limits for loop over neighbors */
1524         j_index_start    = jindex[iidx];
1525         j_index_end      = jindex[iidx+1];
1526
1527         /* Get outer coordinate index */
1528         inr              = iinr[iidx];
1529         i_coord_offset   = DIM*inr;
1530
1531         /* Load i particle coords and add shift vector */
1532         gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1533                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1534
1535         fix0             = _mm256_setzero_ps();
1536         fiy0             = _mm256_setzero_ps();
1537         fiz0             = _mm256_setzero_ps();
1538         fix1             = _mm256_setzero_ps();
1539         fiy1             = _mm256_setzero_ps();
1540         fiz1             = _mm256_setzero_ps();
1541         fix2             = _mm256_setzero_ps();
1542         fiy2             = _mm256_setzero_ps();
1543         fiz2             = _mm256_setzero_ps();
1544         fix3             = _mm256_setzero_ps();
1545         fiy3             = _mm256_setzero_ps();
1546         fiz3             = _mm256_setzero_ps();
1547
1548         /* Start inner kernel loop */
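        /* This loop only processes complete groups of eight j entries (jjnr[jidx+7]>=0); any
         * remainder, padded with negative indices, is handled by the masked epilogue further down.
         */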
1549         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1550         {
1551
1552             /* Get j neighbor index, and coordinate index */
1553             jnrA             = jjnr[jidx];
1554             jnrB             = jjnr[jidx+1];
1555             jnrC             = jjnr[jidx+2];
1556             jnrD             = jjnr[jidx+3];
1557             jnrE             = jjnr[jidx+4];
1558             jnrF             = jjnr[jidx+5];
1559             jnrG             = jjnr[jidx+6];
1560             jnrH             = jjnr[jidx+7];
1561             j_coord_offsetA  = DIM*jnrA;
1562             j_coord_offsetB  = DIM*jnrB;
1563             j_coord_offsetC  = DIM*jnrC;
1564             j_coord_offsetD  = DIM*jnrD;
1565             j_coord_offsetE  = DIM*jnrE;
1566             j_coord_offsetF  = DIM*jnrF;
1567             j_coord_offsetG  = DIM*jnrG;
1568             j_coord_offsetH  = DIM*jnrH;
1569
1570             /* load j atom coordinates */
1571             gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1572                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1573                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1574                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1575                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1576                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
1577
1578             /* Calculate displacement vector */
1579             dx00             = _mm256_sub_ps(ix0,jx0);
1580             dy00             = _mm256_sub_ps(iy0,jy0);
1581             dz00             = _mm256_sub_ps(iz0,jz0);
1582             dx11             = _mm256_sub_ps(ix1,jx1);
1583             dy11             = _mm256_sub_ps(iy1,jy1);
1584             dz11             = _mm256_sub_ps(iz1,jz1);
1585             dx12             = _mm256_sub_ps(ix1,jx2);
1586             dy12             = _mm256_sub_ps(iy1,jy2);
1587             dz12             = _mm256_sub_ps(iz1,jz2);
1588             dx13             = _mm256_sub_ps(ix1,jx3);
1589             dy13             = _mm256_sub_ps(iy1,jy3);
1590             dz13             = _mm256_sub_ps(iz1,jz3);
1591             dx21             = _mm256_sub_ps(ix2,jx1);
1592             dy21             = _mm256_sub_ps(iy2,jy1);
1593             dz21             = _mm256_sub_ps(iz2,jz1);
1594             dx22             = _mm256_sub_ps(ix2,jx2);
1595             dy22             = _mm256_sub_ps(iy2,jy2);
1596             dz22             = _mm256_sub_ps(iz2,jz2);
1597             dx23             = _mm256_sub_ps(ix2,jx3);
1598             dy23             = _mm256_sub_ps(iy2,jy3);
1599             dz23             = _mm256_sub_ps(iz2,jz3);
1600             dx31             = _mm256_sub_ps(ix3,jx1);
1601             dy31             = _mm256_sub_ps(iy3,jy1);
1602             dz31             = _mm256_sub_ps(iz3,jz1);
1603             dx32             = _mm256_sub_ps(ix3,jx2);
1604             dy32             = _mm256_sub_ps(iy3,jy2);
1605             dz32             = _mm256_sub_ps(iz3,jz2);
1606             dx33             = _mm256_sub_ps(ix3,jx3);
1607             dy33             = _mm256_sub_ps(iy3,jy3);
1608             dz33             = _mm256_sub_ps(iz3,jz3);
1609
1610             /* Calculate squared distance and things based on it */
1611             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1612             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1613             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1614             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
1615             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1616             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1617             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
1618             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
1619             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
1620             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
1621
1622             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1623             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1624             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1625             rinv13           = gmx_mm256_invsqrt_ps(rsq13);
1626             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1627             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1628             rinv23           = gmx_mm256_invsqrt_ps(rsq23);
1629             rinv31           = gmx_mm256_invsqrt_ps(rsq31);
1630             rinv32           = gmx_mm256_invsqrt_ps(rsq32);
1631             rinv33           = gmx_mm256_invsqrt_ps(rsq33);
1632
1633             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1634             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1635             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
1636             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1637             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1638             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
1639             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
1640             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
1641             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
1642
1643             fjx0             = _mm256_setzero_ps();
1644             fjy0             = _mm256_setzero_ps();
1645             fjz0             = _mm256_setzero_ps();
1646             fjx1             = _mm256_setzero_ps();
1647             fjy1             = _mm256_setzero_ps();
1648             fjz1             = _mm256_setzero_ps();
1649             fjx2             = _mm256_setzero_ps();
1650             fjy2             = _mm256_setzero_ps();
1651             fjz2             = _mm256_setzero_ps();
1652             fjx3             = _mm256_setzero_ps();
1653             fjy3             = _mm256_setzero_ps();
1654             fjz3             = _mm256_setzero_ps();
1655
1656             /**************************
1657              * CALCULATE INTERACTIONS *
1658              **************************/
1659
1660             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1661             {
1662
1663             r00              = _mm256_mul_ps(rsq00,rinv00);
1664
1665             /* Calculate table index by multiplying r with table scale and truncate to integer */
1666             rt               = _mm256_mul_ps(r00,vftabscale);
1667             vfitab           = _mm256_cvttps_epi32(rt);
1668             vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
1669             /* AVX1 does not support 256-bit integer operations, so the index arithmetic continues on two 128-bit halves */
1670             vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
1671             vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
1672             vfitab_lo        = _mm_slli_epi32(vfitab_lo,3);
1673             vfitab_hi        = _mm_slli_epi32(vfitab_hi,3);
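            /* The shift by 3 multiplies the truncated index by 8: each table point stores four
             * cubic-spline coefficients (Y,F,G,H) for dispersion followed by four for repulsion,
             * and the repulsion block is reached below by adding ifour to the index.
             */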
1674
1675             /* CUBIC SPLINE TABLE DISPERSION */
1676             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
1677                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
1678             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
1679                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
1680             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
1681                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
1682             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
1683                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
1684             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
1685             Heps             = _mm256_mul_ps(vfeps,H);
1686             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
1687             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
1688             fvdw6            = _mm256_mul_ps(c6_00,FF);
1689
1690             /* CUBIC SPLINE TABLE REPULSION */
1691             vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
1692             vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
1693             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
1694                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
1695             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
1696                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
1697             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
1698                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
1699             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
1700                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
1701             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
1702             Heps             = _mm256_mul_ps(vfeps,H);
1703             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
1704             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
1705             fvdw12           = _mm256_mul_ps(c12_00,FF);
1706             fvdw             = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
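            /* FF is the spline derivative dV/d(eps); scaling by the table spacing and 1/r and
             * flipping the sign through the XOR with signbit yields the scalar force
             * -(c6*FF_disp + c12*FF_rep)*tabscale/r applied below.
             */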
1707
1708             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1709
1710             fscal            = fvdw;
1711
1712             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1713
1714             /* Calculate temporary vectorial force */
1715             tx               = _mm256_mul_ps(fscal,dx00);
1716             ty               = _mm256_mul_ps(fscal,dy00);
1717             tz               = _mm256_mul_ps(fscal,dz00);
1718
1719             /* Update vectorial force */
1720             fix0             = _mm256_add_ps(fix0,tx);
1721             fiy0             = _mm256_add_ps(fiy0,ty);
1722             fiz0             = _mm256_add_ps(fiz0,tz);
1723
1724             fjx0             = _mm256_add_ps(fjx0,tx);
1725             fjy0             = _mm256_add_ps(fjy0,ty);
1726             fjz0             = _mm256_add_ps(fjz0,tz);
1727
1728             }
1729
1730             /**************************
1731              * CALCULATE INTERACTIONS *
1732              **************************/
1733
1734             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1735             {
1736
1737             /* REACTION-FIELD ELECTROSTATICS */
1738             felec            = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
1739
1740             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1741
1742             fscal            = felec;
1743
1744             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1745
1746             /* Calculate temporary vectorial force */
1747             tx               = _mm256_mul_ps(fscal,dx11);
1748             ty               = _mm256_mul_ps(fscal,dy11);
1749             tz               = _mm256_mul_ps(fscal,dz11);
1750
1751             /* Update vectorial force */
1752             fix1             = _mm256_add_ps(fix1,tx);
1753             fiy1             = _mm256_add_ps(fiy1,ty);
1754             fiz1             = _mm256_add_ps(fiz1,tz);
1755
1756             fjx1             = _mm256_add_ps(fjx1,tx);
1757             fjy1             = _mm256_add_ps(fjy1,ty);
1758             fjz1             = _mm256_add_ps(fjz1,tz);
1759
1760             }
1761
1762             /**************************
1763              * CALCULATE INTERACTIONS *
1764              **************************/
1765
1766             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1767             {
1768
1769             /* REACTION-FIELD ELECTROSTATICS */
1770             felec            = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
1771
1772             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1773
1774             fscal            = felec;
1775
1776             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1777
1778             /* Calculate temporary vectorial force */
1779             tx               = _mm256_mul_ps(fscal,dx12);
1780             ty               = _mm256_mul_ps(fscal,dy12);
1781             tz               = _mm256_mul_ps(fscal,dz12);
1782
1783             /* Update vectorial force */
1784             fix1             = _mm256_add_ps(fix1,tx);
1785             fiy1             = _mm256_add_ps(fiy1,ty);
1786             fiz1             = _mm256_add_ps(fiz1,tz);
1787
1788             fjx2             = _mm256_add_ps(fjx2,tx);
1789             fjy2             = _mm256_add_ps(fjy2,ty);
1790             fjz2             = _mm256_add_ps(fjz2,tz);
1791
1792             }
1793
1794             /**************************
1795              * CALCULATE INTERACTIONS *
1796              **************************/
1797
1798             if (gmx_mm256_any_lt(rsq13,rcutoff2))
1799             {
1800
1801             /* REACTION-FIELD ELECTROSTATICS */
1802             felec            = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
1803
1804             cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
1805
1806             fscal            = felec;
1807
1808             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1809
1810             /* Calculate temporary vectorial force */
1811             tx               = _mm256_mul_ps(fscal,dx13);
1812             ty               = _mm256_mul_ps(fscal,dy13);
1813             tz               = _mm256_mul_ps(fscal,dz13);
1814
1815             /* Update vectorial force */
1816             fix1             = _mm256_add_ps(fix1,tx);
1817             fiy1             = _mm256_add_ps(fiy1,ty);
1818             fiz1             = _mm256_add_ps(fiz1,tz);
1819
1820             fjx3             = _mm256_add_ps(fjx3,tx);
1821             fjy3             = _mm256_add_ps(fjy3,ty);
1822             fjz3             = _mm256_add_ps(fjz3,tz);
1823
1824             }
1825
1826             /**************************
1827              * CALCULATE INTERACTIONS *
1828              **************************/
1829
1830             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1831             {
1832
1833             /* REACTION-FIELD ELECTROSTATICS */
1834             felec            = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
1835
1836             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1837
1838             fscal            = felec;
1839
1840             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1841
1842             /* Calculate temporary vectorial force */
1843             tx               = _mm256_mul_ps(fscal,dx21);
1844             ty               = _mm256_mul_ps(fscal,dy21);
1845             tz               = _mm256_mul_ps(fscal,dz21);
1846
1847             /* Update vectorial force */
1848             fix2             = _mm256_add_ps(fix2,tx);
1849             fiy2             = _mm256_add_ps(fiy2,ty);
1850             fiz2             = _mm256_add_ps(fiz2,tz);
1851
1852             fjx1             = _mm256_add_ps(fjx1,tx);
1853             fjy1             = _mm256_add_ps(fjy1,ty);
1854             fjz1             = _mm256_add_ps(fjz1,tz);
1855
1856             }
1857
1858             /**************************
1859              * CALCULATE INTERACTIONS *
1860              **************************/
1861
1862             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1863             {
1864
1865             /* REACTION-FIELD ELECTROSTATICS */
1866             felec            = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
1867
1868             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1869
1870             fscal            = felec;
1871
1872             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1873
1874             /* Calculate temporary vectorial force */
1875             tx               = _mm256_mul_ps(fscal,dx22);
1876             ty               = _mm256_mul_ps(fscal,dy22);
1877             tz               = _mm256_mul_ps(fscal,dz22);
1878
1879             /* Update vectorial force */
1880             fix2             = _mm256_add_ps(fix2,tx);
1881             fiy2             = _mm256_add_ps(fiy2,ty);
1882             fiz2             = _mm256_add_ps(fiz2,tz);
1883
1884             fjx2             = _mm256_add_ps(fjx2,tx);
1885             fjy2             = _mm256_add_ps(fjy2,ty);
1886             fjz2             = _mm256_add_ps(fjz2,tz);
1887
1888             }
1889
1890             /**************************
1891              * CALCULATE INTERACTIONS *
1892              **************************/
1893
1894             if (gmx_mm256_any_lt(rsq23,rcutoff2))
1895             {
1896
1897             /* REACTION-FIELD ELECTROSTATICS */
1898             felec            = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
1899
1900             cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
1901
1902             fscal            = felec;
1903
1904             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1905
1906             /* Calculate temporary vectorial force */
1907             tx               = _mm256_mul_ps(fscal,dx23);
1908             ty               = _mm256_mul_ps(fscal,dy23);
1909             tz               = _mm256_mul_ps(fscal,dz23);
1910
1911             /* Update vectorial force */
1912             fix2             = _mm256_add_ps(fix2,tx);
1913             fiy2             = _mm256_add_ps(fiy2,ty);
1914             fiz2             = _mm256_add_ps(fiz2,tz);
1915
1916             fjx3             = _mm256_add_ps(fjx3,tx);
1917             fjy3             = _mm256_add_ps(fjy3,ty);
1918             fjz3             = _mm256_add_ps(fjz3,tz);
1919
1920             }
1921
1922             /**************************
1923              * CALCULATE INTERACTIONS *
1924              **************************/
1925
1926             if (gmx_mm256_any_lt(rsq31,rcutoff2))
1927             {
1928
1929             /* REACTION-FIELD ELECTROSTATICS */
1930             felec            = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
1931
1932             cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
1933
1934             fscal            = felec;
1935
1936             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1937
1938             /* Calculate temporary vectorial force */
1939             tx               = _mm256_mul_ps(fscal,dx31);
1940             ty               = _mm256_mul_ps(fscal,dy31);
1941             tz               = _mm256_mul_ps(fscal,dz31);
1942
1943             /* Update vectorial force */
1944             fix3             = _mm256_add_ps(fix3,tx);
1945             fiy3             = _mm256_add_ps(fiy3,ty);
1946             fiz3             = _mm256_add_ps(fiz3,tz);
1947
1948             fjx1             = _mm256_add_ps(fjx1,tx);
1949             fjy1             = _mm256_add_ps(fjy1,ty);
1950             fjz1             = _mm256_add_ps(fjz1,tz);
1951
1952             }
1953
1954             /**************************
1955              * CALCULATE INTERACTIONS *
1956              **************************/
1957
1958             if (gmx_mm256_any_lt(rsq32,rcutoff2))
1959             {
1960
1961             /* REACTION-FIELD ELECTROSTATICS */
1962             felec            = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
1963
1964             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
1965
1966             fscal            = felec;
1967
1968             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1969
1970             /* Calculate temporary vectorial force */
1971             tx               = _mm256_mul_ps(fscal,dx32);
1972             ty               = _mm256_mul_ps(fscal,dy32);
1973             tz               = _mm256_mul_ps(fscal,dz32);
1974
1975             /* Update vectorial force */
1976             fix3             = _mm256_add_ps(fix3,tx);
1977             fiy3             = _mm256_add_ps(fiy3,ty);
1978             fiz3             = _mm256_add_ps(fiz3,tz);
1979
1980             fjx2             = _mm256_add_ps(fjx2,tx);
1981             fjy2             = _mm256_add_ps(fjy2,ty);
1982             fjz2             = _mm256_add_ps(fjz2,tz);
1983
1984             }
1985
1986             /**************************
1987              * CALCULATE INTERACTIONS *
1988              **************************/
1989
1990             if (gmx_mm256_any_lt(rsq33,rcutoff2))
1991             {
1992
1993             /* REACTION-FIELD ELECTROSTATICS */
1994             felec            = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
1995
1996             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
1997
1998             fscal            = felec;
1999
2000             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2001
2002             /* Calculate temporary vectorial force */
2003             tx               = _mm256_mul_ps(fscal,dx33);
2004             ty               = _mm256_mul_ps(fscal,dy33);
2005             tz               = _mm256_mul_ps(fscal,dz33);
2006
2007             /* Update vectorial force */
2008             fix3             = _mm256_add_ps(fix3,tx);
2009             fiy3             = _mm256_add_ps(fiy3,ty);
2010             fiz3             = _mm256_add_ps(fiz3,tz);
2011
2012             fjx3             = _mm256_add_ps(fjx3,tx);
2013             fjy3             = _mm256_add_ps(fjy3,ty);
2014             fjz3             = _mm256_add_ps(fjz3,tz);
2015
2016             }
2017
2018             fjptrA             = f+j_coord_offsetA;
2019             fjptrB             = f+j_coord_offsetB;
2020             fjptrC             = f+j_coord_offsetC;
2021             fjptrD             = f+j_coord_offsetD;
2022             fjptrE             = f+j_coord_offsetE;
2023             fjptrF             = f+j_coord_offsetF;
2024             fjptrG             = f+j_coord_offsetG;
2025             fjptrH             = f+j_coord_offsetH;
2026
2027             gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2028                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2029                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2030
2031             /* Inner loop uses 324 flops */
2032         }
2033
2034         if(jidx<j_index_end)
2035         {
2036
2037             /* Get j neighbor index, and coordinate index */
2038             jnrlistA         = jjnr[jidx];
2039             jnrlistB         = jjnr[jidx+1];
2040             jnrlistC         = jjnr[jidx+2];
2041             jnrlistD         = jjnr[jidx+3];
2042             jnrlistE         = jjnr[jidx+4];
2043             jnrlistF         = jjnr[jidx+5];
2044             jnrlistG         = jjnr[jidx+6];
2045             jnrlistH         = jjnr[jidx+7];
2046             /* The sign of each element is negative for non-real (padding) atoms.
2047              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2048              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
2049              */
2050             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
2051                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
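            /* AVX1 has no 256-bit integer compare, so the mask is built from two 128-bit
             * comparisons of jjnr against zero and then merged into one 256-bit float mask.
             */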
2052                                             
2053             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2054             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2055             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2056             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2057             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
2058             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
2059             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
2060             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
2061             j_coord_offsetA  = DIM*jnrA;
2062             j_coord_offsetB  = DIM*jnrB;
2063             j_coord_offsetC  = DIM*jnrC;
2064             j_coord_offsetD  = DIM*jnrD;
2065             j_coord_offsetE  = DIM*jnrE;
2066             j_coord_offsetF  = DIM*jnrF;
2067             j_coord_offsetG  = DIM*jnrG;
2068             j_coord_offsetH  = DIM*jnrH;
2069
2070             /* load j atom coordinates */
2071             gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2072                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2073                                                  x+j_coord_offsetE,x+j_coord_offsetF,
2074                                                  x+j_coord_offsetG,x+j_coord_offsetH,
2075                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
2076                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
2077
2078             /* Calculate displacement vector */
2079             dx00             = _mm256_sub_ps(ix0,jx0);
2080             dy00             = _mm256_sub_ps(iy0,jy0);
2081             dz00             = _mm256_sub_ps(iz0,jz0);
2082             dx11             = _mm256_sub_ps(ix1,jx1);
2083             dy11             = _mm256_sub_ps(iy1,jy1);
2084             dz11             = _mm256_sub_ps(iz1,jz1);
2085             dx12             = _mm256_sub_ps(ix1,jx2);
2086             dy12             = _mm256_sub_ps(iy1,jy2);
2087             dz12             = _mm256_sub_ps(iz1,jz2);
2088             dx13             = _mm256_sub_ps(ix1,jx3);
2089             dy13             = _mm256_sub_ps(iy1,jy3);
2090             dz13             = _mm256_sub_ps(iz1,jz3);
2091             dx21             = _mm256_sub_ps(ix2,jx1);
2092             dy21             = _mm256_sub_ps(iy2,jy1);
2093             dz21             = _mm256_sub_ps(iz2,jz1);
2094             dx22             = _mm256_sub_ps(ix2,jx2);
2095             dy22             = _mm256_sub_ps(iy2,jy2);
2096             dz22             = _mm256_sub_ps(iz2,jz2);
2097             dx23             = _mm256_sub_ps(ix2,jx3);
2098             dy23             = _mm256_sub_ps(iy2,jy3);
2099             dz23             = _mm256_sub_ps(iz2,jz3);
2100             dx31             = _mm256_sub_ps(ix3,jx1);
2101             dy31             = _mm256_sub_ps(iy3,jy1);
2102             dz31             = _mm256_sub_ps(iz3,jz1);
2103             dx32             = _mm256_sub_ps(ix3,jx2);
2104             dy32             = _mm256_sub_ps(iy3,jy2);
2105             dz32             = _mm256_sub_ps(iz3,jz2);
2106             dx33             = _mm256_sub_ps(ix3,jx3);
2107             dy33             = _mm256_sub_ps(iy3,jy3);
2108             dz33             = _mm256_sub_ps(iz3,jz3);
2109
2110             /* Calculate squared distance and things based on it */
2111             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2112             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2113             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2114             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
2115             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2116             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2117             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
2118             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
2119             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
2120             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
2121
2122             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
2123             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
2124             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
2125             rinv13           = gmx_mm256_invsqrt_ps(rsq13);
2126             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
2127             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
2128             rinv23           = gmx_mm256_invsqrt_ps(rsq23);
2129             rinv31           = gmx_mm256_invsqrt_ps(rsq31);
2130             rinv32           = gmx_mm256_invsqrt_ps(rsq32);
2131             rinv33           = gmx_mm256_invsqrt_ps(rsq33);
2132
2133             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2134             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2135             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
2136             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2137             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2138             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
2139             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
2140             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
2141             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
2142
2143             fjx0             = _mm256_setzero_ps();
2144             fjy0             = _mm256_setzero_ps();
2145             fjz0             = _mm256_setzero_ps();
2146             fjx1             = _mm256_setzero_ps();
2147             fjy1             = _mm256_setzero_ps();
2148             fjz1             = _mm256_setzero_ps();
2149             fjx2             = _mm256_setzero_ps();
2150             fjy2             = _mm256_setzero_ps();
2151             fjz2             = _mm256_setzero_ps();
2152             fjx3             = _mm256_setzero_ps();
2153             fjy3             = _mm256_setzero_ps();
2154             fjz3             = _mm256_setzero_ps();
2155
2156             /**************************
2157              * CALCULATE INTERACTIONS *
2158              **************************/
2159
2160             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2161             {
2162
2163             r00              = _mm256_mul_ps(rsq00,rinv00);
2164             r00              = _mm256_andnot_ps(dummy_mask,r00);
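            /* Zero r for the padding lanes so the table index derived from it stays at the first
             * table entry instead of being computed from an undefined distance.
             */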
2165
2166             /* Calculate table index by multiplying r with table scale and truncate to integer */
2167             rt               = _mm256_mul_ps(r00,vftabscale);
2168             vfitab           = _mm256_cvttps_epi32(rt);
2169             vfeps            = _mm256_sub_ps(rt,_mm256_round_ps(rt, _MM_FROUND_FLOOR));
2170             /* AVX1 does not support 256-bit integer operations, so the index arithmetic continues on two 128-bit halves */
2171             vfitab_lo        = _mm256_extractf128_si256(vfitab,0x0);
2172             vfitab_hi        = _mm256_extractf128_si256(vfitab,0x1);
2173             vfitab_lo        = _mm_slli_epi32(vfitab_lo,3);
2174             vfitab_hi        = _mm_slli_epi32(vfitab_hi,3);
2175
2176             /* CUBIC SPLINE TABLE DISPERSION */
2177             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
2178                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
2179             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
2180                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
2181             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
2182                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
2183             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
2184                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
2185             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
2186             Heps             = _mm256_mul_ps(vfeps,H);
2187             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
2188             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
2189             fvdw6            = _mm256_mul_ps(c6_00,FF);
2190
2191             /* CUBIC SPLINE TABLE REPULSION */
2192             vfitab_lo        = _mm_add_epi32(vfitab_lo,ifour);
2193             vfitab_hi        = _mm_add_epi32(vfitab_hi,ifour);
2194             Y                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,0)),
2195                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,0)));
2196             F                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,1)),
2197                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,1)));
2198             G                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,2)),
2199                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,2)));
2200             H                = gmx_mm256_set_m128(_mm_load_ps(vftab + _mm_extract_epi32(vfitab_hi,3)),
2201                                                   _mm_load_ps(vftab + _mm_extract_epi32(vfitab_lo,3)));
2202             GMX_MM256_HALFTRANSPOSE4_PS(Y,F,G,H);
2203             Heps             = _mm256_mul_ps(vfeps,H);
2204             Fp               = _mm256_add_ps(F,_mm256_mul_ps(vfeps,_mm256_add_ps(G,Heps)));
2205             FF               = _mm256_add_ps(Fp,_mm256_mul_ps(vfeps,_mm256_add_ps(G,_mm256_add_ps(Heps,Heps))));
2206             fvdw12           = _mm256_mul_ps(c12_00,FF);
2207             fvdw             = _mm256_xor_ps(signbit,_mm256_mul_ps(_mm256_add_ps(fvdw6,fvdw12),_mm256_mul_ps(vftabscale,rinv00)));
2208
2209             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2210
2211             fscal            = fvdw;
2212
2213             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2214
2215             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2216
2217             /* Calculate temporary vectorial force */
2218             tx               = _mm256_mul_ps(fscal,dx00);
2219             ty               = _mm256_mul_ps(fscal,dy00);
2220             tz               = _mm256_mul_ps(fscal,dz00);
2221
2222             /* Update vectorial force */
2223             fix0             = _mm256_add_ps(fix0,tx);
2224             fiy0             = _mm256_add_ps(fiy0,ty);
2225             fiz0             = _mm256_add_ps(fiz0,tz);
2226
2227             fjx0             = _mm256_add_ps(fjx0,tx);
2228             fjy0             = _mm256_add_ps(fjy0,ty);
2229             fjz0             = _mm256_add_ps(fjz0,tz);
2230
2231             }
2232
2233             /**************************
2234              * CALCULATE INTERACTIONS *
2235              **************************/
2236
2237             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2238             {
2239
2240             /* REACTION-FIELD ELECTROSTATICS */
2241             felec            = _mm256_mul_ps(qq11,_mm256_sub_ps(_mm256_mul_ps(rinv11,rinvsq11),krf2));
2242
2243             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2244
2245             fscal            = felec;
2246
2247             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2248
2249             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2250
2251             /* Calculate temporary vectorial force */
2252             tx               = _mm256_mul_ps(fscal,dx11);
2253             ty               = _mm256_mul_ps(fscal,dy11);
2254             tz               = _mm256_mul_ps(fscal,dz11);
2255
2256             /* Update vectorial force */
2257             fix1             = _mm256_add_ps(fix1,tx);
2258             fiy1             = _mm256_add_ps(fiy1,ty);
2259             fiz1             = _mm256_add_ps(fiz1,tz);
2260
2261             fjx1             = _mm256_add_ps(fjx1,tx);
2262             fjy1             = _mm256_add_ps(fjy1,ty);
2263             fjz1             = _mm256_add_ps(fjz1,tz);
2264
2265             }
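            /*
             * The remaining site pairs below (1-2, 1-3, 2-1, 2-2, 2-3, 3-1, 3-2 and 3-3)
             * repeat this reaction-field pattern with their own distance components,
             * inverse distances and charge products.
             */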
2266
2267             /**************************
2268              * CALCULATE INTERACTIONS *
2269              **************************/
2270
2271             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2272             {
2273
2274             /* REACTION-FIELD ELECTROSTATICS */
2275             felec            = _mm256_mul_ps(qq12,_mm256_sub_ps(_mm256_mul_ps(rinv12,rinvsq12),krf2));
2276
2277             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2278
2279             fscal            = felec;
2280
2281             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2282
2283             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2284
2285             /* Calculate temporary vectorial force */
2286             tx               = _mm256_mul_ps(fscal,dx12);
2287             ty               = _mm256_mul_ps(fscal,dy12);
2288             tz               = _mm256_mul_ps(fscal,dz12);
2289
2290             /* Update vectorial force */
2291             fix1             = _mm256_add_ps(fix1,tx);
2292             fiy1             = _mm256_add_ps(fiy1,ty);
2293             fiz1             = _mm256_add_ps(fiz1,tz);
2294
2295             fjx2             = _mm256_add_ps(fjx2,tx);
2296             fjy2             = _mm256_add_ps(fjy2,ty);
2297             fjz2             = _mm256_add_ps(fjz2,tz);
2298
2299             }
2300
2301             /**************************
2302              * CALCULATE INTERACTIONS *
2303              **************************/
2304
2305             if (gmx_mm256_any_lt(rsq13,rcutoff2))
2306             {
2307
2308             /* REACTION-FIELD ELECTROSTATICS */
2309             felec            = _mm256_mul_ps(qq13,_mm256_sub_ps(_mm256_mul_ps(rinv13,rinvsq13),krf2));
2310
2311             cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
2312
2313             fscal            = felec;
2314
2315             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2316
2317             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2318
2319             /* Calculate temporary vectorial force */
2320             tx               = _mm256_mul_ps(fscal,dx13);
2321             ty               = _mm256_mul_ps(fscal,dy13);
2322             tz               = _mm256_mul_ps(fscal,dz13);
2323
2324             /* Update vectorial force */
2325             fix1             = _mm256_add_ps(fix1,tx);
2326             fiy1             = _mm256_add_ps(fiy1,ty);
2327             fiz1             = _mm256_add_ps(fiz1,tz);
2328
2329             fjx3             = _mm256_add_ps(fjx3,tx);
2330             fjy3             = _mm256_add_ps(fjy3,ty);
2331             fjz3             = _mm256_add_ps(fjz3,tz);
2332
2333             }
2334
2335             /**************************
2336              * CALCULATE INTERACTIONS *
2337              **************************/
2338
2339             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2340             {
2341
2342             /* REACTION-FIELD ELECTROSTATICS */
2343             felec            = _mm256_mul_ps(qq21,_mm256_sub_ps(_mm256_mul_ps(rinv21,rinvsq21),krf2));
2344
2345             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2346
2347             fscal            = felec;
2348
2349             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2350
2351             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2352
2353             /* Calculate temporary vectorial force */
2354             tx               = _mm256_mul_ps(fscal,dx21);
2355             ty               = _mm256_mul_ps(fscal,dy21);
2356             tz               = _mm256_mul_ps(fscal,dz21);
2357
2358             /* Update vectorial force */
2359             fix2             = _mm256_add_ps(fix2,tx);
2360             fiy2             = _mm256_add_ps(fiy2,ty);
2361             fiz2             = _mm256_add_ps(fiz2,tz);
2362
2363             fjx1             = _mm256_add_ps(fjx1,tx);
2364             fjy1             = _mm256_add_ps(fjy1,ty);
2365             fjz1             = _mm256_add_ps(fjz1,tz);
2366
2367             }
2368
2369             /**************************
2370              * CALCULATE INTERACTIONS *
2371              **************************/
2372
2373             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2374             {
2375
2376             /* REACTION-FIELD ELECTROSTATICS */
2377             felec            = _mm256_mul_ps(qq22,_mm256_sub_ps(_mm256_mul_ps(rinv22,rinvsq22),krf2));
2378
2379             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2380
2381             fscal            = felec;
2382
2383             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2384
2385             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2386
2387             /* Calculate temporary vectorial force */
2388             tx               = _mm256_mul_ps(fscal,dx22);
2389             ty               = _mm256_mul_ps(fscal,dy22);
2390             tz               = _mm256_mul_ps(fscal,dz22);
2391
2392             /* Update vectorial force */
2393             fix2             = _mm256_add_ps(fix2,tx);
2394             fiy2             = _mm256_add_ps(fiy2,ty);
2395             fiz2             = _mm256_add_ps(fiz2,tz);
2396
2397             fjx2             = _mm256_add_ps(fjx2,tx);
2398             fjy2             = _mm256_add_ps(fjy2,ty);
2399             fjz2             = _mm256_add_ps(fjz2,tz);
2400
2401             }
2402
2403             /**************************
2404              * CALCULATE INTERACTIONS *
2405              **************************/
2406
2407             if (gmx_mm256_any_lt(rsq23,rcutoff2))
2408             {
2409
2410             /* REACTION-FIELD ELECTROSTATICS */
2411             felec            = _mm256_mul_ps(qq23,_mm256_sub_ps(_mm256_mul_ps(rinv23,rinvsq23),krf2));
2412
2413             cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
2414
2415             fscal            = felec;
2416
2417             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2418
2419             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2420
2421             /* Calculate temporary vectorial force */
2422             tx               = _mm256_mul_ps(fscal,dx23);
2423             ty               = _mm256_mul_ps(fscal,dy23);
2424             tz               = _mm256_mul_ps(fscal,dz23);
2425
2426             /* Update vectorial force */
2427             fix2             = _mm256_add_ps(fix2,tx);
2428             fiy2             = _mm256_add_ps(fiy2,ty);
2429             fiz2             = _mm256_add_ps(fiz2,tz);
2430
2431             fjx3             = _mm256_add_ps(fjx3,tx);
2432             fjy3             = _mm256_add_ps(fjy3,ty);
2433             fjz3             = _mm256_add_ps(fjz3,tz);
2434
2435             }
2436
2437             /**************************
2438              * CALCULATE INTERACTIONS *
2439              **************************/
2440
2441             if (gmx_mm256_any_lt(rsq31,rcutoff2))
2442             {
2443
2444             /* REACTION-FIELD ELECTROSTATICS */
2445             felec            = _mm256_mul_ps(qq31,_mm256_sub_ps(_mm256_mul_ps(rinv31,rinvsq31),krf2));
2446
2447             cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
2448
2449             fscal            = felec;
2450
2451             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2452
2453             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2454
2455             /* Calculate temporary vectorial force */
2456             tx               = _mm256_mul_ps(fscal,dx31);
2457             ty               = _mm256_mul_ps(fscal,dy31);
2458             tz               = _mm256_mul_ps(fscal,dz31);
2459
2460             /* Update vectorial force */
2461             fix3             = _mm256_add_ps(fix3,tx);
2462             fiy3             = _mm256_add_ps(fiy3,ty);
2463             fiz3             = _mm256_add_ps(fiz3,tz);
2464
2465             fjx1             = _mm256_add_ps(fjx1,tx);
2466             fjy1             = _mm256_add_ps(fjy1,ty);
2467             fjz1             = _mm256_add_ps(fjz1,tz);
2468
2469             }
2470
2471             /**************************
2472              * CALCULATE INTERACTIONS *
2473              **************************/
2474
2475             if (gmx_mm256_any_lt(rsq32,rcutoff2))
2476             {
2477
2478             /* REACTION-FIELD ELECTROSTATICS */
2479             felec            = _mm256_mul_ps(qq32,_mm256_sub_ps(_mm256_mul_ps(rinv32,rinvsq32),krf2));
2480
2481             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
2482
2483             fscal            = felec;
2484
2485             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2486
2487             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2488
2489             /* Calculate temporary vectorial force */
2490             tx               = _mm256_mul_ps(fscal,dx32);
2491             ty               = _mm256_mul_ps(fscal,dy32);
2492             tz               = _mm256_mul_ps(fscal,dz32);
2493
2494             /* Update vectorial force */
2495             fix3             = _mm256_add_ps(fix3,tx);
2496             fiy3             = _mm256_add_ps(fiy3,ty);
2497             fiz3             = _mm256_add_ps(fiz3,tz);
2498
2499             fjx2             = _mm256_add_ps(fjx2,tx);
2500             fjy2             = _mm256_add_ps(fjy2,ty);
2501             fjz2             = _mm256_add_ps(fjz2,tz);
2502
2503             }
2504
2505             /**************************
2506              * CALCULATE INTERACTIONS *
2507              **************************/
2508
2509             if (gmx_mm256_any_lt(rsq33,rcutoff2))
2510             {
2511
2512             /* REACTION-FIELD ELECTROSTATICS */
2513             felec            = _mm256_mul_ps(qq33,_mm256_sub_ps(_mm256_mul_ps(rinv33,rinvsq33),krf2));
2514
2515             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
2516
2517             fscal            = felec;
2518
2519             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2520
2521             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2522
2523             /* Calculate temporary vectorial force */
2524             tx               = _mm256_mul_ps(fscal,dx33);
2525             ty               = _mm256_mul_ps(fscal,dy33);
2526             tz               = _mm256_mul_ps(fscal,dz33);
2527
2528             /* Update vectorial force */
2529             fix3             = _mm256_add_ps(fix3,tx);
2530             fiy3             = _mm256_add_ps(fiy3,ty);
2531             fiz3             = _mm256_add_ps(fiz3,tz);
2532
2533             fjx3             = _mm256_add_ps(fjx3,tx);
2534             fjy3             = _mm256_add_ps(fjy3,ty);
2535             fjz3             = _mm256_add_ps(fjz3,tz);
2536
2537             }
2538
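            /*
             * Padding entries in the neighbor list (jnrlist < 0) get their j-force pointer
             * redirected to a local scratch buffer, so the SIMD stores below stay
             * branch-free without touching the force of a real atom.
             */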
2539             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2540             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2541             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2542             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2543             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2544             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2545             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2546             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
2547
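            /*
             * The decrement helper transposes the twelve accumulated j-force components
             * (x/y/z for four water sites) back to per-atom order and subtracts them from
             * the force arrays at the eight j pointers (scratch for padded lanes).
             */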
2548             gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2549                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2550                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2551
2552             /* Inner loop uses 325 flops */
2553         }
2554
2555         /* End of innermost loop */
2556
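        /*
         * Reduce the i-force accumulators across the eight SIMD lanes, add them to f[]
         * for the four sites of molecule i, and accumulate the same sums into the
         * shift-force array used for the virial.
         */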
2557         gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
2558                                                  f+i_coord_offset,fshift+i_shift_offset);
2559
2560         /* Increment number of inner iterations */
2561         inneriter                  += j_index_end - j_index_start;
2562
2563         /* Outer loop uses 24 flops */
2564     }
2565
2566     /* Increment number of outer iterations */
2567     outeriter        += nri;
2568
2569     /* Update outer/inner flops */
2570
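    /*
     * Bookkeeping only: report the flop counts (24 per outer and 325 per inner
     * iteration, as noted in the loop comments above) to the nrnb counters; this
     * does not affect the computed forces.
     */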
2571     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*325);
2572 }