/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "gromacs/legacyheaders/types/simple.h"
#include "gromacs/math/vec.h"
#include "gromacs/legacyheaders/nrnb.h"

#include "gromacs/simd/math_x86_avx_256_single.h"
#include "kernelutil_x86_avx_256_single.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LJEwald
 * Geometry:                   Water4-Water4
 * Calculate force/pot:        PotentialAndForce
 */
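/*
 * Decoding the kernel name from the fields above: ElecEwSh is Ewald
 * electrostatics with a shifted potential (sh_ewald below), VdwLJEwSh is
 * LJ-PME (LJEwald) dispersion with a shifted potential (sh_vdw_invrcut6,
 * sh_lj_ewald), GeomW4W4 means both i and j particles are 4-site waters,
 * and VF means both potential and force are computed.
 */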
void
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
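    /* As an illustration of the naming convention: dx12 below holds ix1-jx2,
     * the x displacement between i-water particle 1 and j-water particle 2,
     * for all eight j waters of the current SIMD iteration at once.
     */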
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    real *           vdwgridioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    real *           vdwgridioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    real *           vdwgridioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real *           vdwioffsetptr3;
    real *           vdwgridioffsetptr3;
    __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
    __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
    __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
    __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
    __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256           c6grid_00;
    __m256           c6grid_11;
    __m256           c6grid_12;
    __m256           c6grid_13;
    __m256           c6grid_21;
    __m256           c6grid_22;
    __m256           c6grid_23;
    __m256           c6grid_31;
    __m256           c6grid_32;
    __m256           c6grid_33;
    real             *vdwgridparam;
    __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
    __m256           one_half  = _mm256_set1_ps(0.5);
    __m256           minus_one = _mm256_set1_ps(-1.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;
    vdwgridparam     = fr->ljpme_c6grid;
    sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
    ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
    ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
    vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];

    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    jq3              = _mm256_set1_ps(charge[inr+3]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    c6grid_00        = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq13             = _mm256_mul_ps(iq1,jq3);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
    qq23             = _mm256_mul_ps(iq2,jq3);
    qq31             = _mm256_mul_ps(iq3,jq1);
    qq32             = _mm256_mul_ps(iq3,jq2);
    qq33             = _mm256_mul_ps(iq3,jq3);
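    /* Since every water in this Water4-Water4 kernel has identical topology,
     * the charges and LJ parameters are read once from the first i atom
     * (iinr[0]) and the charge products qqXY above are hoisted out of both
     * loops as constants.
     */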

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;
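    /* outeriter/inneriter count outer- and inner-loop trips; together with the
     * per-loop flop-count comments below they feed the accounting reported
     * through the t_nrnb argument.
     */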

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();
        fix3             = _mm256_setzero_ps();
        fiy3             = _mm256_setzero_ps();
        fiz3             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();
        vvdwsum          = _mm256_setzero_ps();

        /* Start inner kernel loop */
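        /* The j list is padded so this loop can always take full 8-wide steps
         * while jjnr[jidx+7] is still a real atom; the remainder, where
         * negative jjnr entries mark padding, is handled by the masked
         * epilogue after this loop.
         */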
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
                                                 &jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv13           = gmx_mm256_invsqrt_ps(rsq13);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);
            rinv23           = gmx_mm256_invsqrt_ps(rsq23);
            rinv31           = gmx_mm256_invsqrt_ps(rsq31);
            rinv32           = gmx_mm256_invsqrt_ps(rsq32);
            rinv33           = gmx_mm256_invsqrt_ps(rsq33);
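            /* gmx_mm256_invsqrt_ps computes 1/sqrt(x) from the hardware rsqrt
             * estimate refined by a Newton-Raphson step, the single-precision
             * accuracy/speed trade-off used throughout these kernels.
             */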

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
            /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
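            /* A sketch of the LJ-PME (LJEwald) real-space term computed above:
             * with b = ewaldcoeff_lj and poly = exp(-(b*r)^2)*(1+(b*r)^2+(b*r)^4/2),
             * the direct-space C6 term is reduced by the part of the dispersion
             * already handled on the PME grid, C6grid*(1-poly), and both LJ terms
             * are potential-shifted (sh_vdw_invrcut6, sh_lj_ewald) so that vvdw
             * vanishes at the cutoff.
             */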

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = fvdw;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);
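            /* The analytical PME correction above evaluates the shifted
             * real-space Ewald pair term without a table lookup:
             * gmx_mm256_pmecorrV_ps/gmx_mm256_pmecorrF_ps return polynomial
             * approximations of the erf(beta*r)-based correction, so
             * velec ~ qq*(erfc(beta*r)/r - sh_ewald) and felec is the matching
             * force scalar divided by r. The same pattern repeats for all nine
             * charged site pairs below.
             */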

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq13,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq23,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq31,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq32,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq33,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 1046 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
                                                 &jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv13           = gmx_mm256_invsqrt_ps(rsq13);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);
            rinv23           = gmx_mm256_invsqrt_ps(rsq23);
            rinv31           = gmx_mm256_invsqrt_ps(rsq31);
            rinv32           = gmx_mm256_invsqrt_ps(rsq32);
            rinv33           = gmx_mm256_invsqrt_ps(rsq33);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);
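            /* Padded lanes were redirected to atom 0 above, so their distances
             * are meaningless and can produce spurious values; clearing r (and
             * later the potential and force) with dummy_mask keeps such garbage
             * out of the accumulated real lanes.
             */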

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
            /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = fvdw;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);
            r13              = _mm256_andnot_ps(dummy_mask,r13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq13,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
1217             zeta2            = _mm256_mul_ps(beta2,rsq21);
1218             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1219             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1220             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1221             felec            = _mm256_mul_ps(qq21,felec);
1222             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1223             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1224             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
1225             velec            = _mm256_mul_ps(qq21,velec);
1226             
1227             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1228
1229             /* Update potential sum for this i atom from the interaction with this j atom. */
1230             velec            = _mm256_and_ps(velec,cutoff_mask);
1231             velec            = _mm256_andnot_ps(dummy_mask,velec);
1232             velecsum         = _mm256_add_ps(velecsum,velec);
1233
1234             fscal            = felec;
1235
1236             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1237
1238             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1239
1240             /* Calculate temporary vectorial force */
1241             tx               = _mm256_mul_ps(fscal,dx21);
1242             ty               = _mm256_mul_ps(fscal,dy21);
1243             tz               = _mm256_mul_ps(fscal,dz21);
1244
1245             /* Update vectorial force */
1246             fix2             = _mm256_add_ps(fix2,tx);
1247             fiy2             = _mm256_add_ps(fiy2,ty);
1248             fiz2             = _mm256_add_ps(fiz2,tz);
1249
1250             fjx1             = _mm256_add_ps(fjx1,tx);
1251             fjy1             = _mm256_add_ps(fjy1,ty);
1252             fjz1             = _mm256_add_ps(fjz1,tz);
1253
1254             }
1255
1256             /**************************
1257              * CALCULATE INTERACTIONS *
1258              **************************/
1259
1260             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1261             {
1262
1263             r22              = _mm256_mul_ps(rsq22,rinv22);
1264             r22              = _mm256_andnot_ps(dummy_mask,r22);
1265
1266             /* EWALD ELECTROSTATICS */
1267             
1268             /* Analytical PME correction */
1269             zeta2            = _mm256_mul_ps(beta2,rsq22);
1270             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1271             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1272             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1273             felec            = _mm256_mul_ps(qq22,felec);
1274             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1275             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1276             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
1277             velec            = _mm256_mul_ps(qq22,velec);
1278             
1279             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1280
1281             /* Update potential sum for this i atom from the interaction with this j atom. */
1282             velec            = _mm256_and_ps(velec,cutoff_mask);
1283             velec            = _mm256_andnot_ps(dummy_mask,velec);
1284             velecsum         = _mm256_add_ps(velecsum,velec);
1285
1286             fscal            = felec;
1287
1288             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1289
1290             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1291
1292             /* Calculate temporary vectorial force */
1293             tx               = _mm256_mul_ps(fscal,dx22);
1294             ty               = _mm256_mul_ps(fscal,dy22);
1295             tz               = _mm256_mul_ps(fscal,dz22);
1296
1297             /* Update vectorial force */
1298             fix2             = _mm256_add_ps(fix2,tx);
1299             fiy2             = _mm256_add_ps(fiy2,ty);
1300             fiz2             = _mm256_add_ps(fiz2,tz);
1301
1302             fjx2             = _mm256_add_ps(fjx2,tx);
1303             fjy2             = _mm256_add_ps(fjy2,ty);
1304             fjz2             = _mm256_add_ps(fjz2,tz);
1305
1306             }
1307
1308             /**************************
1309              * CALCULATE INTERACTIONS *
1310              **************************/
1311
1312             if (gmx_mm256_any_lt(rsq23,rcutoff2))
1313             {
1314
1315             r23              = _mm256_mul_ps(rsq23,rinv23);
1316             r23              = _mm256_andnot_ps(dummy_mask,r23);
1317
1318             /* EWALD ELECTROSTATICS */
1319             
1320             /* Analytical PME correction */
1321             zeta2            = _mm256_mul_ps(beta2,rsq23);
1322             rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
1323             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1324             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1325             felec            = _mm256_mul_ps(qq23,felec);
1326             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1327             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1328             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
1329             velec            = _mm256_mul_ps(qq23,velec);
1330             
1331             cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
1332
1333             /* Update potential sum for this i atom from the interaction with this j atom. */
1334             velec            = _mm256_and_ps(velec,cutoff_mask);
1335             velec            = _mm256_andnot_ps(dummy_mask,velec);
1336             velecsum         = _mm256_add_ps(velecsum,velec);
1337
1338             fscal            = felec;
1339
1340             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1341
1342             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1343
1344             /* Calculate temporary vectorial force */
1345             tx               = _mm256_mul_ps(fscal,dx23);
1346             ty               = _mm256_mul_ps(fscal,dy23);
1347             tz               = _mm256_mul_ps(fscal,dz23);
1348
1349             /* Update vectorial force */
1350             fix2             = _mm256_add_ps(fix2,tx);
1351             fiy2             = _mm256_add_ps(fiy2,ty);
1352             fiz2             = _mm256_add_ps(fiz2,tz);
1353
1354             fjx3             = _mm256_add_ps(fjx3,tx);
1355             fjy3             = _mm256_add_ps(fjy3,ty);
1356             fjz3             = _mm256_add_ps(fjz3,tz);
1357
1358             }
1359
1360             /**************************
1361              * CALCULATE INTERACTIONS *
1362              **************************/
1363
1364             if (gmx_mm256_any_lt(rsq31,rcutoff2))
1365             {
1366
1367             r31              = _mm256_mul_ps(rsq31,rinv31);
1368             r31              = _mm256_andnot_ps(dummy_mask,r31);
1369
1370             /* EWALD ELECTROSTATICS */
1371             
1372             /* Analytical PME correction */
1373             zeta2            = _mm256_mul_ps(beta2,rsq31);
1374             rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
1375             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1376             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1377             felec            = _mm256_mul_ps(qq31,felec);
1378             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1379             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1380             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
1381             velec            = _mm256_mul_ps(qq31,velec);
1382             
1383             cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
1384
1385             /* Update potential sum for this i atom from the interaction with this j atom. */
1386             velec            = _mm256_and_ps(velec,cutoff_mask);
1387             velec            = _mm256_andnot_ps(dummy_mask,velec);
1388             velecsum         = _mm256_add_ps(velecsum,velec);
1389
1390             fscal            = felec;
1391
1392             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1393
1394             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1395
1396             /* Calculate temporary vectorial force */
1397             tx               = _mm256_mul_ps(fscal,dx31);
1398             ty               = _mm256_mul_ps(fscal,dy31);
1399             tz               = _mm256_mul_ps(fscal,dz31);
1400
1401             /* Update vectorial force */
1402             fix3             = _mm256_add_ps(fix3,tx);
1403             fiy3             = _mm256_add_ps(fiy3,ty);
1404             fiz3             = _mm256_add_ps(fiz3,tz);
1405
1406             fjx1             = _mm256_add_ps(fjx1,tx);
1407             fjy1             = _mm256_add_ps(fjy1,ty);
1408             fjz1             = _mm256_add_ps(fjz1,tz);
1409
1410             }
1411
1412             /**************************
1413              * CALCULATE INTERACTIONS *
1414              **************************/
1415
1416             if (gmx_mm256_any_lt(rsq32,rcutoff2))
1417             {
1418
1419             r32              = _mm256_mul_ps(rsq32,rinv32);
1420             r32              = _mm256_andnot_ps(dummy_mask,r32);
1421
1422             /* EWALD ELECTROSTATICS */
1423             
1424             /* Analytical PME correction */
1425             zeta2            = _mm256_mul_ps(beta2,rsq32);
1426             rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
1427             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1428             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1429             felec            = _mm256_mul_ps(qq32,felec);
1430             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1431             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1432             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
1433             velec            = _mm256_mul_ps(qq32,velec);
1434             
1435             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
1436
1437             /* Update potential sum for this i atom from the interaction with this j atom. */
1438             velec            = _mm256_and_ps(velec,cutoff_mask);
1439             velec            = _mm256_andnot_ps(dummy_mask,velec);
1440             velecsum         = _mm256_add_ps(velecsum,velec);
1441
1442             fscal            = felec;
1443
1444             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1445
1446             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1447
1448             /* Calculate temporary vectorial force */
1449             tx               = _mm256_mul_ps(fscal,dx32);
1450             ty               = _mm256_mul_ps(fscal,dy32);
1451             tz               = _mm256_mul_ps(fscal,dz32);
1452
1453             /* Update vectorial force */
1454             fix3             = _mm256_add_ps(fix3,tx);
1455             fiy3             = _mm256_add_ps(fiy3,ty);
1456             fiz3             = _mm256_add_ps(fiz3,tz);
1457
1458             fjx2             = _mm256_add_ps(fjx2,tx);
1459             fjy2             = _mm256_add_ps(fjy2,ty);
1460             fjz2             = _mm256_add_ps(fjz2,tz);
1461
1462             }
1463
1464             /**************************
1465              * CALCULATE INTERACTIONS *
1466              **************************/
1467
1468             if (gmx_mm256_any_lt(rsq33,rcutoff2))
1469             {
1470
1471             r33              = _mm256_mul_ps(rsq33,rinv33);
1472             r33              = _mm256_andnot_ps(dummy_mask,r33);
1473
1474             /* EWALD ELECTROSTATICS */
1475             
1476             /* Analytical PME correction */
1477             zeta2            = _mm256_mul_ps(beta2,rsq33);
1478             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
1479             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1480             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1481             felec            = _mm256_mul_ps(qq33,felec);
1482             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1483             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1484             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
1485             velec            = _mm256_mul_ps(qq33,velec);
1486             
1487             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
1488
1489             /* Update potential sum for this i atom from the interaction with this j atom. */
1490             velec            = _mm256_and_ps(velec,cutoff_mask);
1491             velec            = _mm256_andnot_ps(dummy_mask,velec);
1492             velecsum         = _mm256_add_ps(velecsum,velec);
1493
1494             fscal            = felec;
1495
1496             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1497
1498             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1499
1500             /* Calculate temporary vectorial force */
1501             tx               = _mm256_mul_ps(fscal,dx33);
1502             ty               = _mm256_mul_ps(fscal,dy33);
1503             tz               = _mm256_mul_ps(fscal,dz33);
1504
1505             /* Update vectorial force */
1506             fix3             = _mm256_add_ps(fix3,tx);
1507             fiy3             = _mm256_add_ps(fiy3,ty);
1508             fiz3             = _mm256_add_ps(fiz3,tz);
1509
1510             fjx3             = _mm256_add_ps(fjx3,tx);
1511             fjy3             = _mm256_add_ps(fjy3,ty);
1512             fjz3             = _mm256_add_ps(fjz3,tz);
1513
1514             }
1515
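                 /* Dummy lanes get their force output routed to the local scratch
                  * buffer, so the eight-pointer swizzled store below can always
                  * write all lanes safely.
                  */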
1516             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1517             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1518             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1519             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1520             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1521             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1522             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1523             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1524
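                 /* Transpose the eight accumulated j-force triplets from SIMD
                  * (structure-of-arrays) registers back to per-atom order and
                  * subtract them from memory, the Newton's-third-law counterpart
                  * of the additions to the i forces.
                  */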
1525             gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1526                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1527                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1528
1529             /* Inner loop uses 1056 flops */
1530         }
1531
1532         /* End of innermost loop */
1533
1534         gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1535                                                  f+i_coord_offset,fshift+i_shift_offset);
1536
1537         ggid                        = gid[iidx];
1538         /* Update potential energies */
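             /* Each call horizontally sums the eight SIMD lanes of the accumulator
              * and adds the result to this energy group's entry.
              */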
1539         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1540         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
1541
1542         /* Increment number of inner iterations */
1543         inneriter                  += j_index_end - j_index_start;
1544
1545         /* Outer loop uses 26 flops */
1546     }
1547
1548     /* Increment number of outer iterations */
1549     outeriter        += nri;
1550
1551     /* Update outer/inner flops */
1552
1553     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*1056);
1554 }
1555 /*
1556  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_F_avx_256_single
1557  * Electrostatics interaction: Ewald
1558  * VdW interaction:            LJEwald
1559  * Geometry:                   Water4-Water4
1560  * Calculate force/pot:        Force
1561  */
1562 void
1563 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_F_avx_256_single
1564                     (t_nblist                    * gmx_restrict       nlist,
1565                      rvec                        * gmx_restrict          xx,
1566                      rvec                        * gmx_restrict          ff,
1567                      t_forcerec                  * gmx_restrict          fr,
1568                      t_mdatoms                   * gmx_restrict     mdatoms,
1569                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1570                      t_nrnb                      * gmx_restrict        nrnb)
1571 {
1572     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1573      * just 0 for non-waters.
1574      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1575      * jnr indices corresponding to data put in the eight positions in the SIMD register.
1576      */
1577     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1578     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1579     int              jnrA,jnrB,jnrC,jnrD;
1580     int              jnrE,jnrF,jnrG,jnrH;
1581     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1582     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1583     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1584     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1585     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1586     real             rcutoff_scalar;
1587     real             *shiftvec,*fshift,*x,*f;
1588     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1589     real             scratch[4*DIM];
1590     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1591     real *           vdwioffsetptr0;
1592     real *           vdwgridioffsetptr0;
1593     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1594     real *           vdwioffsetptr1;
1595     real *           vdwgridioffsetptr1;
1596     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1597     real *           vdwioffsetptr2;
1598     real *           vdwgridioffsetptr2;
1599     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1600     real *           vdwioffsetptr3;
1601     real *           vdwgridioffsetptr3;
1602     __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1603     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1604     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1605     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1606     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1607     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1608     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1609     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
1610     __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1611     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1612     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1613     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1614     __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1615     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1616     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1617     __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1618     __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1619     __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1620     __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1621     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1622     real             *charge;
1623     int              nvdwtype;
1624     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1625     int              *vdwtype;
1626     real             *vdwparam;
1627     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
1628     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
1629     __m256           c6grid_00;
1630     __m256           c6grid_11;
1631     __m256           c6grid_12;
1632     __m256           c6grid_13;
1633     __m256           c6grid_21;
1634     __m256           c6grid_22;
1635     __m256           c6grid_23;
1636     __m256           c6grid_31;
1637     __m256           c6grid_32;
1638     __m256           c6grid_33;
1639     real             *vdwgridparam;
1640     __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
1641     __m256           one_half  = _mm256_set1_ps(0.5);
1642     __m256           minus_one = _mm256_set1_ps(-1.0);
1643     __m256i          ewitab;
1644     __m128i          ewitab_lo,ewitab_hi;
1645     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1646     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1647     real             *ewtab;
1648     __m256           dummy_mask,cutoff_mask;
1649     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1650     __m256           one     = _mm256_set1_ps(1.0);
1651     __m256           two     = _mm256_set1_ps(2.0);
1652     x                = xx[0];
1653     f                = ff[0];
1654
1655     nri              = nlist->nri;
1656     iinr             = nlist->iinr;
1657     jindex           = nlist->jindex;
1658     jjnr             = nlist->jjnr;
1659     shiftidx         = nlist->shift;
1660     gid              = nlist->gid;
1661     shiftvec         = fr->shift_vec[0];
1662     fshift           = fr->fshift[0];
1663     facel            = _mm256_set1_ps(fr->epsfac);
1664     charge           = mdatoms->chargeA;
1665     nvdwtype         = fr->ntype;
1666     vdwparam         = fr->nbfp;
1667     vdwtype          = mdatoms->typeA;
1668     vdwgridparam     = fr->ljpme_c6grid;
1669     sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
1670     ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
1671     ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
1672
1673     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1674     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1675     beta2            = _mm256_mul_ps(beta,beta);
1676     beta3            = _mm256_mul_ps(beta,beta2);
1677
1678     ewtab            = fr->ic->tabq_coul_F;
1679     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1680     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1681
1682     /* Setup water-specific parameters */
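         /* All waters are identical, so charges and LJ parameters are loaded once
          * from the first i water and reused for every molecule pair.  In this
          * Water4 geometry atom 0 is the only LJ site and atoms 1-3 carry the
          * charges, which is why c6/c12 exist only for the 0-0 pair.
          */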
1683     inr              = nlist->iinr[0];
1684     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1685     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1686     iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1687     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1688     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
1689
1690     jq1              = _mm256_set1_ps(charge[inr+1]);
1691     jq2              = _mm256_set1_ps(charge[inr+2]);
1692     jq3              = _mm256_set1_ps(charge[inr+3]);
1693     vdwjidx0A        = 2*vdwtype[inr+0];
1694     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1695     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1696     c6grid_00        = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
1697     qq11             = _mm256_mul_ps(iq1,jq1);
1698     qq12             = _mm256_mul_ps(iq1,jq2);
1699     qq13             = _mm256_mul_ps(iq1,jq3);
1700     qq21             = _mm256_mul_ps(iq2,jq1);
1701     qq22             = _mm256_mul_ps(iq2,jq2);
1702     qq23             = _mm256_mul_ps(iq2,jq3);
1703     qq31             = _mm256_mul_ps(iq3,jq1);
1704     qq32             = _mm256_mul_ps(iq3,jq2);
1705     qq33             = _mm256_mul_ps(iq3,jq3);
1706
1707     /* With explicit cutoffs the electrostatics and VdW cutoff values must be identical, so we arbitrarily use the electrostatics cutoff */
1708     rcutoff_scalar   = fr->rcoulomb;
1709     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
1710     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
1711
1712     sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
1713     rvdw             = _mm256_set1_ps(fr->rvdw);
1714
1715     /* Initialize these to avoid spurious compiler warnings about uninitialized use */
1716     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1717     j_coord_offsetA = 0;
1718     j_coord_offsetB = 0;
1719     j_coord_offsetC = 0;
1720     j_coord_offsetD = 0;
1721     j_coord_offsetE = 0;
1722     j_coord_offsetF = 0;
1723     j_coord_offsetG = 0;
1724     j_coord_offsetH = 0;
1725
1726     outeriter        = 0;
1727     inneriter        = 0;
1728
1729     for(iidx=0;iidx<4*DIM;iidx++)
1730     {
1731         scratch[iidx] = 0.0;
1732     }
1733
1734     /* Start outer loop over neighborlists */
1735     for(iidx=0; iidx<nri; iidx++)
1736     {
1737         /* Load shift vector for this list */
1738         i_shift_offset   = DIM*shiftidx[iidx];
1739
1740         /* Load limits for loop over neighbors */
1741         j_index_start    = jindex[iidx];
1742         j_index_end      = jindex[iidx+1];
1743
1744         /* Get outer coordinate index */
1745         inr              = iinr[iidx];
1746         i_coord_offset   = DIM*inr;
1747
1748         /* Load i particle coords and add shift vector */
1749         gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1750                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1751
1752         fix0             = _mm256_setzero_ps();
1753         fiy0             = _mm256_setzero_ps();
1754         fiz0             = _mm256_setzero_ps();
1755         fix1             = _mm256_setzero_ps();
1756         fiy1             = _mm256_setzero_ps();
1757         fiz1             = _mm256_setzero_ps();
1758         fix2             = _mm256_setzero_ps();
1759         fiy2             = _mm256_setzero_ps();
1760         fiz2             = _mm256_setzero_ps();
1761         fix3             = _mm256_setzero_ps();
1762         fiy3             = _mm256_setzero_ps();
1763         fiz3             = _mm256_setzero_ps();
1764
1765         /* Start inner kernel loop */
1766         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1767         {
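                 /* Full eight-wide iteration: the loop condition guarantees
                  * jjnr[jidx+7] >= 0 and padding sits at the end of the list, so
                  * none of the eight j entries is a dummy and no masking is needed;
                  * the tail loop below handles any remainder.
                  */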
1768
1769             /* Get j neighbor index, and coordinate index */
1770             jnrA             = jjnr[jidx];
1771             jnrB             = jjnr[jidx+1];
1772             jnrC             = jjnr[jidx+2];
1773             jnrD             = jjnr[jidx+3];
1774             jnrE             = jjnr[jidx+4];
1775             jnrF             = jjnr[jidx+5];
1776             jnrG             = jjnr[jidx+6];
1777             jnrH             = jjnr[jidx+7];
1778             j_coord_offsetA  = DIM*jnrA;
1779             j_coord_offsetB  = DIM*jnrB;
1780             j_coord_offsetC  = DIM*jnrC;
1781             j_coord_offsetD  = DIM*jnrD;
1782             j_coord_offsetE  = DIM*jnrE;
1783             j_coord_offsetF  = DIM*jnrF;
1784             j_coord_offsetG  = DIM*jnrG;
1785             j_coord_offsetH  = DIM*jnrH;
1786
1787             /* load j atom coordinates */
1788             gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1789                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1790                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1791                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1792                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1793                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
1794
1795             /* Calculate displacement vector */
1796             dx00             = _mm256_sub_ps(ix0,jx0);
1797             dy00             = _mm256_sub_ps(iy0,jy0);
1798             dz00             = _mm256_sub_ps(iz0,jz0);
1799             dx11             = _mm256_sub_ps(ix1,jx1);
1800             dy11             = _mm256_sub_ps(iy1,jy1);
1801             dz11             = _mm256_sub_ps(iz1,jz1);
1802             dx12             = _mm256_sub_ps(ix1,jx2);
1803             dy12             = _mm256_sub_ps(iy1,jy2);
1804             dz12             = _mm256_sub_ps(iz1,jz2);
1805             dx13             = _mm256_sub_ps(ix1,jx3);
1806             dy13             = _mm256_sub_ps(iy1,jy3);
1807             dz13             = _mm256_sub_ps(iz1,jz3);
1808             dx21             = _mm256_sub_ps(ix2,jx1);
1809             dy21             = _mm256_sub_ps(iy2,jy1);
1810             dz21             = _mm256_sub_ps(iz2,jz1);
1811             dx22             = _mm256_sub_ps(ix2,jx2);
1812             dy22             = _mm256_sub_ps(iy2,jy2);
1813             dz22             = _mm256_sub_ps(iz2,jz2);
1814             dx23             = _mm256_sub_ps(ix2,jx3);
1815             dy23             = _mm256_sub_ps(iy2,jy3);
1816             dz23             = _mm256_sub_ps(iz2,jz3);
1817             dx31             = _mm256_sub_ps(ix3,jx1);
1818             dy31             = _mm256_sub_ps(iy3,jy1);
1819             dz31             = _mm256_sub_ps(iz3,jz1);
1820             dx32             = _mm256_sub_ps(ix3,jx2);
1821             dy32             = _mm256_sub_ps(iy3,jy2);
1822             dz32             = _mm256_sub_ps(iz3,jz2);
1823             dx33             = _mm256_sub_ps(ix3,jx3);
1824             dy33             = _mm256_sub_ps(iy3,jy3);
1825             dz33             = _mm256_sub_ps(iz3,jz3);
1826
1827             /* Calculate squared distance and things based on it */
1828             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1829             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1830             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1831             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
1832             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1833             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1834             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
1835             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
1836             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
1837             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
1838
1839             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1840             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1841             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1842             rinv13           = gmx_mm256_invsqrt_ps(rsq13);
1843             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1844             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1845             rinv23           = gmx_mm256_invsqrt_ps(rsq23);
1846             rinv31           = gmx_mm256_invsqrt_ps(rsq31);
1847             rinv32           = gmx_mm256_invsqrt_ps(rsq32);
1848             rinv33           = gmx_mm256_invsqrt_ps(rsq33);
1849
1850             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1851             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1852             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1853             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
1854             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1855             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1856             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
1857             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
1858             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
1859             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
1860
1861             fjx0             = _mm256_setzero_ps();
1862             fjy0             = _mm256_setzero_ps();
1863             fjz0             = _mm256_setzero_ps();
1864             fjx1             = _mm256_setzero_ps();
1865             fjy1             = _mm256_setzero_ps();
1866             fjz1             = _mm256_setzero_ps();
1867             fjx2             = _mm256_setzero_ps();
1868             fjy2             = _mm256_setzero_ps();
1869             fjz2             = _mm256_setzero_ps();
1870             fjx3             = _mm256_setzero_ps();
1871             fjy3             = _mm256_setzero_ps();
1872             fjz3             = _mm256_setzero_ps();
1873
1874             /**************************
1875              * CALCULATE INTERACTIONS *
1876              **************************/
1877
1878             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1879             {
1880
1881             r00              = _mm256_mul_ps(rsq00,rinv00);
1882
1883             /* Analytical LJ-PME */
1884             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1885             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
1886             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
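                 /* Sign convention: ewclj2 = -ewclj^2 (set up before the loop), so
                  * ewcljrsq = -(ewclj*r)^2, exponent = exp(-(ewclj*r)^2) and
                  * ewclj6 = -ewclj^6, which gives f6B below the correct negative sign
                  * for the grid-correction force term.
                  */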
1887             exponent         = gmx_simd_exp_r(ewcljrsq);
1888             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4/2) */
1889             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1890             /* f6A = 6 * C6grid * (1 - poly) */
1891             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1892             /* f6B = C6grid * exponent * ewclj6  (ewclj6 = -beta^6; this sets the sign of the correction) */
1893             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1894             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1895             fvdw             = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1896
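                 /* Force-only kernel: the dispersion potential and its shift are not
                  * needed here, so only fvdw (the scalar force F/r) is evaluated.
                  */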
1897             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1898
1899             fscal            = fvdw;
1900
1901             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1902
1903             /* Calculate temporary vectorial force */
1904             tx               = _mm256_mul_ps(fscal,dx00);
1905             ty               = _mm256_mul_ps(fscal,dy00);
1906             tz               = _mm256_mul_ps(fscal,dz00);
1907
1908             /* Update vectorial force */
1909             fix0             = _mm256_add_ps(fix0,tx);
1910             fiy0             = _mm256_add_ps(fiy0,ty);
1911             fiz0             = _mm256_add_ps(fiz0,tz);
1912
1913             fjx0             = _mm256_add_ps(fjx0,tx);
1914             fjy0             = _mm256_add_ps(fjy0,ty);
1915             fjz0             = _mm256_add_ps(fjz0,tz);
1916
1917             }
1918
1919             /**************************
1920              * CALCULATE INTERACTIONS *
1921              **************************/
1922
1923             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1924             {
1925
1926             r11              = _mm256_mul_ps(rsq11,rinv11);
1927
1928             /* EWALD ELECTROSTATICS */
1929             
1930             /* Analytical PME correction */
1931             zeta2            = _mm256_mul_ps(beta2,rsq11);
1932             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1933             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1934             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1935             felec            = _mm256_mul_ps(qq11,felec);
1936             
1937             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1938
1939             fscal            = felec;
1940
1941             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1942
1943             /* Calculate temporary vectorial force */
1944             tx               = _mm256_mul_ps(fscal,dx11);
1945             ty               = _mm256_mul_ps(fscal,dy11);
1946             tz               = _mm256_mul_ps(fscal,dz11);
1947
1948             /* Update vectorial force */
1949             fix1             = _mm256_add_ps(fix1,tx);
1950             fiy1             = _mm256_add_ps(fiy1,ty);
1951             fiz1             = _mm256_add_ps(fiz1,tz);
1952
1953             fjx1             = _mm256_add_ps(fjx1,tx);
1954             fjy1             = _mm256_add_ps(fjy1,ty);
1955             fjz1             = _mm256_add_ps(fjz1,tz);
1956
1957             }
1958
1959             /**************************
1960              * CALCULATE INTERACTIONS *
1961              **************************/
1962
1963             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1964             {
1965
1966             r12              = _mm256_mul_ps(rsq12,rinv12);
1967
1968             /* EWALD ELECTROSTATICS */
1969             
1970             /* Analytical PME correction */
1971             zeta2            = _mm256_mul_ps(beta2,rsq12);
1972             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1973             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1974             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1975             felec            = _mm256_mul_ps(qq12,felec);
1976             
1977             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1978
1979             fscal            = felec;
1980
1981             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1982
1983             /* Calculate temporary vectorial force */
1984             tx               = _mm256_mul_ps(fscal,dx12);
1985             ty               = _mm256_mul_ps(fscal,dy12);
1986             tz               = _mm256_mul_ps(fscal,dz12);
1987
1988             /* Update vectorial force */
1989             fix1             = _mm256_add_ps(fix1,tx);
1990             fiy1             = _mm256_add_ps(fiy1,ty);
1991             fiz1             = _mm256_add_ps(fiz1,tz);
1992
1993             fjx2             = _mm256_add_ps(fjx2,tx);
1994             fjy2             = _mm256_add_ps(fjy2,ty);
1995             fjz2             = _mm256_add_ps(fjz2,tz);
1996
1997             }
1998
1999             /**************************
2000              * CALCULATE INTERACTIONS *
2001              **************************/
2002
2003             if (gmx_mm256_any_lt(rsq13,rcutoff2))
2004             {
2005
2006             r13              = _mm256_mul_ps(rsq13,rinv13);
2007
2008             /* EWALD ELECTROSTATICS */
2009             
2010             /* Analytical PME correction */
2011             zeta2            = _mm256_mul_ps(beta2,rsq13);
2012             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
2013             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2014             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2015             felec            = _mm256_mul_ps(qq13,felec);
2016             
2017             cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
2018
2019             fscal            = felec;
2020
2021             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2022
2023             /* Calculate temporary vectorial force */
2024             tx               = _mm256_mul_ps(fscal,dx13);
2025             ty               = _mm256_mul_ps(fscal,dy13);
2026             tz               = _mm256_mul_ps(fscal,dz13);
2027
2028             /* Update vectorial force */
2029             fix1             = _mm256_add_ps(fix1,tx);
2030             fiy1             = _mm256_add_ps(fiy1,ty);
2031             fiz1             = _mm256_add_ps(fiz1,tz);
2032
2033             fjx3             = _mm256_add_ps(fjx3,tx);
2034             fjy3             = _mm256_add_ps(fjy3,ty);
2035             fjz3             = _mm256_add_ps(fjz3,tz);
2036
2037             }
2038
2039             /**************************
2040              * CALCULATE INTERACTIONS *
2041              **************************/
2042
2043             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2044             {
2045
2046             r21              = _mm256_mul_ps(rsq21,rinv21);
2047
2048             /* EWALD ELECTROSTATICS */
2049             
2050             /* Analytical PME correction */
2051             zeta2            = _mm256_mul_ps(beta2,rsq21);
2052             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2053             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2054             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2055             felec            = _mm256_mul_ps(qq21,felec);
2056             
2057             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2058
2059             fscal            = felec;
2060
2061             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2062
2063             /* Calculate temporary vectorial force */
2064             tx               = _mm256_mul_ps(fscal,dx21);
2065             ty               = _mm256_mul_ps(fscal,dy21);
2066             tz               = _mm256_mul_ps(fscal,dz21);
2067
2068             /* Update vectorial force */
2069             fix2             = _mm256_add_ps(fix2,tx);
2070             fiy2             = _mm256_add_ps(fiy2,ty);
2071             fiz2             = _mm256_add_ps(fiz2,tz);
2072
2073             fjx1             = _mm256_add_ps(fjx1,tx);
2074             fjy1             = _mm256_add_ps(fjy1,ty);
2075             fjz1             = _mm256_add_ps(fjz1,tz);
2076
2077             }
2078
2079             /**************************
2080              * CALCULATE INTERACTIONS *
2081              **************************/
2082
2083             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2084             {
2085
2086             r22              = _mm256_mul_ps(rsq22,rinv22);
2087
2088             /* EWALD ELECTROSTATICS */
2089             
2090             /* Analytical PME correction */
2091             zeta2            = _mm256_mul_ps(beta2,rsq22);
2092             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
2093             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2094             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2095             felec            = _mm256_mul_ps(qq22,felec);
2096             
2097             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2098
2099             fscal            = felec;
2100
2101             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2102
2103             /* Calculate temporary vectorial force */
2104             tx               = _mm256_mul_ps(fscal,dx22);
2105             ty               = _mm256_mul_ps(fscal,dy22);
2106             tz               = _mm256_mul_ps(fscal,dz22);
2107
2108             /* Update vectorial force */
2109             fix2             = _mm256_add_ps(fix2,tx);
2110             fiy2             = _mm256_add_ps(fiy2,ty);
2111             fiz2             = _mm256_add_ps(fiz2,tz);
2112
2113             fjx2             = _mm256_add_ps(fjx2,tx);
2114             fjy2             = _mm256_add_ps(fjy2,ty);
2115             fjz2             = _mm256_add_ps(fjz2,tz);
2116
2117             }
2118
2119             /**************************
2120              * CALCULATE INTERACTIONS *
2121              **************************/
2122
2123             if (gmx_mm256_any_lt(rsq23,rcutoff2))
2124             {
2125
2126             r23              = _mm256_mul_ps(rsq23,rinv23);
2127
2128             /* EWALD ELECTROSTATICS */
2129             
2130             /* Analytical PME correction */
2131             zeta2            = _mm256_mul_ps(beta2,rsq23);
2132             rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
2133             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2134             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2135             felec            = _mm256_mul_ps(qq23,felec);
2136             
2137             cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
2138
2139             fscal            = felec;
2140
2141             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2142
2143             /* Calculate temporary vectorial force */
2144             tx               = _mm256_mul_ps(fscal,dx23);
2145             ty               = _mm256_mul_ps(fscal,dy23);
2146             tz               = _mm256_mul_ps(fscal,dz23);
2147
2148             /* Update vectorial force */
2149             fix2             = _mm256_add_ps(fix2,tx);
2150             fiy2             = _mm256_add_ps(fiy2,ty);
2151             fiz2             = _mm256_add_ps(fiz2,tz);
2152
2153             fjx3             = _mm256_add_ps(fjx3,tx);
2154             fjy3             = _mm256_add_ps(fjy3,ty);
2155             fjz3             = _mm256_add_ps(fjz3,tz);
2156
2157             }
2158
2159             /**************************
2160              * CALCULATE INTERACTIONS *
2161              **************************/
2162
2163             if (gmx_mm256_any_lt(rsq31,rcutoff2))
2164             {
2165
2166             r31              = _mm256_mul_ps(rsq31,rinv31);
2167
2168             /* EWALD ELECTROSTATICS */
2169             
2170             /* Analytical PME correction */
2171             zeta2            = _mm256_mul_ps(beta2,rsq31);
2172             rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
2173             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2174             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2175             felec            = _mm256_mul_ps(qq31,felec);
2176             
2177             cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
2178
2179             fscal            = felec;
2180
2181             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2182
2183             /* Calculate temporary vectorial force */
2184             tx               = _mm256_mul_ps(fscal,dx31);
2185             ty               = _mm256_mul_ps(fscal,dy31);
2186             tz               = _mm256_mul_ps(fscal,dz31);
2187
2188             /* Update vectorial force */
2189             fix3             = _mm256_add_ps(fix3,tx);
2190             fiy3             = _mm256_add_ps(fiy3,ty);
2191             fiz3             = _mm256_add_ps(fiz3,tz);
2192
2193             fjx1             = _mm256_add_ps(fjx1,tx);
2194             fjy1             = _mm256_add_ps(fjy1,ty);
2195             fjz1             = _mm256_add_ps(fjz1,tz);
2196
2197             }
2198
2199             /**************************
2200              * CALCULATE INTERACTIONS *
2201              **************************/
2202
2203             if (gmx_mm256_any_lt(rsq32,rcutoff2))
2204             {
2205
2206             r32              = _mm256_mul_ps(rsq32,rinv32);
2207
2208             /* EWALD ELECTROSTATICS */
2209             
2210             /* Analytical PME correction */
2211             zeta2            = _mm256_mul_ps(beta2,rsq32);
2212             rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
2213             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2214             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2215             felec            = _mm256_mul_ps(qq32,felec);
2216             
2217             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
2218
2219             fscal            = felec;
2220
2221             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2222
2223             /* Calculate temporary vectorial force */
2224             tx               = _mm256_mul_ps(fscal,dx32);
2225             ty               = _mm256_mul_ps(fscal,dy32);
2226             tz               = _mm256_mul_ps(fscal,dz32);
2227
2228             /* Update vectorial force */
2229             fix3             = _mm256_add_ps(fix3,tx);
2230             fiy3             = _mm256_add_ps(fiy3,ty);
2231             fiz3             = _mm256_add_ps(fiz3,tz);
2232
2233             fjx2             = _mm256_add_ps(fjx2,tx);
2234             fjy2             = _mm256_add_ps(fjy2,ty);
2235             fjz2             = _mm256_add_ps(fjz2,tz);
2236
2237             }
2238
2239             /**************************
2240              * CALCULATE INTERACTIONS *
2241              **************************/
2242
2243             if (gmx_mm256_any_lt(rsq33,rcutoff2))
2244             {
2245
2246             r33              = _mm256_mul_ps(rsq33,rinv33);
2247
2248             /* EWALD ELECTROSTATICS */
2249             
2250             /* Analytical PME correction */
2251             zeta2            = _mm256_mul_ps(beta2,rsq33);
2252             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
2253             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2254             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2255             felec            = _mm256_mul_ps(qq33,felec);
2256             
2257             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
2258
2259             fscal            = felec;
2260
2261             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2262
2263             /* Calculate temporary vectorial force */
2264             tx               = _mm256_mul_ps(fscal,dx33);
2265             ty               = _mm256_mul_ps(fscal,dy33);
2266             tz               = _mm256_mul_ps(fscal,dz33);
2267
2268             /* Update vectorial force */
2269             fix3             = _mm256_add_ps(fix3,tx);
2270             fiy3             = _mm256_add_ps(fiy3,ty);
2271             fiz3             = _mm256_add_ps(fiz3,tz);
2272
2273             fjx3             = _mm256_add_ps(fjx3,tx);
2274             fjy3             = _mm256_add_ps(fjy3,ty);
2275             fjz3             = _mm256_add_ps(fjz3,tz);
2276
2277             }
2278
2279             fjptrA             = f+j_coord_offsetA;
2280             fjptrB             = f+j_coord_offsetB;
2281             fjptrC             = f+j_coord_offsetC;
2282             fjptrD             = f+j_coord_offsetD;
2283             fjptrE             = f+j_coord_offsetE;
2284             fjptrF             = f+j_coord_offsetF;
2285             fjptrG             = f+j_coord_offsetG;
2286             fjptrH             = f+j_coord_offsetH;
2287
2288             gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2289                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2290                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2291
2292             /* Inner loop uses 583 flops */
2293         }
2294
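             /* Tail loop: handles the final, possibly partial, batch of j atoms;
              * padded entries carry negative indices and are cleared via dummy_mask.
              */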
2295         if(jidx<j_index_end)
2296         {
2297
2298             /* Get j neighbor index, and coordinate index */
2299             jnrlistA         = jjnr[jidx];
2300             jnrlistB         = jjnr[jidx+1];
2301             jnrlistC         = jjnr[jidx+2];
2302             jnrlistD         = jjnr[jidx+3];
2303             jnrlistE         = jjnr[jidx+4];
2304             jnrlistF         = jjnr[jidx+5];
2305             jnrlistG         = jjnr[jidx+6];
2306             jnrlistH         = jjnr[jidx+7];
2307             /* Padded (non-real) j entries in jjnr carry negative indices; the mask
2308              * built below is 0xFFFFFFFF for such dummy lanes and 0x0 for real ones,
2309              * so it is applied as val = _mm256_andnot_ps(dummy_mask,val) to clear dummy entries.
2310              */
2311             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
2312                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
2313                                             
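                 /* The two 128-bit halves compare the raw j indices against zero
                  * (jjnr[jidx+4..7] into the high half, jjnr[jidx..jidx+3] into the
                  * low half) and are glued into a single 256-bit lane mask.
                  */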
2314             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2315             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2316             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2317             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2318             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
2319             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
2320             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
2321             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
                                                 &jy2,&jz2,&jx3,&jy3,&jz3);
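            /* The helper gathers the four atom positions of all eight j waters
             * (stored AoS in x[]) and transposes them into twelve SoA YMM
             * registers, one per coordinate component. */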

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv13           = gmx_mm256_invsqrt_ps(rsq13);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);
            rinv23           = gmx_mm256_invsqrt_ps(rsq23);
            rinv31           = gmx_mm256_invsqrt_ps(rsq31);
            rinv32           = gmx_mm256_invsqrt_ps(rsq32);
            rinv33           = gmx_mm256_invsqrt_ps(rsq33);
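            /* gmx_mm256_invsqrt_ps refines the hardware rsqrt estimate; a
             * minimal sketch of the usual Newton-Raphson construction (with
             * half and three holding 0.5f and 3.0f):
             *
             *   __m256 lu = _mm256_rsqrt_ps(x);   // ~11-bit estimate
             *   lu = _mm256_mul_ps(_mm256_mul_ps(half,lu),
             *                      _mm256_sub_ps(three,
             *                      _mm256_mul_ps(x,_mm256_mul_ps(lu,lu))));
             *
             * One step, 0.5*lu*(3 - x*lu*lu), recovers near single precision. */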

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4/2);
             * ewclj2 holds -beta^2 (set in the kernel preamble), so
             * ewcljrsq = -(beta*r)^2 and exponent decays with r. */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* f6A = 6*C6grid*(1-poly); the factor 6 is pre-folded into c6grid_00 */
            f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
            /* f6B = -C6grid*beta^6*exp(-(beta*r)^2), since ewclj6 = ewclj2^3 = -beta^6 */
            f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
            /* fvdw = F/r = (12*C12/r^12 - (6*C6 - f6A)/r^6 + f6B)/r^2 */
            fvdw             = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
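            /* Scalar sketch of the same force, written with the plain
             * (unscaled) parameters C6, C12, C6grid and x = (beta*r)^2:
             *
             *   F(r)*r = 12*C12/r^12 - 6*C6/r^6
             *            + 6*C6grid*(1-poly)/r^6 - C6grid*beta^6*exp(-x)
             *
             * fvdw holds F/r, i.e. the expression above multiplied by 1/r^2. */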

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = fvdw;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
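            /* With z = beta*r, pmecorrF(z2) approximates
             * -(erf(z) - (2*z/sqrt(pi))*exp(-z^2))/z^3, so that
             *
             *   felec = qq*(beta^3*pmecorrF + 1/r^3)
             *         = qq*(erfc(beta*r)/r + (2*beta/sqrt(pi))*exp(-(beta*r)^2))/r^2,
             *
             * which is the Ewald real-space force divided by r. The same
             * correction form is reused for all charge pairs below. */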

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);
            r13              = _mm256_andnot_ps(dummy_mask,r13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);
            r23              = _mm256_andnot_ps(dummy_mask,r23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);
            r31              = _mm256_andnot_ps(dummy_mask,r31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);
            r32              = _mm256_andnot_ps(dummy_mask,r32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);
            r33              = _mm256_andnot_ps(dummy_mask,r33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
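            /* Dummy entries are pointed at the scratch buffer so the scatter
             * below always has a valid target; their force contributions were
             * already zeroed through dummy_mask. */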

            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 593 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 24 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*593);
}
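
#if 0
/*
 * Editor's sketch, not generated code: a plain-C per-pair reference for the
 * force assembled by the kernel above, handy for spot-checking the SIMD
 * arithmetic. It assumes unscaled C6/C12/C6grid (the kernel tables carry
 * 6*C6, 12*C12 and 6*C6grid) and calls libm where the kernel uses polynomial
 * fits. The function name and the #if 0 guard are hypothetical; math.h is
 * already included at the top of this file.
 */
static float
ref_pair_force_over_r(float r, float qq, float c6, float c12, float c6grid,
                      float beta, float beta_lj)
{
    const float two_over_sqrtpi = 1.1283791671f;   /* 2/sqrt(pi) */
    float rinv   = 1.0f/r;
    float rinvsq = rinv*rinv;
    float rinv6  = rinvsq*rinvsq*rinvsq;
    float b2     = beta_lj*beta_lj;
    float b6     = b2*b2*b2;
    float x      = b2*r*r;
    float poly   = expf(-x)*(1.0f + x + 0.5f*x*x);
    /* Ewald real-space electrostatics, F/r */
    float felec  = qq*(erfcf(beta*r)*rinv
                       + two_over_sqrtpi*beta*expf(-beta*beta*r*r))*rinvsq;
    /* Lennard-Jones with the LJ-PME real-space dispersion correction, F/r */
    float fvdw   = (12.0f*c12*rinv6*rinv6 - 6.0f*c6*rinv6
                    + 6.0f*c6grid*(1.0f - poly)*rinv6
                    - c6grid*b6*expf(-x))*rinvsq;
    return felec + fvdw;
}
#endif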