/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif

#include <math.h>

#include "../nb_kernel.h"
#include "types/simple.h"
#include "vec.h"
#include "nrnb.h"

#include "gromacs/simd/math_x86_avx_256_single.h"
#include "kernelutil_x86_avx_256_single.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LJEwald
 * Geometry:                   Water4-Water4
 * Calculate force/pot:        PotentialAndForce
 */
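/*
 * Reading note (inferred from the interaction blocks below): in the
 * Water4-Water4 geometry, site 0 is the VdW-only heavy site, so only the
 * 0-0 site pair carries the LJ-PME interaction, while the three charged
 * sites 1-3 on each water interact through the nine electrostatic pairs
 * 1-1 .. 3-3.
 */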
void
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
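    /* Lane-layout example (restating what the swizzled loads below produce):
     * each __m256 such as jx1 holds one coordinate for eight j waters, one
     * lane per neighbor, i.e. jx1 = { x of atom 1 of jnrA, ..., of jnrH }.
     */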
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    real *           vdwgridioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    real *           vdwgridioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    real *           vdwgridioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real *           vdwioffsetptr3;
    real *           vdwgridioffsetptr3;
    __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
    __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
    __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
    __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
    __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256           c6grid_00;
    __m256           c6grid_11;
    __m256           c6grid_12;
    __m256           c6grid_13;
    __m256           c6grid_21;
    __m256           c6grid_22;
    __m256           c6grid_23;
    __m256           c6grid_31;
    __m256           c6grid_32;
    __m256           c6grid_33;
    real             *vdwgridparam;
    __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
    __m256           one_half  = _mm256_set1_ps(0.5);
    __m256           minus_one = _mm256_set1_ps(-1.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;
    vdwgridparam     = fr->ljpme_c6grid;
    sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
    ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
    ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
    vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];

    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    jq3              = _mm256_set1_ps(charge[inr+3]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    c6grid_00        = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq13             = _mm256_mul_ps(iq1,jq3);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
    qq23             = _mm256_mul_ps(iq2,jq3);
    qq31             = _mm256_mul_ps(iq3,jq1);
    qq32             = _mm256_mul_ps(iq3,jq2);
    qq33             = _mm256_mul_ps(iq3,jq3);

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();
        fix3             = _mm256_setzero_ps();
        fiy3             = _mm256_setzero_ps();
        fiz3             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();
        vvdwsum          = _mm256_setzero_ps();

        /* Start inner kernel loop */
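        /* The loop below only runs over full chunks of eight real j atoms:
         * the guard jjnr[jidx+7]>=0 stops it at the first padded (negative)
         * entry, and the masked epilogue further down finishes the list. */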
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
                                                 &jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv13           = gmx_mm256_invsqrt_ps(rsq13);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);
            rinv23           = gmx_mm256_invsqrt_ps(rsq23);
            rinv31           = gmx_mm256_invsqrt_ps(rsq31);
            rinv32           = gmx_mm256_invsqrt_ps(rsq32);
            rinv33           = gmx_mm256_invsqrt_ps(rsq33);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* Analytical LJ-PME */
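            /* Note: LJ-PME splits the C6 dispersion into this real-space
             * part and a reciprocal-space (grid) part; the c6grid_00 terms
             * below remove the portion of the dispersion that the grid
             * already accounts for. */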
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
            /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = fvdw;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
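            /* A sketch of the math (not a line-by-line derivation):
             * gmx_mm256_pmecorrF_ps/_pmecorrV_ps evaluate polynomial fits of
             * the erf-based long-range part, so velec below is effectively
             * qq11*(erfc(beta*r)/r - sh_ewald), with felec the matching
             * force divided by r. */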
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq13,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq23,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq31,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq32,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq33,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 1046 flops */
        }

        if(jidx<j_index_end)
        {

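            /* Epilogue for the final, possibly partial chunk: padded jjnr
             * entries are negative; they are clamped to index 0 for the loads
             * and their contributions are zeroed below via dummy_mask. */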
            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
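            /* Example: with five real waters in this chunk, jjnr[jidx+5..7]
             * are negative padding, lanes F-H of dummy_mask become all ones,
             * and the andnot operations below zero those lanes' energies and
             * forces. */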
889             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
890                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
891                                             
892             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
893             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
894             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
895             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
896             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
897             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
898             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
899             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
900             j_coord_offsetA  = DIM*jnrA;
901             j_coord_offsetB  = DIM*jnrB;
902             j_coord_offsetC  = DIM*jnrC;
903             j_coord_offsetD  = DIM*jnrD;
904             j_coord_offsetE  = DIM*jnrE;
905             j_coord_offsetF  = DIM*jnrF;
906             j_coord_offsetG  = DIM*jnrG;
907             j_coord_offsetH  = DIM*jnrH;
908
909             /* load j atom coordinates */
910             gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
911                                                  x+j_coord_offsetC,x+j_coord_offsetD,
912                                                  x+j_coord_offsetE,x+j_coord_offsetF,
913                                                  x+j_coord_offsetG,x+j_coord_offsetH,
914                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
915                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
916
917             /* Calculate displacement vector */
918             dx00             = _mm256_sub_ps(ix0,jx0);
919             dy00             = _mm256_sub_ps(iy0,jy0);
920             dz00             = _mm256_sub_ps(iz0,jz0);
921             dx11             = _mm256_sub_ps(ix1,jx1);
922             dy11             = _mm256_sub_ps(iy1,jy1);
923             dz11             = _mm256_sub_ps(iz1,jz1);
924             dx12             = _mm256_sub_ps(ix1,jx2);
925             dy12             = _mm256_sub_ps(iy1,jy2);
926             dz12             = _mm256_sub_ps(iz1,jz2);
927             dx13             = _mm256_sub_ps(ix1,jx3);
928             dy13             = _mm256_sub_ps(iy1,jy3);
929             dz13             = _mm256_sub_ps(iz1,jz3);
930             dx21             = _mm256_sub_ps(ix2,jx1);
931             dy21             = _mm256_sub_ps(iy2,jy1);
932             dz21             = _mm256_sub_ps(iz2,jz1);
933             dx22             = _mm256_sub_ps(ix2,jx2);
934             dy22             = _mm256_sub_ps(iy2,jy2);
935             dz22             = _mm256_sub_ps(iz2,jz2);
936             dx23             = _mm256_sub_ps(ix2,jx3);
937             dy23             = _mm256_sub_ps(iy2,jy3);
938             dz23             = _mm256_sub_ps(iz2,jz3);
939             dx31             = _mm256_sub_ps(ix3,jx1);
940             dy31             = _mm256_sub_ps(iy3,jy1);
941             dz31             = _mm256_sub_ps(iz3,jz1);
942             dx32             = _mm256_sub_ps(ix3,jx2);
943             dy32             = _mm256_sub_ps(iy3,jy2);
944             dz32             = _mm256_sub_ps(iz3,jz2);
945             dx33             = _mm256_sub_ps(ix3,jx3);
946             dy33             = _mm256_sub_ps(iy3,jy3);
947             dz33             = _mm256_sub_ps(iz3,jz3);
948
949             /* Calculate squared distance and things based on it */
950             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
951             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
952             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
953             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
954             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
955             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
956             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
957             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
958             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
959             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
960
961             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
962             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
963             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
964             rinv13           = gmx_mm256_invsqrt_ps(rsq13);
965             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
966             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
967             rinv23           = gmx_mm256_invsqrt_ps(rsq23);
968             rinv31           = gmx_mm256_invsqrt_ps(rsq31);
969             rinv32           = gmx_mm256_invsqrt_ps(rsq32);
970             rinv33           = gmx_mm256_invsqrt_ps(rsq33);
971
972             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
973             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
974             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
975             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
976             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
977             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
978             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
979             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
980             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
981             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
982
983             fjx0             = _mm256_setzero_ps();
984             fjy0             = _mm256_setzero_ps();
985             fjz0             = _mm256_setzero_ps();
986             fjx1             = _mm256_setzero_ps();
987             fjy1             = _mm256_setzero_ps();
988             fjz1             = _mm256_setzero_ps();
989             fjx2             = _mm256_setzero_ps();
990             fjy2             = _mm256_setzero_ps();
991             fjz2             = _mm256_setzero_ps();
992             fjx3             = _mm256_setzero_ps();
993             fjy3             = _mm256_setzero_ps();
994             fjz3             = _mm256_setzero_ps();
995
996             /**************************
997              * CALCULATE INTERACTIONS *
998              **************************/
999
1000             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1001             {
1002
1003             r00              = _mm256_mul_ps(rsq00,rinv00);
1004             r00              = _mm256_andnot_ps(dummy_mask,r00);
1005
1006             /* Analytical LJ-PME */
1007             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1008             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
1009             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1010             exponent         = gmx_simd_exp_r(ewcljrsq);
1011             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1012             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1013             /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
1014             vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
1015             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1016             vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
1017                                           _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
1018             /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
1019             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
1020
1021             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1022
1023             /* Update potential sum for this i atom from the interaction with this j atom. */
1024             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
1025             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
1026             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
1027
1028             fscal            = fvdw;
1029
1030             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1031
1032             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1033
1034             /* Calculate temporary vectorial force */
1035             tx               = _mm256_mul_ps(fscal,dx00);
1036             ty               = _mm256_mul_ps(fscal,dy00);
1037             tz               = _mm256_mul_ps(fscal,dz00);
1038
1039             /* Update vectorial force */
1040             fix0             = _mm256_add_ps(fix0,tx);
1041             fiy0             = _mm256_add_ps(fiy0,ty);
1042             fiz0             = _mm256_add_ps(fiz0,tz);
1043
1044             fjx0             = _mm256_add_ps(fjx0,tx);
1045             fjy0             = _mm256_add_ps(fjy0,ty);
1046             fjz0             = _mm256_add_ps(fjz0,tz);
1047
1048             }
1049
1050             /**************************
1051              * CALCULATE INTERACTIONS *
1052              **************************/
1053
1054             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1055             {
1056
1057             r11              = _mm256_mul_ps(rsq11,rinv11);
1058             r11              = _mm256_andnot_ps(dummy_mask,r11);
1059
1060             /* EWALD ELECTROSTATICS */
1061             
1062             /* Analytical PME correction */
1063             zeta2            = _mm256_mul_ps(beta2,rsq11);
1064             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1065             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1066             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1067             felec            = _mm256_mul_ps(qq11,felec);
1068             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1069             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1070             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
1071             velec            = _mm256_mul_ps(qq11,velec);
1072             
1073             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1074
1075             /* Update potential sum for this i atom from the interaction with this j atom. */
1076             velec            = _mm256_and_ps(velec,cutoff_mask);
1077             velec            = _mm256_andnot_ps(dummy_mask,velec);
1078             velecsum         = _mm256_add_ps(velecsum,velec);
1079
1080             fscal            = felec;
1081
1082             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1083
1084             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1085
1086             /* Calculate temporary vectorial force */
1087             tx               = _mm256_mul_ps(fscal,dx11);
1088             ty               = _mm256_mul_ps(fscal,dy11);
1089             tz               = _mm256_mul_ps(fscal,dz11);
1090
1091             /* Update vectorial force */
1092             fix1             = _mm256_add_ps(fix1,tx);
1093             fiy1             = _mm256_add_ps(fiy1,ty);
1094             fiz1             = _mm256_add_ps(fiz1,tz);
1095
1096             fjx1             = _mm256_add_ps(fjx1,tx);
1097             fjy1             = _mm256_add_ps(fjy1,ty);
1098             fjz1             = _mm256_add_ps(fjz1,tz);
1099
1100             }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);
            r13              = _mm256_andnot_ps(dummy_mask,r13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv13,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq13,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);
            r23              = _mm256_andnot_ps(dummy_mask,r23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv23,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq23,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);
            r31              = _mm256_andnot_ps(dummy_mask,r31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv31,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq31,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);
            r32              = _mm256_andnot_ps(dummy_mask,r32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv32,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq32,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);
            r33              = _mm256_andnot_ps(dummy_mask,r33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv33,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq33,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }
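
            /* Lanes whose jnr was negative are neighbor-list padding: routing
             * their pointers to the local scratch buffer lets the full 8-wide
             * scatter below run unconditionally, with the padded lanes' forces
             * landing in memory that is never read back. */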

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 1056 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
                                                 f+i_coord_offset,fshift+i_shift_offset);
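
        /* Besides adding the four i-atom force accumulators to f, the call
         * above also accumulates them into fshift for this shift vector;
         * GROMACS derives the virial from these shift forces. */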

        ggid                        = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
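
        /* gmx_mm256_update_1pot_ps reduces the eight lanes of each accumulator
         * horizontally and adds the scalar sum to the energy-group bin (see
         * kernelutil_x86_avx_256_single.h for the exact reduction). */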

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 26 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*1056);
}
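
/* Illustrative scalar reference (a sketch for exposition, not part of the
 * generated kernels): it spells out, for one charge pair, the quantities each
 * SIMD lane of the kernels above and below computes for the shift-corrected
 * Ewald real-space interaction. The helper name ref_ewald_shifted_pair and its
 * parameters are hypothetical (qq = q_i*q_j*epsfac, beta = fr->ic->ewaldcoeff_q,
 * vshift = fr->ic->sh_ewald); erfc(), exp() and M_PI are assumed from <math.h>.
 */
static real gmx_unused
ref_ewald_shifted_pair(real qq, real r, real beta, real vshift, real *velec)
{
    real rinv = 1.0/r;

    /* Shifted potential: V(r) = qq*(erfc(beta*r)/r - vshift), ~0 at the cutoff */
    *velec = qq*(erfc(beta*r)*rinv - vshift);

    /* Scalar force divided by r, ready to multiply dx/dy/dz:
     * F(r)/r = qq*(erfc(beta*r)/r + (2*beta/sqrt(pi))*exp(-(beta*r)^2))/r^2
     */
    return qq*(erfc(beta*r)*rinv + 2.0*beta*exp(-beta*beta*r*r)/sqrt(M_PI))*rinv*rinv;
}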
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_F_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LJEwald
 * Geometry:                   Water4-Water4
 * Calculate force/pot:        Force
 */
void
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4W4_F_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    real *           vdwgridioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    real *           vdwgridioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    real *           vdwgridioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real *           vdwioffsetptr3;
    real *           vdwgridioffsetptr3;
    __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
    __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
    __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
    __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
    __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256           c6grid_00;
    __m256           c6grid_11;
    __m256           c6grid_12;
    __m256           c6grid_13;
    __m256           c6grid_21;
    __m256           c6grid_22;
    __m256           c6grid_23;
    __m256           c6grid_31;
    __m256           c6grid_32;
    __m256           c6grid_33;
    real             *vdwgridparam;
    __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
    __m256           one_half  = _mm256_set1_ps(0.5);
    __m256           minus_one = _mm256_set1_ps(-1.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;
    vdwgridparam     = fr->ljpme_c6grid;
    sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
    ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
    ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_F;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
    vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];

    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    jq3              = _mm256_set1_ps(charge[inr+3]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    c6grid_00        = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq13             = _mm256_mul_ps(iq1,jq3);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
    qq23             = _mm256_mul_ps(iq2,jq3);
    qq31             = _mm256_mul_ps(iq3,jq1);
    qq32             = _mm256_mul_ps(iq3,jq2);
    qq33             = _mm256_mul_ps(iq3,jq3);

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
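
    /* Testing rsq against rcutoff2 lets every inner-loop cutoff check run on
     * squared distances, avoiding a square root per pair. */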

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();
        fix3             = _mm256_setzero_ps();
        fiy3             = _mm256_setzero_ps();
        fiz3             = _mm256_setzero_ps();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
                                                 &jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv13           = gmx_mm256_invsqrt_ps(rsq13);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);
            rinv23           = gmx_mm256_invsqrt_ps(rsq23);
            rinv31           = gmx_mm256_invsqrt_ps(rsq31);
            rinv32           = gmx_mm256_invsqrt_ps(rsq32);
            rinv33           = gmx_mm256_invsqrt_ps(rsq33);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
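
            /* gmx_mm256_invsqrt_ps is expected to map to _mm256_rsqrt_ps plus a
             * Newton-Raphson refinement step, giving single-precision-accurate
             * 1/sqrt(rsq) (see the AVX kernel utility header). */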

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* f6A = 6 * C6grid * (1 - poly) */
            f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
            /* f6B = C6grid * exponent * beta^6 */
            f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
            /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
            fvdw             = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = fvdw;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }
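
            /* Qualitatively: poly, f6A and f6B remove the part of the r^-6
             * dispersion that the LJ-PME mesh handles in reciprocal space, so
             * this short-range force plus the mesh contribution reconstructs
             * the full Lennard-Jones interaction. */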

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 583 flops */
        }
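
        /* The unrolled loop above only runs while a full octet of real j
         * entries remains (jjnr[jidx+7]>=0); the masked pass below handles the
         * remainder, clamping negative padding indices to 0 and cancelling
         * their contributions via dummy_mask. */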

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
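
            /* Example: with three real j atoms left, the octet is padded with
             * negative entries, e.g. {jA,jB,jC,-1,-1,-1,-1,-1}; _mm_cmplt_epi32
             * against zero turns exactly the padded lanes into all-ones masks. */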

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
                                                 &jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
2387             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
2388             rinv13           = gmx_mm256_invsqrt_ps(rsq13);
2389             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
2390             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
2391             rinv23           = gmx_mm256_invsqrt_ps(rsq23);
2392             rinv31           = gmx_mm256_invsqrt_ps(rsq31);
2393             rinv32           = gmx_mm256_invsqrt_ps(rsq32);
2394             rinv33           = gmx_mm256_invsqrt_ps(rsq33);
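                 /* gmx_mm256_invsqrt_ps computes 1/sqrt(rsq); to our
                  * understanding it refines the hardware rsqrt estimate with
                  * one Newton-Raphson step, roughly:
                  *
                  *   x0 = rsqrt_approx(a);
                  *   x1 = 0.5f*x0*(3.0f - a*x0*x0);
                  */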
2395
2396             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
2397             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2398             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2399             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
2400             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2401             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2402             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
2403             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
2404             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
2405             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
2406
2407             fjx0             = _mm256_setzero_ps();
2408             fjy0             = _mm256_setzero_ps();
2409             fjz0             = _mm256_setzero_ps();
2410             fjx1             = _mm256_setzero_ps();
2411             fjy1             = _mm256_setzero_ps();
2412             fjz1             = _mm256_setzero_ps();
2413             fjx2             = _mm256_setzero_ps();
2414             fjy2             = _mm256_setzero_ps();
2415             fjz2             = _mm256_setzero_ps();
2416             fjx3             = _mm256_setzero_ps();
2417             fjy3             = _mm256_setzero_ps();
2418             fjz3             = _mm256_setzero_ps();
2419
2420             /**************************
2421              * CALCULATE INTERACTIONS *
2422              **************************/
2423
2424             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2425             {
2426
2427             r00              = _mm256_mul_ps(rsq00,rinv00);
2428             r00              = _mm256_andnot_ps(dummy_mask,r00);
2429
2430             /* Analytical LJ-PME */
2431             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2432             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
2433             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
2434             exponent         = gmx_simd_exp_r(ewcljrsq);
2435             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
2436             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
2437             /* f6A = 6 * C6grid * (1 - poly) */
2438             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
2439             /* f6B = C6grid * exponent * beta^6 */
2440             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
2441             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
2442             fvdw             = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
2443
2444             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2445
2446             fscal            = fvdw;
2447
2448             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2449
2450             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2451
2452             /* Calculate temporary vectorial force */
2453             tx               = _mm256_mul_ps(fscal,dx00);
2454             ty               = _mm256_mul_ps(fscal,dy00);
2455             tz               = _mm256_mul_ps(fscal,dz00);
2456
2457             /* Update vectorial force */
2458             fix0             = _mm256_add_ps(fix0,tx);
2459             fiy0             = _mm256_add_ps(fiy0,ty);
2460             fiz0             = _mm256_add_ps(fiz0,tz);
2461
2462             fjx0             = _mm256_add_ps(fjx0,tx);
2463             fjy0             = _mm256_add_ps(fjy0,ty);
2464             fjz0             = _mm256_add_ps(fjz0,tz);
2465
2466             }
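                 /* Scalar transcription of the LJ-PME force arithmetic above
                  * (a sketch: it assumes ewclj2 holds -beta_lj^2, so that the
                  * exponent matches the poly comment, and keeps whatever
                  * prefactors c6_00/c12_00/c6grid_00 were loaded with):
                  *
                  *   x      = -ewclj2*r*r;
                  *   poly   = exp(-x)*(1 + x + 0.5*x*x);
                  *   f6A    = c6grid*(1 - poly);
                  *   f6B    = (c6grid/6)*exp(-x)*ewclj2^3;
                  *   F(r)/r = ( (c12/r^6 - (c6 - f6A))/r^6 + f6B ) / r^2;
                  */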
2467
2468             /**************************
2469              * CALCULATE INTERACTIONS *
2470              **************************/
2471
2472             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2473             {
2474
2475             r11              = _mm256_mul_ps(rsq11,rinv11);
2476             r11              = _mm256_andnot_ps(dummy_mask,r11);
2477
2478             /* EWALD ELECTROSTATICS */
2479             
2480             /* Analytical PME correction */
2481             zeta2            = _mm256_mul_ps(beta2,rsq11);
2482             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2483             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2484             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2485             felec            = _mm256_mul_ps(qq11,felec);
2486             
2487             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2488
2489             fscal            = felec;
2490
2491             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2492
2493             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2494
2495             /* Calculate temporary vectorial force */
2496             tx               = _mm256_mul_ps(fscal,dx11);
2497             ty               = _mm256_mul_ps(fscal,dy11);
2498             tz               = _mm256_mul_ps(fscal,dz11);
2499
2500             /* Update vectorial force */
2501             fix1             = _mm256_add_ps(fix1,tx);
2502             fiy1             = _mm256_add_ps(fiy1,ty);
2503             fiz1             = _mm256_add_ps(fiz1,tz);
2504
2505             fjx1             = _mm256_add_ps(fjx1,tx);
2506             fjy1             = _mm256_add_ps(fjy1,ty);
2507             fjz1             = _mm256_add_ps(fjz1,tz);
2508
2509             }
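                 /* The analytical PME correction evaluates the real-space
                  * Ewald force without a lookup table.  Scalar transcription
                  * of the block above, writing z2 = (beta*r)^2:
                  *
                  *   felec = qq*( beta^3*pmecorrF(z2) + 1/r^3 );
                  *
                  * which, as far as we understand pmecorrF, equals the usual
                  *
                  *   F(r)/r = qq*( erfc(beta*r)/r^3
                  *                 + (2*beta/sqrt(pi))*exp(-z2)/r^2 ).
                  */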
2510
2511             /**************************
2512              * CALCULATE INTERACTIONS *
2513              **************************/
2514
2515             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2516             {
2517
2518             r12              = _mm256_mul_ps(rsq12,rinv12);
2519             r12              = _mm256_andnot_ps(dummy_mask,r12);
2520
2521             /* EWALD ELECTROSTATICS */
2522             
2523             /* Analytical PME correction */
2524             zeta2            = _mm256_mul_ps(beta2,rsq12);
2525             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2526             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2527             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2528             felec            = _mm256_mul_ps(qq12,felec);
2529             
2530             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2531
2532             fscal            = felec;
2533
2534             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2535
2536             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2537
2538             /* Calculate temporary vectorial force */
2539             tx               = _mm256_mul_ps(fscal,dx12);
2540             ty               = _mm256_mul_ps(fscal,dy12);
2541             tz               = _mm256_mul_ps(fscal,dz12);
2542
2543             /* Update vectorial force */
2544             fix1             = _mm256_add_ps(fix1,tx);
2545             fiy1             = _mm256_add_ps(fiy1,ty);
2546             fiz1             = _mm256_add_ps(fiz1,tz);
2547
2548             fjx2             = _mm256_add_ps(fjx2,tx);
2549             fjy2             = _mm256_add_ps(fjy2,ty);
2550             fjz2             = _mm256_add_ps(fjz2,tz);
2551
2552             }
2553
2554             /**************************
2555              * CALCULATE INTERACTIONS *
2556              **************************/
2557
2558             if (gmx_mm256_any_lt(rsq13,rcutoff2))
2559             {
2560
2561             r13              = _mm256_mul_ps(rsq13,rinv13);
2562             r13              = _mm256_andnot_ps(dummy_mask,r13);
2563
2564             /* EWALD ELECTROSTATICS */
2565             
2566             /* Analytical PME correction */
2567             zeta2            = _mm256_mul_ps(beta2,rsq13);
2568             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
2569             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2570             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2571             felec            = _mm256_mul_ps(qq13,felec);
2572             
2573             cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
2574
2575             fscal            = felec;
2576
2577             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2578
2579             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2580
2581             /* Calculate temporary vectorial force */
2582             tx               = _mm256_mul_ps(fscal,dx13);
2583             ty               = _mm256_mul_ps(fscal,dy13);
2584             tz               = _mm256_mul_ps(fscal,dz13);
2585
2586             /* Update vectorial force */
2587             fix1             = _mm256_add_ps(fix1,tx);
2588             fiy1             = _mm256_add_ps(fiy1,ty);
2589             fiz1             = _mm256_add_ps(fiz1,tz);
2590
2591             fjx3             = _mm256_add_ps(fjx3,tx);
2592             fjy3             = _mm256_add_ps(fjy3,ty);
2593             fjz3             = _mm256_add_ps(fjz3,tz);
2594
2595             }
2596
2597             /**************************
2598              * CALCULATE INTERACTIONS *
2599              **************************/
2600
2601             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2602             {
2603
2604             r21              = _mm256_mul_ps(rsq21,rinv21);
2605             r21              = _mm256_andnot_ps(dummy_mask,r21);
2606
2607             /* EWALD ELECTROSTATICS */
2608             
2609             /* Analytical PME correction */
2610             zeta2            = _mm256_mul_ps(beta2,rsq21);
2611             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2612             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2613             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2614             felec            = _mm256_mul_ps(qq21,felec);
2615             
2616             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2617
2618             fscal            = felec;
2619
2620             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2621
2622             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2623
2624             /* Calculate temporary vectorial force */
2625             tx               = _mm256_mul_ps(fscal,dx21);
2626             ty               = _mm256_mul_ps(fscal,dy21);
2627             tz               = _mm256_mul_ps(fscal,dz21);
2628
2629             /* Update vectorial force */
2630             fix2             = _mm256_add_ps(fix2,tx);
2631             fiy2             = _mm256_add_ps(fiy2,ty);
2632             fiz2             = _mm256_add_ps(fiz2,tz);
2633
2634             fjx1             = _mm256_add_ps(fjx1,tx);
2635             fjy1             = _mm256_add_ps(fjy1,ty);
2636             fjz1             = _mm256_add_ps(fjz1,tz);
2637
2638             }
2639
2640             /**************************
2641              * CALCULATE INTERACTIONS *
2642              **************************/
2643
2644             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2645             {
2646
2647             r22              = _mm256_mul_ps(rsq22,rinv22);
2648             r22              = _mm256_andnot_ps(dummy_mask,r22);
2649
2650             /* EWALD ELECTROSTATICS */
2651             
2652             /* Analytical PME correction */
2653             zeta2            = _mm256_mul_ps(beta2,rsq22);
2654             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
2655             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2656             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2657             felec            = _mm256_mul_ps(qq22,felec);
2658             
2659             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2660
2661             fscal            = felec;
2662
2663             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2664
2665             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2666
2667             /* Calculate temporary vectorial force */
2668             tx               = _mm256_mul_ps(fscal,dx22);
2669             ty               = _mm256_mul_ps(fscal,dy22);
2670             tz               = _mm256_mul_ps(fscal,dz22);
2671
2672             /* Update vectorial force */
2673             fix2             = _mm256_add_ps(fix2,tx);
2674             fiy2             = _mm256_add_ps(fiy2,ty);
2675             fiz2             = _mm256_add_ps(fiz2,tz);
2676
2677             fjx2             = _mm256_add_ps(fjx2,tx);
2678             fjy2             = _mm256_add_ps(fjy2,ty);
2679             fjz2             = _mm256_add_ps(fjz2,tz);
2680
2681             }
2682
2683             /**************************
2684              * CALCULATE INTERACTIONS *
2685              **************************/
2686
2687             if (gmx_mm256_any_lt(rsq23,rcutoff2))
2688             {
2689
2690             r23              = _mm256_mul_ps(rsq23,rinv23);
2691             r23              = _mm256_andnot_ps(dummy_mask,r23);
2692
2693             /* EWALD ELECTROSTATICS */
2694             
2695             /* Analytical PME correction */
2696             zeta2            = _mm256_mul_ps(beta2,rsq23);
2697             rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
2698             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2699             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2700             felec            = _mm256_mul_ps(qq23,felec);
2701             
2702             cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
2703
2704             fscal            = felec;
2705
2706             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2707
2708             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2709
2710             /* Calculate temporary vectorial force */
2711             tx               = _mm256_mul_ps(fscal,dx23);
2712             ty               = _mm256_mul_ps(fscal,dy23);
2713             tz               = _mm256_mul_ps(fscal,dz23);
2714
2715             /* Update vectorial force */
2716             fix2             = _mm256_add_ps(fix2,tx);
2717             fiy2             = _mm256_add_ps(fiy2,ty);
2718             fiz2             = _mm256_add_ps(fiz2,tz);
2719
2720             fjx3             = _mm256_add_ps(fjx3,tx);
2721             fjy3             = _mm256_add_ps(fjy3,ty);
2722             fjz3             = _mm256_add_ps(fjz3,tz);
2723
2724             }
2725
2726             /**************************
2727              * CALCULATE INTERACTIONS *
2728              **************************/
2729
2730             if (gmx_mm256_any_lt(rsq31,rcutoff2))
2731             {
2732
2733             r31              = _mm256_mul_ps(rsq31,rinv31);
2734             r31              = _mm256_andnot_ps(dummy_mask,r31);
2735
2736             /* EWALD ELECTROSTATICS */
2737             
2738             /* Analytical PME correction */
2739             zeta2            = _mm256_mul_ps(beta2,rsq31);
2740             rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
2741             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2742             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2743             felec            = _mm256_mul_ps(qq31,felec);
2744             
2745             cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
2746
2747             fscal            = felec;
2748
2749             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2750
2751             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2752
2753             /* Calculate temporary vectorial force */
2754             tx               = _mm256_mul_ps(fscal,dx31);
2755             ty               = _mm256_mul_ps(fscal,dy31);
2756             tz               = _mm256_mul_ps(fscal,dz31);
2757
2758             /* Update vectorial force */
2759             fix3             = _mm256_add_ps(fix3,tx);
2760             fiy3             = _mm256_add_ps(fiy3,ty);
2761             fiz3             = _mm256_add_ps(fiz3,tz);
2762
2763             fjx1             = _mm256_add_ps(fjx1,tx);
2764             fjy1             = _mm256_add_ps(fjy1,ty);
2765             fjz1             = _mm256_add_ps(fjz1,tz);
2766
2767             }
2768
2769             /**************************
2770              * CALCULATE INTERACTIONS *
2771              **************************/
2772
2773             if (gmx_mm256_any_lt(rsq32,rcutoff2))
2774             {
2775
2776             r32              = _mm256_mul_ps(rsq32,rinv32);
2777             r32              = _mm256_andnot_ps(dummy_mask,r32);
2778
2779             /* EWALD ELECTROSTATICS */
2780             
2781             /* Analytical PME correction */
2782             zeta2            = _mm256_mul_ps(beta2,rsq32);
2783             rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
2784             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2785             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2786             felec            = _mm256_mul_ps(qq32,felec);
2787             
2788             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
2789
2790             fscal            = felec;
2791
2792             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2793
2794             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2795
2796             /* Calculate temporary vectorial force */
2797             tx               = _mm256_mul_ps(fscal,dx32);
2798             ty               = _mm256_mul_ps(fscal,dy32);
2799             tz               = _mm256_mul_ps(fscal,dz32);
2800
2801             /* Update vectorial force */
2802             fix3             = _mm256_add_ps(fix3,tx);
2803             fiy3             = _mm256_add_ps(fiy3,ty);
2804             fiz3             = _mm256_add_ps(fiz3,tz);
2805
2806             fjx2             = _mm256_add_ps(fjx2,tx);
2807             fjy2             = _mm256_add_ps(fjy2,ty);
2808             fjz2             = _mm256_add_ps(fjz2,tz);
2809
2810             }
2811
2812             /**************************
2813              * CALCULATE INTERACTIONS *
2814              **************************/
2815
2816             if (gmx_mm256_any_lt(rsq33,rcutoff2))
2817             {
2818
2819             r33              = _mm256_mul_ps(rsq33,rinv33);
2820             r33              = _mm256_andnot_ps(dummy_mask,r33);
2821
2822             /* EWALD ELECTROSTATICS */
2823             
2824             /* Analytical PME correction */
2825             zeta2            = _mm256_mul_ps(beta2,rsq33);
2826             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
2827             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2828             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2829             felec            = _mm256_mul_ps(qq33,felec);
2830             
2831             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
2832
2833             fscal            = felec;
2834
2835             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2836
2837             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2838
2839             /* Calculate temporary vectorial force */
2840             tx               = _mm256_mul_ps(fscal,dx33);
2841             ty               = _mm256_mul_ps(fscal,dy33);
2842             tz               = _mm256_mul_ps(fscal,dz33);
2843
2844             /* Update vectorial force */
2845             fix3             = _mm256_add_ps(fix3,tx);
2846             fiy3             = _mm256_add_ps(fiy3,ty);
2847             fiz3             = _mm256_add_ps(fiz3,tz);
2848
2849             fjx3             = _mm256_add_ps(fjx3,tx);
2850             fjy3             = _mm256_add_ps(fjy3,ty);
2851             fjz3             = _mm256_add_ps(fjz3,tz);
2852
2853             }
2854
2855             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2856             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2857             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2858             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2859             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2860             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2861             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2862             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
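                 /* Dummy lanes get their force pointer redirected to a scratch
                  * buffer, so the swizzled decrement below can always store
                  * through all eight pointers without branching; whatever
                  * lands in scratch is simply discarded.  Scalar sketch:
                  *
                  *   dst[k] = (jnrlist[k] >= 0) ? f + j_coord_offset[k]
                  *                              : scratch;
                  */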
2863
2864             gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2865                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2866                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
2867
2868             /* Inner loop uses 593 flops */
2869         }
2870
2871         /* End of innermost loop */
2872
2873         gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
2874                                                  f+i_coord_offset,fshift+i_shift_offset);
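             /* The swizzle above reduces each i-atom force accumulator across
              * the eight AVX lanes, adds the totals to f[] for the four water
              * atoms, and accumulates the same totals into fshift[] for this
              * i entry's periodic shift (used later for the virial).
              */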
2875
2876         /* Increment number of inner iterations */
2877         inneriter                  += j_index_end - j_index_start;
2878
2879         /* Outer loop uses 24 flops */
2880     }
2881
2882     /* Increment number of outer iterations */
2883     outeriter        += nri;
2884
2885     /* Update outer/inner flops */
2886
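         /* The eNR counter below feeds mdrun's flop accounting: each outer
          * iteration is billed 24 flops and each inner (eight-j) iteration
          * 593, matching the per-loop counts noted above.
          */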
2887     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*593);
2888 }