Use full path for legacyheaders
[alexxy/gromacs.git] / src / gromacs / gmxlib / nonbonded / nb_kernel_avx_256_single / nb_kernel_ElecEw_VdwLJEw_GeomW3W3_avx_256_single.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_single kernel generator.
37  */
38 #include "config.h"
39
40 #include <math.h>
41
42 #include "../nb_kernel.h"
43 #include "gromacs/legacyheaders/types/simple.h"
44 #include "gromacs/math/vec.h"
45 #include "gromacs/legacyheaders/nrnb.h"
46
47 #include "gromacs/simd/math_x86_avx_256_single.h"
48 #include "kernelutil_x86_avx_256_single.h"
49
50 /*
51  * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJEw_GeomW3W3_VF_avx_256_single
52  * Electrostatics interaction: Ewald
53  * VdW interaction:            LJEwald
54  * Geometry:                   Water3-Water3
55  * Calculate force/pot:        PotentialAndForce
56  */
57 void
58 nb_kernel_ElecEw_VdwLJEw_GeomW3W3_VF_avx_256_single
59                     (t_nblist                    * gmx_restrict       nlist,
60                      rvec                        * gmx_restrict          xx,
61                      rvec                        * gmx_restrict          ff,
62                      t_forcerec                  * gmx_restrict          fr,
63                      t_mdatoms                   * gmx_restrict     mdatoms,
64                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65                      t_nrnb                      * gmx_restrict        nrnb)
66 {
67     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
68      * just 0 for non-waters.
69      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
70      * jnr indices corresponding to data put in the four positions in the SIMD register.
71      */
72     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
73     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74     int              jnrA,jnrB,jnrC,jnrD;
75     int              jnrE,jnrF,jnrG,jnrH;
76     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
77     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
78     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
79     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
84     real             scratch[4*DIM];
85     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     real *           vdwgridioffsetptr0;
88     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
89     real *           vdwioffsetptr1;
90     real *           vdwgridioffsetptr1;
91     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
92     real *           vdwioffsetptr2;
93     real *           vdwgridioffsetptr2;
94     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
95     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
96     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
97     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
98     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
99     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
100     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
101     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
102     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
103     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
104     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
105     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
106     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
107     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
108     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
109     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
110     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
111     real             *charge;
112     int              nvdwtype;
113     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
114     int              *vdwtype;
115     real             *vdwparam;
116     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
117     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
118     __m256           c6grid_00;
119     __m256           c6grid_01;
120     __m256           c6grid_02;
121     __m256           c6grid_10;
122     __m256           c6grid_11;
123     __m256           c6grid_12;
124     __m256           c6grid_20;
125     __m256           c6grid_21;
126     __m256           c6grid_22;
127     real             *vdwgridparam;
128     __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
129     __m256           one_half  = _mm256_set1_ps(0.5);
130     __m256           minus_one = _mm256_set1_ps(-1.0);
131     __m256i          ewitab;
132     __m128i          ewitab_lo,ewitab_hi;
133     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
134     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
135     real             *ewtab;
136     __m256           dummy_mask,cutoff_mask;
137     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
138     __m256           one     = _mm256_set1_ps(1.0);
139     __m256           two     = _mm256_set1_ps(2.0);
140     x                = xx[0];
141     f                = ff[0];
142
143     nri              = nlist->nri;
144     iinr             = nlist->iinr;
145     jindex           = nlist->jindex;
146     jjnr             = nlist->jjnr;
147     shiftidx         = nlist->shift;
148     gid              = nlist->gid;
149     shiftvec         = fr->shift_vec[0];
150     fshift           = fr->fshift[0];
151     facel            = _mm256_set1_ps(fr->epsfac);
152     charge           = mdatoms->chargeA;
153     nvdwtype         = fr->ntype;
154     vdwparam         = fr->nbfp;
155     vdwtype          = mdatoms->typeA;
156     vdwgridparam     = fr->ljpme_c6grid;
157     sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
158     ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
159     ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
160
161     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
162     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
163     beta2            = _mm256_mul_ps(beta,beta);
164     beta3            = _mm256_mul_ps(beta,beta2);
165
166     ewtab            = fr->ic->tabq_coul_FDV0;
167     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
168     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
169
170     /* Setup water-specific parameters */
171     inr              = nlist->iinr[0];
172     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
173     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
174     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
175     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
176     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
177
178     jq0              = _mm256_set1_ps(charge[inr+0]);
179     jq1              = _mm256_set1_ps(charge[inr+1]);
180     jq2              = _mm256_set1_ps(charge[inr+2]);
181     vdwjidx0A        = 2*vdwtype[inr+0];
182     qq00             = _mm256_mul_ps(iq0,jq0);
183     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
184     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
185     c6grid_00        = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
186     qq01             = _mm256_mul_ps(iq0,jq1);
187     qq02             = _mm256_mul_ps(iq0,jq2);
188     qq10             = _mm256_mul_ps(iq1,jq0);
189     qq11             = _mm256_mul_ps(iq1,jq1);
190     qq12             = _mm256_mul_ps(iq1,jq2);
191     qq20             = _mm256_mul_ps(iq2,jq0);
192     qq21             = _mm256_mul_ps(iq2,jq1);
193     qq22             = _mm256_mul_ps(iq2,jq2);
194
195     /* Avoid stupid compiler warnings */
196     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
197     j_coord_offsetA = 0;
198     j_coord_offsetB = 0;
199     j_coord_offsetC = 0;
200     j_coord_offsetD = 0;
201     j_coord_offsetE = 0;
202     j_coord_offsetF = 0;
203     j_coord_offsetG = 0;
204     j_coord_offsetH = 0;
205
206     outeriter        = 0;
207     inneriter        = 0;
208
209     for(iidx=0;iidx<4*DIM;iidx++)
210     {
211         scratch[iidx] = 0.0;
212     }
213
214     /* Start outer loop over neighborlists */
215     for(iidx=0; iidx<nri; iidx++)
216     {
217         /* Load shift vector for this list */
218         i_shift_offset   = DIM*shiftidx[iidx];
219
220         /* Load limits for loop over neighbors */
221         j_index_start    = jindex[iidx];
222         j_index_end      = jindex[iidx+1];
223
224         /* Get outer coordinate index */
225         inr              = iinr[iidx];
226         i_coord_offset   = DIM*inr;
227
228         /* Load i particle coords and add shift vector */
229         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
230                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
231
232         fix0             = _mm256_setzero_ps();
233         fiy0             = _mm256_setzero_ps();
234         fiz0             = _mm256_setzero_ps();
235         fix1             = _mm256_setzero_ps();
236         fiy1             = _mm256_setzero_ps();
237         fiz1             = _mm256_setzero_ps();
238         fix2             = _mm256_setzero_ps();
239         fiy2             = _mm256_setzero_ps();
240         fiz2             = _mm256_setzero_ps();
241
242         /* Reset potential sums */
243         velecsum         = _mm256_setzero_ps();
244         vvdwsum          = _mm256_setzero_ps();
245
246         /* Start inner kernel loop */
247         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
248         {
249
250             /* Get j neighbor index, and coordinate index */
251             jnrA             = jjnr[jidx];
252             jnrB             = jjnr[jidx+1];
253             jnrC             = jjnr[jidx+2];
254             jnrD             = jjnr[jidx+3];
255             jnrE             = jjnr[jidx+4];
256             jnrF             = jjnr[jidx+5];
257             jnrG             = jjnr[jidx+6];
258             jnrH             = jjnr[jidx+7];
259             j_coord_offsetA  = DIM*jnrA;
260             j_coord_offsetB  = DIM*jnrB;
261             j_coord_offsetC  = DIM*jnrC;
262             j_coord_offsetD  = DIM*jnrD;
263             j_coord_offsetE  = DIM*jnrE;
264             j_coord_offsetF  = DIM*jnrF;
265             j_coord_offsetG  = DIM*jnrG;
266             j_coord_offsetH  = DIM*jnrH;
267
268             /* load j atom coordinates */
269             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
270                                                  x+j_coord_offsetC,x+j_coord_offsetD,
271                                                  x+j_coord_offsetE,x+j_coord_offsetF,
272                                                  x+j_coord_offsetG,x+j_coord_offsetH,
273                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
274
275             /* Calculate displacement vector */
276             dx00             = _mm256_sub_ps(ix0,jx0);
277             dy00             = _mm256_sub_ps(iy0,jy0);
278             dz00             = _mm256_sub_ps(iz0,jz0);
279             dx01             = _mm256_sub_ps(ix0,jx1);
280             dy01             = _mm256_sub_ps(iy0,jy1);
281             dz01             = _mm256_sub_ps(iz0,jz1);
282             dx02             = _mm256_sub_ps(ix0,jx2);
283             dy02             = _mm256_sub_ps(iy0,jy2);
284             dz02             = _mm256_sub_ps(iz0,jz2);
285             dx10             = _mm256_sub_ps(ix1,jx0);
286             dy10             = _mm256_sub_ps(iy1,jy0);
287             dz10             = _mm256_sub_ps(iz1,jz0);
288             dx11             = _mm256_sub_ps(ix1,jx1);
289             dy11             = _mm256_sub_ps(iy1,jy1);
290             dz11             = _mm256_sub_ps(iz1,jz1);
291             dx12             = _mm256_sub_ps(ix1,jx2);
292             dy12             = _mm256_sub_ps(iy1,jy2);
293             dz12             = _mm256_sub_ps(iz1,jz2);
294             dx20             = _mm256_sub_ps(ix2,jx0);
295             dy20             = _mm256_sub_ps(iy2,jy0);
296             dz20             = _mm256_sub_ps(iz2,jz0);
297             dx21             = _mm256_sub_ps(ix2,jx1);
298             dy21             = _mm256_sub_ps(iy2,jy1);
299             dz21             = _mm256_sub_ps(iz2,jz1);
300             dx22             = _mm256_sub_ps(ix2,jx2);
301             dy22             = _mm256_sub_ps(iy2,jy2);
302             dz22             = _mm256_sub_ps(iz2,jz2);
303
304             /* Calculate squared distance and things based on it */
305             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
306             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
307             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
308             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
309             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
310             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
311             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
312             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
313             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
314
315             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
316             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
317             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
318             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
319             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
320             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
321             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
322             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
323             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
324
325             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
326             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
327             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
328             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
329             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
330             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
331             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
332             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
333             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
334
335             fjx0             = _mm256_setzero_ps();
336             fjy0             = _mm256_setzero_ps();
337             fjz0             = _mm256_setzero_ps();
338             fjx1             = _mm256_setzero_ps();
339             fjy1             = _mm256_setzero_ps();
340             fjz1             = _mm256_setzero_ps();
341             fjx2             = _mm256_setzero_ps();
342             fjy2             = _mm256_setzero_ps();
343             fjz2             = _mm256_setzero_ps();
344
345             /**************************
346              * CALCULATE INTERACTIONS *
347              **************************/
348
349             r00              = _mm256_mul_ps(rsq00,rinv00);
350
351             /* EWALD ELECTROSTATICS */
352             
353             /* Analytical PME correction */
354             zeta2            = _mm256_mul_ps(beta2,rsq00);
355             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
356             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
357             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
358             felec            = _mm256_mul_ps(qq00,felec);
359             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
360             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
361             velec            = _mm256_sub_ps(rinv00,pmecorrV);
362             velec            = _mm256_mul_ps(qq00,velec);
363             
364             /* Analytical LJ-PME */
365             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
366             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
367             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
368             exponent         = gmx_simd_exp_r(ewcljrsq);
369             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
370             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
371             /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
372             vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
373             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
374             vvdw             = _mm256_sub_ps(_mm256_mul_ps(vvdw12,one_twelfth),_mm256_mul_ps(vvdw6,one_sixth));
375             /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
376             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
377
378             /* Update potential sum for this i atom from the interaction with this j atom. */
379             velecsum         = _mm256_add_ps(velecsum,velec);
380             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
381
382             fscal            = _mm256_add_ps(felec,fvdw);
383
384             /* Calculate temporary vectorial force */
385             tx               = _mm256_mul_ps(fscal,dx00);
386             ty               = _mm256_mul_ps(fscal,dy00);
387             tz               = _mm256_mul_ps(fscal,dz00);
388
389             /* Update vectorial force */
390             fix0             = _mm256_add_ps(fix0,tx);
391             fiy0             = _mm256_add_ps(fiy0,ty);
392             fiz0             = _mm256_add_ps(fiz0,tz);
393
394             fjx0             = _mm256_add_ps(fjx0,tx);
395             fjy0             = _mm256_add_ps(fjy0,ty);
396             fjz0             = _mm256_add_ps(fjz0,tz);
397
398             /**************************
399              * CALCULATE INTERACTIONS *
400              **************************/
401
402             r01              = _mm256_mul_ps(rsq01,rinv01);
403
404             /* EWALD ELECTROSTATICS */
405             
406             /* Analytical PME correction */
407             zeta2            = _mm256_mul_ps(beta2,rsq01);
408             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
409             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
410             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
411             felec            = _mm256_mul_ps(qq01,felec);
412             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
413             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
414             velec            = _mm256_sub_ps(rinv01,pmecorrV);
415             velec            = _mm256_mul_ps(qq01,velec);
416             
417             /* Update potential sum for this i atom from the interaction with this j atom. */
418             velecsum         = _mm256_add_ps(velecsum,velec);
419
420             fscal            = felec;
421
422             /* Calculate temporary vectorial force */
423             tx               = _mm256_mul_ps(fscal,dx01);
424             ty               = _mm256_mul_ps(fscal,dy01);
425             tz               = _mm256_mul_ps(fscal,dz01);
426
427             /* Update vectorial force */
428             fix0             = _mm256_add_ps(fix0,tx);
429             fiy0             = _mm256_add_ps(fiy0,ty);
430             fiz0             = _mm256_add_ps(fiz0,tz);
431
432             fjx1             = _mm256_add_ps(fjx1,tx);
433             fjy1             = _mm256_add_ps(fjy1,ty);
434             fjz1             = _mm256_add_ps(fjz1,tz);
435
436             /**************************
437              * CALCULATE INTERACTIONS *
438              **************************/
439
440             r02              = _mm256_mul_ps(rsq02,rinv02);
441
442             /* EWALD ELECTROSTATICS */
443             
444             /* Analytical PME correction */
445             zeta2            = _mm256_mul_ps(beta2,rsq02);
446             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
447             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
448             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
449             felec            = _mm256_mul_ps(qq02,felec);
450             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
451             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
452             velec            = _mm256_sub_ps(rinv02,pmecorrV);
453             velec            = _mm256_mul_ps(qq02,velec);
454             
455             /* Update potential sum for this i atom from the interaction with this j atom. */
456             velecsum         = _mm256_add_ps(velecsum,velec);
457
458             fscal            = felec;
459
460             /* Calculate temporary vectorial force */
461             tx               = _mm256_mul_ps(fscal,dx02);
462             ty               = _mm256_mul_ps(fscal,dy02);
463             tz               = _mm256_mul_ps(fscal,dz02);
464
465             /* Update vectorial force */
466             fix0             = _mm256_add_ps(fix0,tx);
467             fiy0             = _mm256_add_ps(fiy0,ty);
468             fiz0             = _mm256_add_ps(fiz0,tz);
469
470             fjx2             = _mm256_add_ps(fjx2,tx);
471             fjy2             = _mm256_add_ps(fjy2,ty);
472             fjz2             = _mm256_add_ps(fjz2,tz);
473
474             /**************************
475              * CALCULATE INTERACTIONS *
476              **************************/
477
478             r10              = _mm256_mul_ps(rsq10,rinv10);
479
480             /* EWALD ELECTROSTATICS */
481             
482             /* Analytical PME correction */
483             zeta2            = _mm256_mul_ps(beta2,rsq10);
484             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
485             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
486             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
487             felec            = _mm256_mul_ps(qq10,felec);
488             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
489             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
490             velec            = _mm256_sub_ps(rinv10,pmecorrV);
491             velec            = _mm256_mul_ps(qq10,velec);
492             
493             /* Update potential sum for this i atom from the interaction with this j atom. */
494             velecsum         = _mm256_add_ps(velecsum,velec);
495
496             fscal            = felec;
497
498             /* Calculate temporary vectorial force */
499             tx               = _mm256_mul_ps(fscal,dx10);
500             ty               = _mm256_mul_ps(fscal,dy10);
501             tz               = _mm256_mul_ps(fscal,dz10);
502
503             /* Update vectorial force */
504             fix1             = _mm256_add_ps(fix1,tx);
505             fiy1             = _mm256_add_ps(fiy1,ty);
506             fiz1             = _mm256_add_ps(fiz1,tz);
507
508             fjx0             = _mm256_add_ps(fjx0,tx);
509             fjy0             = _mm256_add_ps(fjy0,ty);
510             fjz0             = _mm256_add_ps(fjz0,tz);
511
512             /**************************
513              * CALCULATE INTERACTIONS *
514              **************************/
515
516             r11              = _mm256_mul_ps(rsq11,rinv11);
517
518             /* EWALD ELECTROSTATICS */
519             
520             /* Analytical PME correction */
521             zeta2            = _mm256_mul_ps(beta2,rsq11);
522             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
523             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
524             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
525             felec            = _mm256_mul_ps(qq11,felec);
526             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
527             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
528             velec            = _mm256_sub_ps(rinv11,pmecorrV);
529             velec            = _mm256_mul_ps(qq11,velec);
530             
531             /* Update potential sum for this i atom from the interaction with this j atom. */
532             velecsum         = _mm256_add_ps(velecsum,velec);
533
534             fscal            = felec;
535
536             /* Calculate temporary vectorial force */
537             tx               = _mm256_mul_ps(fscal,dx11);
538             ty               = _mm256_mul_ps(fscal,dy11);
539             tz               = _mm256_mul_ps(fscal,dz11);
540
541             /* Update vectorial force */
542             fix1             = _mm256_add_ps(fix1,tx);
543             fiy1             = _mm256_add_ps(fiy1,ty);
544             fiz1             = _mm256_add_ps(fiz1,tz);
545
546             fjx1             = _mm256_add_ps(fjx1,tx);
547             fjy1             = _mm256_add_ps(fjy1,ty);
548             fjz1             = _mm256_add_ps(fjz1,tz);
549
550             /**************************
551              * CALCULATE INTERACTIONS *
552              **************************/
553
554             r12              = _mm256_mul_ps(rsq12,rinv12);
555
556             /* EWALD ELECTROSTATICS */
557             
558             /* Analytical PME correction */
559             zeta2            = _mm256_mul_ps(beta2,rsq12);
560             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
561             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
562             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
563             felec            = _mm256_mul_ps(qq12,felec);
564             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
565             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
566             velec            = _mm256_sub_ps(rinv12,pmecorrV);
567             velec            = _mm256_mul_ps(qq12,velec);
568             
569             /* Update potential sum for this i atom from the interaction with this j atom. */
570             velecsum         = _mm256_add_ps(velecsum,velec);
571
572             fscal            = felec;
573
574             /* Calculate temporary vectorial force */
575             tx               = _mm256_mul_ps(fscal,dx12);
576             ty               = _mm256_mul_ps(fscal,dy12);
577             tz               = _mm256_mul_ps(fscal,dz12);
578
579             /* Update vectorial force */
580             fix1             = _mm256_add_ps(fix1,tx);
581             fiy1             = _mm256_add_ps(fiy1,ty);
582             fiz1             = _mm256_add_ps(fiz1,tz);
583
584             fjx2             = _mm256_add_ps(fjx2,tx);
585             fjy2             = _mm256_add_ps(fjy2,ty);
586             fjz2             = _mm256_add_ps(fjz2,tz);
587
588             /**************************
589              * CALCULATE INTERACTIONS *
590              **************************/
591
592             r20              = _mm256_mul_ps(rsq20,rinv20);
593
594             /* EWALD ELECTROSTATICS */
595             
596             /* Analytical PME correction */
597             zeta2            = _mm256_mul_ps(beta2,rsq20);
598             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
599             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
600             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
601             felec            = _mm256_mul_ps(qq20,felec);
602             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
603             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
604             velec            = _mm256_sub_ps(rinv20,pmecorrV);
605             velec            = _mm256_mul_ps(qq20,velec);
606             
607             /* Update potential sum for this i atom from the interaction with this j atom. */
608             velecsum         = _mm256_add_ps(velecsum,velec);
609
610             fscal            = felec;
611
612             /* Calculate temporary vectorial force */
613             tx               = _mm256_mul_ps(fscal,dx20);
614             ty               = _mm256_mul_ps(fscal,dy20);
615             tz               = _mm256_mul_ps(fscal,dz20);
616
617             /* Update vectorial force */
618             fix2             = _mm256_add_ps(fix2,tx);
619             fiy2             = _mm256_add_ps(fiy2,ty);
620             fiz2             = _mm256_add_ps(fiz2,tz);
621
622             fjx0             = _mm256_add_ps(fjx0,tx);
623             fjy0             = _mm256_add_ps(fjy0,ty);
624             fjz0             = _mm256_add_ps(fjz0,tz);
625
626             /**************************
627              * CALCULATE INTERACTIONS *
628              **************************/
629
630             r21              = _mm256_mul_ps(rsq21,rinv21);
631
632             /* EWALD ELECTROSTATICS */
633             
634             /* Analytical PME correction */
635             zeta2            = _mm256_mul_ps(beta2,rsq21);
636             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
637             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
638             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
639             felec            = _mm256_mul_ps(qq21,felec);
640             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
641             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
642             velec            = _mm256_sub_ps(rinv21,pmecorrV);
643             velec            = _mm256_mul_ps(qq21,velec);
644             
645             /* Update potential sum for this i atom from the interaction with this j atom. */
646             velecsum         = _mm256_add_ps(velecsum,velec);
647
648             fscal            = felec;
649
650             /* Calculate temporary vectorial force */
651             tx               = _mm256_mul_ps(fscal,dx21);
652             ty               = _mm256_mul_ps(fscal,dy21);
653             tz               = _mm256_mul_ps(fscal,dz21);
654
655             /* Update vectorial force */
656             fix2             = _mm256_add_ps(fix2,tx);
657             fiy2             = _mm256_add_ps(fiy2,ty);
658             fiz2             = _mm256_add_ps(fiz2,tz);
659
660             fjx1             = _mm256_add_ps(fjx1,tx);
661             fjy1             = _mm256_add_ps(fjy1,ty);
662             fjz1             = _mm256_add_ps(fjz1,tz);
663
664             /**************************
665              * CALCULATE INTERACTIONS *
666              **************************/
667
668             r22              = _mm256_mul_ps(rsq22,rinv22);
669
670             /* EWALD ELECTROSTATICS */
671             
672             /* Analytical PME correction */
673             zeta2            = _mm256_mul_ps(beta2,rsq22);
674             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
675             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
676             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
677             felec            = _mm256_mul_ps(qq22,felec);
678             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
679             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
680             velec            = _mm256_sub_ps(rinv22,pmecorrV);
681             velec            = _mm256_mul_ps(qq22,velec);
682             
683             /* Update potential sum for this i atom from the interaction with this j atom. */
684             velecsum         = _mm256_add_ps(velecsum,velec);
685
686             fscal            = felec;
687
688             /* Calculate temporary vectorial force */
689             tx               = _mm256_mul_ps(fscal,dx22);
690             ty               = _mm256_mul_ps(fscal,dy22);
691             tz               = _mm256_mul_ps(fscal,dz22);
692
693             /* Update vectorial force */
694             fix2             = _mm256_add_ps(fix2,tx);
695             fiy2             = _mm256_add_ps(fiy2,ty);
696             fiz2             = _mm256_add_ps(fiz2,tz);
697
698             fjx2             = _mm256_add_ps(fjx2,tx);
699             fjy2             = _mm256_add_ps(fjy2,ty);
700             fjz2             = _mm256_add_ps(fjz2,tz);
701
702             fjptrA             = f+j_coord_offsetA;
703             fjptrB             = f+j_coord_offsetB;
704             fjptrC             = f+j_coord_offsetC;
705             fjptrD             = f+j_coord_offsetD;
706             fjptrE             = f+j_coord_offsetE;
707             fjptrF             = f+j_coord_offsetF;
708             fjptrG             = f+j_coord_offsetG;
709             fjptrH             = f+j_coord_offsetH;
710
711             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
712                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
713
714             /* Inner loop uses 784 flops */
715         }
716
717         if(jidx<j_index_end)
718         {
719
720             /* Get j neighbor index, and coordinate index */
721             jnrlistA         = jjnr[jidx];
722             jnrlistB         = jjnr[jidx+1];
723             jnrlistC         = jjnr[jidx+2];
724             jnrlistD         = jjnr[jidx+3];
725             jnrlistE         = jjnr[jidx+4];
726             jnrlistF         = jjnr[jidx+5];
727             jnrlistG         = jjnr[jidx+6];
728             jnrlistH         = jjnr[jidx+7];
729             /* Sign of each element will be negative for non-real atoms.
730              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
731              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
732              */
733             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
734                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
735                                             
736             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
737             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
738             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
739             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
740             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
741             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
742             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
743             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
744             j_coord_offsetA  = DIM*jnrA;
745             j_coord_offsetB  = DIM*jnrB;
746             j_coord_offsetC  = DIM*jnrC;
747             j_coord_offsetD  = DIM*jnrD;
748             j_coord_offsetE  = DIM*jnrE;
749             j_coord_offsetF  = DIM*jnrF;
750             j_coord_offsetG  = DIM*jnrG;
751             j_coord_offsetH  = DIM*jnrH;
752
753             /* load j atom coordinates */
754             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
755                                                  x+j_coord_offsetC,x+j_coord_offsetD,
756                                                  x+j_coord_offsetE,x+j_coord_offsetF,
757                                                  x+j_coord_offsetG,x+j_coord_offsetH,
758                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
759
760             /* Calculate displacement vector */
761             dx00             = _mm256_sub_ps(ix0,jx0);
762             dy00             = _mm256_sub_ps(iy0,jy0);
763             dz00             = _mm256_sub_ps(iz0,jz0);
764             dx01             = _mm256_sub_ps(ix0,jx1);
765             dy01             = _mm256_sub_ps(iy0,jy1);
766             dz01             = _mm256_sub_ps(iz0,jz1);
767             dx02             = _mm256_sub_ps(ix0,jx2);
768             dy02             = _mm256_sub_ps(iy0,jy2);
769             dz02             = _mm256_sub_ps(iz0,jz2);
770             dx10             = _mm256_sub_ps(ix1,jx0);
771             dy10             = _mm256_sub_ps(iy1,jy0);
772             dz10             = _mm256_sub_ps(iz1,jz0);
773             dx11             = _mm256_sub_ps(ix1,jx1);
774             dy11             = _mm256_sub_ps(iy1,jy1);
775             dz11             = _mm256_sub_ps(iz1,jz1);
776             dx12             = _mm256_sub_ps(ix1,jx2);
777             dy12             = _mm256_sub_ps(iy1,jy2);
778             dz12             = _mm256_sub_ps(iz1,jz2);
779             dx20             = _mm256_sub_ps(ix2,jx0);
780             dy20             = _mm256_sub_ps(iy2,jy0);
781             dz20             = _mm256_sub_ps(iz2,jz0);
782             dx21             = _mm256_sub_ps(ix2,jx1);
783             dy21             = _mm256_sub_ps(iy2,jy1);
784             dz21             = _mm256_sub_ps(iz2,jz1);
785             dx22             = _mm256_sub_ps(ix2,jx2);
786             dy22             = _mm256_sub_ps(iy2,jy2);
787             dz22             = _mm256_sub_ps(iz2,jz2);
788
789             /* Calculate squared distance and things based on it */
790             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
791             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
792             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
793             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
794             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
795             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
796             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
797             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
798             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
799
800             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
801             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
802             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
803             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
804             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
805             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
806             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
807             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
808             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
809
810             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
811             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
812             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
813             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
814             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
815             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
816             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
817             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
818             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
819
820             fjx0             = _mm256_setzero_ps();
821             fjy0             = _mm256_setzero_ps();
822             fjz0             = _mm256_setzero_ps();
823             fjx1             = _mm256_setzero_ps();
824             fjy1             = _mm256_setzero_ps();
825             fjz1             = _mm256_setzero_ps();
826             fjx2             = _mm256_setzero_ps();
827             fjy2             = _mm256_setzero_ps();
828             fjz2             = _mm256_setzero_ps();
829
830             /**************************
831              * CALCULATE INTERACTIONS *
832              **************************/
833
834             r00              = _mm256_mul_ps(rsq00,rinv00);
835             r00              = _mm256_andnot_ps(dummy_mask,r00);
836
837             /* EWALD ELECTROSTATICS */
838             
839             /* Analytical PME correction */
840             zeta2            = _mm256_mul_ps(beta2,rsq00);
841             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
842             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
843             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
844             felec            = _mm256_mul_ps(qq00,felec);
845             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
846             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
847             velec            = _mm256_sub_ps(rinv00,pmecorrV);
848             velec            = _mm256_mul_ps(qq00,velec);
849             
850             /* Analytical LJ-PME */
851             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
852             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
853             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
854             exponent         = gmx_simd_exp_r(ewcljrsq);
855             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
856             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
857             /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
858             vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
859             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
860             vvdw             = _mm256_sub_ps(_mm256_mul_ps(vvdw12,one_twelfth),_mm256_mul_ps(vvdw6,one_sixth));
861             /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
862             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
863
864             /* Update potential sum for this i atom from the interaction with this j atom. */
865             velec            = _mm256_andnot_ps(dummy_mask,velec);
866             velecsum         = _mm256_add_ps(velecsum,velec);
867             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
868             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
869
870             fscal            = _mm256_add_ps(felec,fvdw);
871
872             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
873
874             /* Calculate temporary vectorial force */
875             tx               = _mm256_mul_ps(fscal,dx00);
876             ty               = _mm256_mul_ps(fscal,dy00);
877             tz               = _mm256_mul_ps(fscal,dz00);
878
879             /* Update vectorial force */
880             fix0             = _mm256_add_ps(fix0,tx);
881             fiy0             = _mm256_add_ps(fiy0,ty);
882             fiz0             = _mm256_add_ps(fiz0,tz);
883
884             fjx0             = _mm256_add_ps(fjx0,tx);
885             fjy0             = _mm256_add_ps(fjy0,ty);
886             fjz0             = _mm256_add_ps(fjz0,tz);
887
888             /**************************
889              * CALCULATE INTERACTIONS *
890              **************************/
891
892             r01              = _mm256_mul_ps(rsq01,rinv01);
893             r01              = _mm256_andnot_ps(dummy_mask,r01);
894
895             /* EWALD ELECTROSTATICS */
896             
897             /* Analytical PME correction */
898             zeta2            = _mm256_mul_ps(beta2,rsq01);
899             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
900             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
901             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
902             felec            = _mm256_mul_ps(qq01,felec);
903             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
904             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
905             velec            = _mm256_sub_ps(rinv01,pmecorrV);
906             velec            = _mm256_mul_ps(qq01,velec);
907             
908             /* Update potential sum for this i atom from the interaction with this j atom. */
909             velec            = _mm256_andnot_ps(dummy_mask,velec);
910             velecsum         = _mm256_add_ps(velecsum,velec);
911
912             fscal            = felec;
913
914             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
915
916             /* Calculate temporary vectorial force */
917             tx               = _mm256_mul_ps(fscal,dx01);
918             ty               = _mm256_mul_ps(fscal,dy01);
919             tz               = _mm256_mul_ps(fscal,dz01);
920
921             /* Update vectorial force */
922             fix0             = _mm256_add_ps(fix0,tx);
923             fiy0             = _mm256_add_ps(fiy0,ty);
924             fiz0             = _mm256_add_ps(fiz0,tz);
925
926             fjx1             = _mm256_add_ps(fjx1,tx);
927             fjy1             = _mm256_add_ps(fjy1,ty);
928             fjz1             = _mm256_add_ps(fjz1,tz);
929
930             /**************************
931              * CALCULATE INTERACTIONS *
932              **************************/
933
934             r02              = _mm256_mul_ps(rsq02,rinv02);
935             r02              = _mm256_andnot_ps(dummy_mask,r02);
936
937             /* EWALD ELECTROSTATICS */
938             
939             /* Analytical PME correction */
940             zeta2            = _mm256_mul_ps(beta2,rsq02);
941             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
942             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
943             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
944             felec            = _mm256_mul_ps(qq02,felec);
945             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
946             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
947             velec            = _mm256_sub_ps(rinv02,pmecorrV);
948             velec            = _mm256_mul_ps(qq02,velec);
949             
950             /* Update potential sum for this i atom from the interaction with this j atom. */
951             velec            = _mm256_andnot_ps(dummy_mask,velec);
952             velecsum         = _mm256_add_ps(velecsum,velec);
953
954             fscal            = felec;
955
956             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
957
958             /* Calculate temporary vectorial force */
959             tx               = _mm256_mul_ps(fscal,dx02);
960             ty               = _mm256_mul_ps(fscal,dy02);
961             tz               = _mm256_mul_ps(fscal,dz02);
962
963             /* Update vectorial force */
964             fix0             = _mm256_add_ps(fix0,tx);
965             fiy0             = _mm256_add_ps(fiy0,ty);
966             fiz0             = _mm256_add_ps(fiz0,tz);
967
968             fjx2             = _mm256_add_ps(fjx2,tx);
969             fjy2             = _mm256_add_ps(fjy2,ty);
970             fjz2             = _mm256_add_ps(fjz2,tz);
971
972             /**************************
973              * CALCULATE INTERACTIONS *
974              **************************/
975
976             r10              = _mm256_mul_ps(rsq10,rinv10);
977             r10              = _mm256_andnot_ps(dummy_mask,r10);
978
979             /* EWALD ELECTROSTATICS */
980             
981             /* Analytical PME correction */
982             zeta2            = _mm256_mul_ps(beta2,rsq10);
983             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
984             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
985             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
986             felec            = _mm256_mul_ps(qq10,felec);
987             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
988             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
989             velec            = _mm256_sub_ps(rinv10,pmecorrV);
990             velec            = _mm256_mul_ps(qq10,velec);
991             
992             /* Update potential sum for this i atom from the interaction with this j atom. */
993             velec            = _mm256_andnot_ps(dummy_mask,velec);
994             velecsum         = _mm256_add_ps(velecsum,velec);
995
996             fscal            = felec;
997
998             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
999
1000             /* Calculate temporary vectorial force */
1001             tx               = _mm256_mul_ps(fscal,dx10);
1002             ty               = _mm256_mul_ps(fscal,dy10);
1003             tz               = _mm256_mul_ps(fscal,dz10);
1004
1005             /* Update vectorial force */
1006             fix1             = _mm256_add_ps(fix1,tx);
1007             fiy1             = _mm256_add_ps(fiy1,ty);
1008             fiz1             = _mm256_add_ps(fiz1,tz);
1009
1010             fjx0             = _mm256_add_ps(fjx0,tx);
1011             fjy0             = _mm256_add_ps(fjy0,ty);
1012             fjz0             = _mm256_add_ps(fjz0,tz);
1013
1014             /**************************
1015              * CALCULATE INTERACTIONS *
1016              **************************/
1017
1018             r11              = _mm256_mul_ps(rsq11,rinv11);
1019             r11              = _mm256_andnot_ps(dummy_mask,r11);
1020
1021             /* EWALD ELECTROSTATICS */
1022             
1023             /* Analytical PME correction */
1024             zeta2            = _mm256_mul_ps(beta2,rsq11);
1025             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1026             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1027             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1028             felec            = _mm256_mul_ps(qq11,felec);
1029             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1030             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1031             velec            = _mm256_sub_ps(rinv11,pmecorrV);
1032             velec            = _mm256_mul_ps(qq11,velec);
1033             
1034             /* Update potential sum for this i atom from the interaction with this j atom. */
1035             velec            = _mm256_andnot_ps(dummy_mask,velec);
1036             velecsum         = _mm256_add_ps(velecsum,velec);
1037
1038             fscal            = felec;
1039
1040             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1041
1042             /* Calculate temporary vectorial force */
1043             tx               = _mm256_mul_ps(fscal,dx11);
1044             ty               = _mm256_mul_ps(fscal,dy11);
1045             tz               = _mm256_mul_ps(fscal,dz11);
1046
1047             /* Update vectorial force */
1048             fix1             = _mm256_add_ps(fix1,tx);
1049             fiy1             = _mm256_add_ps(fiy1,ty);
1050             fiz1             = _mm256_add_ps(fiz1,tz);
1051
1052             fjx1             = _mm256_add_ps(fjx1,tx);
1053             fjy1             = _mm256_add_ps(fjy1,ty);
1054             fjz1             = _mm256_add_ps(fjz1,tz);
1055
1056             /**************************
1057              * CALCULATE INTERACTIONS *
1058              **************************/
1059
1060             r12              = _mm256_mul_ps(rsq12,rinv12);
1061             r12              = _mm256_andnot_ps(dummy_mask,r12);
1062
1063             /* EWALD ELECTROSTATICS */
1064             
1065             /* Analytical PME correction */
1066             zeta2            = _mm256_mul_ps(beta2,rsq12);
1067             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1068             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1069             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1070             felec            = _mm256_mul_ps(qq12,felec);
1071             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1072             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1073             velec            = _mm256_sub_ps(rinv12,pmecorrV);
1074             velec            = _mm256_mul_ps(qq12,velec);
1075             
1076             /* Update potential sum for this i atom from the interaction with this j atom. */
1077             velec            = _mm256_andnot_ps(dummy_mask,velec);
1078             velecsum         = _mm256_add_ps(velecsum,velec);
1079
1080             fscal            = felec;
1081
1082             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1083
1084             /* Calculate temporary vectorial force */
1085             tx               = _mm256_mul_ps(fscal,dx12);
1086             ty               = _mm256_mul_ps(fscal,dy12);
1087             tz               = _mm256_mul_ps(fscal,dz12);
1088
1089             /* Update vectorial force */
1090             fix1             = _mm256_add_ps(fix1,tx);
1091             fiy1             = _mm256_add_ps(fiy1,ty);
1092             fiz1             = _mm256_add_ps(fiz1,tz);
1093
1094             fjx2             = _mm256_add_ps(fjx2,tx);
1095             fjy2             = _mm256_add_ps(fjy2,ty);
1096             fjz2             = _mm256_add_ps(fjz2,tz);
1097
1098             /**************************
1099              * CALCULATE INTERACTIONS *
1100              **************************/
1101
1102             r20              = _mm256_mul_ps(rsq20,rinv20);
1103             r20              = _mm256_andnot_ps(dummy_mask,r20);
1104
1105             /* EWALD ELECTROSTATICS */
1106             
1107             /* Analytical PME correction */
1108             zeta2            = _mm256_mul_ps(beta2,rsq20);
1109             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1110             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1111             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1112             felec            = _mm256_mul_ps(qq20,felec);
1113             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1114             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1115             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1116             velec            = _mm256_mul_ps(qq20,velec);
1117             
1118             /* Update potential sum for this i atom from the interaction with this j atom. */
1119             velec            = _mm256_andnot_ps(dummy_mask,velec);
1120             velecsum         = _mm256_add_ps(velecsum,velec);
1121
1122             fscal            = felec;
1123
1124             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1125
1126             /* Calculate temporary vectorial force */
1127             tx               = _mm256_mul_ps(fscal,dx20);
1128             ty               = _mm256_mul_ps(fscal,dy20);
1129             tz               = _mm256_mul_ps(fscal,dz20);
1130
1131             /* Update vectorial force */
1132             fix2             = _mm256_add_ps(fix2,tx);
1133             fiy2             = _mm256_add_ps(fiy2,ty);
1134             fiz2             = _mm256_add_ps(fiz2,tz);
1135
1136             fjx0             = _mm256_add_ps(fjx0,tx);
1137             fjy0             = _mm256_add_ps(fjy0,ty);
1138             fjz0             = _mm256_add_ps(fjz0,tz);
1139
1140             /**************************
1141              * CALCULATE INTERACTIONS *
1142              **************************/
1143
1144             r21              = _mm256_mul_ps(rsq21,rinv21);
1145             r21              = _mm256_andnot_ps(dummy_mask,r21);
1146
1147             /* EWALD ELECTROSTATICS */
1148             
1149             /* Analytical PME correction */
1150             zeta2            = _mm256_mul_ps(beta2,rsq21);
1151             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1152             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1153             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1154             felec            = _mm256_mul_ps(qq21,felec);
1155             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1156             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1157             velec            = _mm256_sub_ps(rinv21,pmecorrV);
1158             velec            = _mm256_mul_ps(qq21,velec);
1159             
1160             /* Update potential sum for this i atom from the interaction with this j atom. */
1161             velec            = _mm256_andnot_ps(dummy_mask,velec);
1162             velecsum         = _mm256_add_ps(velecsum,velec);
1163
1164             fscal            = felec;
1165
1166             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1167
1168             /* Calculate temporary vectorial force */
1169             tx               = _mm256_mul_ps(fscal,dx21);
1170             ty               = _mm256_mul_ps(fscal,dy21);
1171             tz               = _mm256_mul_ps(fscal,dz21);
1172
1173             /* Update vectorial force */
1174             fix2             = _mm256_add_ps(fix2,tx);
1175             fiy2             = _mm256_add_ps(fiy2,ty);
1176             fiz2             = _mm256_add_ps(fiz2,tz);
1177
1178             fjx1             = _mm256_add_ps(fjx1,tx);
1179             fjy1             = _mm256_add_ps(fjy1,ty);
1180             fjz1             = _mm256_add_ps(fjz1,tz);
1181
1182             /**************************
1183              * CALCULATE INTERACTIONS *
1184              **************************/
1185
1186             r22              = _mm256_mul_ps(rsq22,rinv22);
1187             r22              = _mm256_andnot_ps(dummy_mask,r22);
1188
1189             /* EWALD ELECTROSTATICS */
1190             
1191             /* Analytical PME correction */
1192             zeta2            = _mm256_mul_ps(beta2,rsq22);
1193             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1194             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1195             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1196             felec            = _mm256_mul_ps(qq22,felec);
1197             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1198             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1199             velec            = _mm256_sub_ps(rinv22,pmecorrV);
1200             velec            = _mm256_mul_ps(qq22,velec);
1201             
1202             /* Update potential sum for this i atom from the interaction with this j atom. */
1203             velec            = _mm256_andnot_ps(dummy_mask,velec);
1204             velecsum         = _mm256_add_ps(velecsum,velec);
1205
1206             fscal            = felec;
1207
1208             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1209
1210             /* Calculate temporary vectorial force */
1211             tx               = _mm256_mul_ps(fscal,dx22);
1212             ty               = _mm256_mul_ps(fscal,dy22);
1213             tz               = _mm256_mul_ps(fscal,dz22);
1214
1215             /* Update vectorial force */
1216             fix2             = _mm256_add_ps(fix2,tx);
1217             fiy2             = _mm256_add_ps(fiy2,ty);
1218             fiz2             = _mm256_add_ps(fiz2,tz);
1219
1220             fjx2             = _mm256_add_ps(fjx2,tx);
1221             fjy2             = _mm256_add_ps(fjy2,ty);
1222             fjz2             = _mm256_add_ps(fjz2,tz);
1223
1224             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1225             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1226             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1227             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1228             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1229             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1230             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1231             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1232
1233             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1234                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1235
1236             /* Inner loop uses 793 flops */
1237         }
1238
1239         /* End of innermost loop */
1240
1241         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1242                                                  f+i_coord_offset,fshift+i_shift_offset);
1243
1244         ggid                        = gid[iidx];
1245         /* Update potential energies */
1246         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1247         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
1248
1249         /* Increment number of inner iterations */
1250         inneriter                  += j_index_end - j_index_start;
1251
1252         /* Outer loop uses 20 flops */
1253     }
1254
1255     /* Increment number of outer iterations */
1256     outeriter        += nri;
1257
1258     /* Update outer/inner flops */
1259
1260     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*793);
1261 }
1262 /*
1263  * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJEw_GeomW3W3_F_avx_256_single
1264  * Electrostatics interaction: Ewald
1265  * VdW interaction:            LJEwald
1266  * Geometry:                   Water3-Water3
1267  * Calculate force/pot:        Force
1268  */
1269 void
1270 nb_kernel_ElecEw_VdwLJEw_GeomW3W3_F_avx_256_single
1271                     (t_nblist                    * gmx_restrict       nlist,
1272                      rvec                        * gmx_restrict          xx,
1273                      rvec                        * gmx_restrict          ff,
1274                      t_forcerec                  * gmx_restrict          fr,
1275                      t_mdatoms                   * gmx_restrict     mdatoms,
1276                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1277                      t_nrnb                      * gmx_restrict        nrnb)
1278 {
1279     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1280      * just 0 for non-waters.
1281      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1282      * jnr indices corresponding to data put in the eight positions in the SIMD register.
1283      */
1284     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1285     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1286     int              jnrA,jnrB,jnrC,jnrD;
1287     int              jnrE,jnrF,jnrG,jnrH;
1288     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1289     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1290     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1291     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1292     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1293     real             rcutoff_scalar;
1294     real             *shiftvec,*fshift,*x,*f;
1295     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1296     real             scratch[4*DIM];
1297     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1298     real *           vdwioffsetptr0;
1299     real *           vdwgridioffsetptr0;
1300     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1301     real *           vdwioffsetptr1;
1302     real *           vdwgridioffsetptr1;
1303     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1304     real *           vdwioffsetptr2;
1305     real *           vdwgridioffsetptr2;
1306     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1307     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1308     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1309     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1310     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1311     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1312     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1313     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1314     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1315     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1316     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1317     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1318     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1319     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1320     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1321     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1322     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1323     real             *charge;
1324     int              nvdwtype;
1325     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1326     int              *vdwtype;
1327     real             *vdwparam;
1328     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
1329     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
1330     __m256           c6grid_00;
1331     __m256           c6grid_01;
1332     __m256           c6grid_02;
1333     __m256           c6grid_10;
1334     __m256           c6grid_11;
1335     __m256           c6grid_12;
1336     __m256           c6grid_20;
1337     __m256           c6grid_21;
1338     __m256           c6grid_22;
1339     real             *vdwgridparam;
1340     __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
1341     __m256           one_half  = _mm256_set1_ps(0.5);
1342     __m256           minus_one = _mm256_set1_ps(-1.0);
1343     __m256i          ewitab;
1344     __m128i          ewitab_lo,ewitab_hi;
1345     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1346     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1347     real             *ewtab;
1348     __m256           dummy_mask,cutoff_mask;
1349     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1350     __m256           one     = _mm256_set1_ps(1.0);
1351     __m256           two     = _mm256_set1_ps(2.0);
1352     x                = xx[0];
1353     f                = ff[0];
1354
1355     nri              = nlist->nri;
1356     iinr             = nlist->iinr;
1357     jindex           = nlist->jindex;
1358     jjnr             = nlist->jjnr;
1359     shiftidx         = nlist->shift;
1360     gid              = nlist->gid;
1361     shiftvec         = fr->shift_vec[0];
1362     fshift           = fr->fshift[0];
1363     facel            = _mm256_set1_ps(fr->epsfac);
1364     charge           = mdatoms->chargeA;
1365     nvdwtype         = fr->ntype;
1366     vdwparam         = fr->nbfp;
1367     vdwtype          = mdatoms->typeA;
1368     vdwgridparam     = fr->ljpme_c6grid;
1369     sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
1370     ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
1371     ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
1372
1373     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1374     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1375     beta2            = _mm256_mul_ps(beta,beta);
1376     beta3            = _mm256_mul_ps(beta,beta2);
1377
1378     ewtab            = fr->ic->tabq_coul_F;
1379     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1380     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1381
1382     /* Setup water-specific parameters */
1383     inr              = nlist->iinr[0];
1384     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1385     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1386     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1387     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1388     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
1389
1390     jq0              = _mm256_set1_ps(charge[inr+0]);
1391     jq1              = _mm256_set1_ps(charge[inr+1]);
1392     jq2              = _mm256_set1_ps(charge[inr+2]);
1393     vdwjidx0A        = 2*vdwtype[inr+0];
1394     qq00             = _mm256_mul_ps(iq0,jq0);
1395     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1396     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1397     c6grid_00        = _mm256_set1_ps(vdwgridioffsetptr0[vdwjidx0A]);
1398     qq01             = _mm256_mul_ps(iq0,jq1);
1399     qq02             = _mm256_mul_ps(iq0,jq2);
1400     qq10             = _mm256_mul_ps(iq1,jq0);
1401     qq11             = _mm256_mul_ps(iq1,jq1);
1402     qq12             = _mm256_mul_ps(iq1,jq2);
1403     qq20             = _mm256_mul_ps(iq2,jq0);
1404     qq21             = _mm256_mul_ps(iq2,jq1);
1405     qq22             = _mm256_mul_ps(iq2,jq2);
1406
1407     /* Avoid stupid compiler warnings */
1408     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1409     j_coord_offsetA = 0;
1410     j_coord_offsetB = 0;
1411     j_coord_offsetC = 0;
1412     j_coord_offsetD = 0;
1413     j_coord_offsetE = 0;
1414     j_coord_offsetF = 0;
1415     j_coord_offsetG = 0;
1416     j_coord_offsetH = 0;
1417
1418     outeriter        = 0;
1419     inneriter        = 0;
1420
1421     for(iidx=0;iidx<4*DIM;iidx++)
1422     {
1423         scratch[iidx] = 0.0;
1424     }
1425
1426     /* Start outer loop over neighborlists */
1427     for(iidx=0; iidx<nri; iidx++)
1428     {
1429         /* Load shift vector for this list */
1430         i_shift_offset   = DIM*shiftidx[iidx];
1431
1432         /* Load limits for loop over neighbors */
1433         j_index_start    = jindex[iidx];
1434         j_index_end      = jindex[iidx+1];
1435
1436         /* Get outer coordinate index */
1437         inr              = iinr[iidx];
1438         i_coord_offset   = DIM*inr;
1439
1440         /* Load i particle coords and add shift vector */
1441         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1442                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1443
1444         fix0             = _mm256_setzero_ps();
1445         fiy0             = _mm256_setzero_ps();
1446         fiz0             = _mm256_setzero_ps();
1447         fix1             = _mm256_setzero_ps();
1448         fiy1             = _mm256_setzero_ps();
1449         fiz1             = _mm256_setzero_ps();
1450         fix2             = _mm256_setzero_ps();
1451         fiy2             = _mm256_setzero_ps();
1452         fiz2             = _mm256_setzero_ps();
1453
1454         /* Start inner kernel loop */
1455         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1456         {
1457
1458             /* Get j neighbor index, and coordinate index */
1459             jnrA             = jjnr[jidx];
1460             jnrB             = jjnr[jidx+1];
1461             jnrC             = jjnr[jidx+2];
1462             jnrD             = jjnr[jidx+3];
1463             jnrE             = jjnr[jidx+4];
1464             jnrF             = jjnr[jidx+5];
1465             jnrG             = jjnr[jidx+6];
1466             jnrH             = jjnr[jidx+7];
1467             j_coord_offsetA  = DIM*jnrA;
1468             j_coord_offsetB  = DIM*jnrB;
1469             j_coord_offsetC  = DIM*jnrC;
1470             j_coord_offsetD  = DIM*jnrD;
1471             j_coord_offsetE  = DIM*jnrE;
1472             j_coord_offsetF  = DIM*jnrF;
1473             j_coord_offsetG  = DIM*jnrG;
1474             j_coord_offsetH  = DIM*jnrH;
1475
1476             /* load j atom coordinates */
1477             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1478                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1479                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1480                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1481                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1482
1483             /* Calculate displacement vector */
1484             dx00             = _mm256_sub_ps(ix0,jx0);
1485             dy00             = _mm256_sub_ps(iy0,jy0);
1486             dz00             = _mm256_sub_ps(iz0,jz0);
1487             dx01             = _mm256_sub_ps(ix0,jx1);
1488             dy01             = _mm256_sub_ps(iy0,jy1);
1489             dz01             = _mm256_sub_ps(iz0,jz1);
1490             dx02             = _mm256_sub_ps(ix0,jx2);
1491             dy02             = _mm256_sub_ps(iy0,jy2);
1492             dz02             = _mm256_sub_ps(iz0,jz2);
1493             dx10             = _mm256_sub_ps(ix1,jx0);
1494             dy10             = _mm256_sub_ps(iy1,jy0);
1495             dz10             = _mm256_sub_ps(iz1,jz0);
1496             dx11             = _mm256_sub_ps(ix1,jx1);
1497             dy11             = _mm256_sub_ps(iy1,jy1);
1498             dz11             = _mm256_sub_ps(iz1,jz1);
1499             dx12             = _mm256_sub_ps(ix1,jx2);
1500             dy12             = _mm256_sub_ps(iy1,jy2);
1501             dz12             = _mm256_sub_ps(iz1,jz2);
1502             dx20             = _mm256_sub_ps(ix2,jx0);
1503             dy20             = _mm256_sub_ps(iy2,jy0);
1504             dz20             = _mm256_sub_ps(iz2,jz0);
1505             dx21             = _mm256_sub_ps(ix2,jx1);
1506             dy21             = _mm256_sub_ps(iy2,jy1);
1507             dz21             = _mm256_sub_ps(iz2,jz1);
1508             dx22             = _mm256_sub_ps(ix2,jx2);
1509             dy22             = _mm256_sub_ps(iy2,jy2);
1510             dz22             = _mm256_sub_ps(iz2,jz2);
1511
1512             /* Calculate squared distance and things based on it */
1513             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1514             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1515             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1516             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1517             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1518             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1519             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1520             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1521             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1522
1523             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1524             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
1525             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
1526             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1527             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1528             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1529             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1530             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1531             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1532
1533             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1534             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
1535             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
1536             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1537             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1538             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1539             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1540             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1541             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1542
1543             fjx0             = _mm256_setzero_ps();
1544             fjy0             = _mm256_setzero_ps();
1545             fjz0             = _mm256_setzero_ps();
1546             fjx1             = _mm256_setzero_ps();
1547             fjy1             = _mm256_setzero_ps();
1548             fjz1             = _mm256_setzero_ps();
1549             fjx2             = _mm256_setzero_ps();
1550             fjy2             = _mm256_setzero_ps();
1551             fjz2             = _mm256_setzero_ps();
1552
1553             /**************************
1554              * CALCULATE INTERACTIONS *
1555              **************************/
1556
1557             r00              = _mm256_mul_ps(rsq00,rinv00);
1558
1559             /* EWALD ELECTROSTATICS */
1560             
1561             /* Analytical PME correction */
1562             zeta2            = _mm256_mul_ps(beta2,rsq00);
1563             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1564             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1565             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1566             felec            = _mm256_mul_ps(qq00,felec);
1567             
1568             /* Analytical LJ-PME */
1569             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1570             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
1571             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1572             exponent         = gmx_simd_exp_r(ewcljrsq);
1573             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1574             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1575             /* f6A = 6 * C6grid * (1 - poly) */
1576             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1577             /* f6B = C6grid * exponent * beta^6 */
1578             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1579             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1580             fvdw              = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1581
1582             fscal            = _mm256_add_ps(felec,fvdw);
1583
1584             /* Calculate temporary vectorial force */
1585             tx               = _mm256_mul_ps(fscal,dx00);
1586             ty               = _mm256_mul_ps(fscal,dy00);
1587             tz               = _mm256_mul_ps(fscal,dz00);
1588
1589             /* Update vectorial force */
1590             fix0             = _mm256_add_ps(fix0,tx);
1591             fiy0             = _mm256_add_ps(fiy0,ty);
1592             fiz0             = _mm256_add_ps(fiz0,tz);
1593
1594             fjx0             = _mm256_add_ps(fjx0,tx);
1595             fjy0             = _mm256_add_ps(fjy0,ty);
1596             fjz0             = _mm256_add_ps(fjz0,tz);
1597
1598             /**************************
1599              * CALCULATE INTERACTIONS *
1600              **************************/
1601
1602             r01              = _mm256_mul_ps(rsq01,rinv01);
1603
1604             /* EWALD ELECTROSTATICS */
1605             
1606             /* Analytical PME correction */
1607             zeta2            = _mm256_mul_ps(beta2,rsq01);
1608             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1609             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1610             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1611             felec            = _mm256_mul_ps(qq01,felec);
1612             
1613             fscal            = felec;
1614
1615             /* Calculate temporary vectorial force */
1616             tx               = _mm256_mul_ps(fscal,dx01);
1617             ty               = _mm256_mul_ps(fscal,dy01);
1618             tz               = _mm256_mul_ps(fscal,dz01);
1619
1620             /* Update vectorial force */
1621             fix0             = _mm256_add_ps(fix0,tx);
1622             fiy0             = _mm256_add_ps(fiy0,ty);
1623             fiz0             = _mm256_add_ps(fiz0,tz);
1624
1625             fjx1             = _mm256_add_ps(fjx1,tx);
1626             fjy1             = _mm256_add_ps(fjy1,ty);
1627             fjz1             = _mm256_add_ps(fjz1,tz);
1628
1629             /**************************
1630              * CALCULATE INTERACTIONS *
1631              **************************/
1632
1633             r02              = _mm256_mul_ps(rsq02,rinv02);
1634
1635             /* EWALD ELECTROSTATICS */
1636             
1637             /* Analytical PME correction */
1638             zeta2            = _mm256_mul_ps(beta2,rsq02);
1639             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1640             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1641             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1642             felec            = _mm256_mul_ps(qq02,felec);
1643             
1644             fscal            = felec;
1645
1646             /* Calculate temporary vectorial force */
1647             tx               = _mm256_mul_ps(fscal,dx02);
1648             ty               = _mm256_mul_ps(fscal,dy02);
1649             tz               = _mm256_mul_ps(fscal,dz02);
1650
1651             /* Update vectorial force */
1652             fix0             = _mm256_add_ps(fix0,tx);
1653             fiy0             = _mm256_add_ps(fiy0,ty);
1654             fiz0             = _mm256_add_ps(fiz0,tz);
1655
1656             fjx2             = _mm256_add_ps(fjx2,tx);
1657             fjy2             = _mm256_add_ps(fjy2,ty);
1658             fjz2             = _mm256_add_ps(fjz2,tz);
1659
1660             /**************************
1661              * CALCULATE INTERACTIONS *
1662              **************************/
1663
1664             r10              = _mm256_mul_ps(rsq10,rinv10);
1665
1666             /* EWALD ELECTROSTATICS */
1667             
1668             /* Analytical PME correction */
1669             zeta2            = _mm256_mul_ps(beta2,rsq10);
1670             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1671             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1672             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1673             felec            = _mm256_mul_ps(qq10,felec);
1674             
1675             fscal            = felec;
1676
1677             /* Calculate temporary vectorial force */
1678             tx               = _mm256_mul_ps(fscal,dx10);
1679             ty               = _mm256_mul_ps(fscal,dy10);
1680             tz               = _mm256_mul_ps(fscal,dz10);
1681
1682             /* Update vectorial force */
1683             fix1             = _mm256_add_ps(fix1,tx);
1684             fiy1             = _mm256_add_ps(fiy1,ty);
1685             fiz1             = _mm256_add_ps(fiz1,tz);
1686
1687             fjx0             = _mm256_add_ps(fjx0,tx);
1688             fjy0             = _mm256_add_ps(fjy0,ty);
1689             fjz0             = _mm256_add_ps(fjz0,tz);
1690
1691             /**************************
1692              * CALCULATE INTERACTIONS *
1693              **************************/
1694
1695             r11              = _mm256_mul_ps(rsq11,rinv11);
1696
1697             /* EWALD ELECTROSTATICS */
1698             
1699             /* Analytical PME correction */
1700             zeta2            = _mm256_mul_ps(beta2,rsq11);
1701             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1702             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1703             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1704             felec            = _mm256_mul_ps(qq11,felec);
1705             
1706             fscal            = felec;
1707
1708             /* Calculate temporary vectorial force */
1709             tx               = _mm256_mul_ps(fscal,dx11);
1710             ty               = _mm256_mul_ps(fscal,dy11);
1711             tz               = _mm256_mul_ps(fscal,dz11);
1712
1713             /* Update vectorial force */
1714             fix1             = _mm256_add_ps(fix1,tx);
1715             fiy1             = _mm256_add_ps(fiy1,ty);
1716             fiz1             = _mm256_add_ps(fiz1,tz);
1717
1718             fjx1             = _mm256_add_ps(fjx1,tx);
1719             fjy1             = _mm256_add_ps(fjy1,ty);
1720             fjz1             = _mm256_add_ps(fjz1,tz);
1721
1722             /**************************
1723              * CALCULATE INTERACTIONS *
1724              **************************/
1725
1726             r12              = _mm256_mul_ps(rsq12,rinv12);
1727
1728             /* EWALD ELECTROSTATICS */
1729             
1730             /* Analytical PME correction */
1731             zeta2            = _mm256_mul_ps(beta2,rsq12);
1732             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1733             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1734             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1735             felec            = _mm256_mul_ps(qq12,felec);
1736             
1737             fscal            = felec;
1738
1739             /* Calculate temporary vectorial force */
1740             tx               = _mm256_mul_ps(fscal,dx12);
1741             ty               = _mm256_mul_ps(fscal,dy12);
1742             tz               = _mm256_mul_ps(fscal,dz12);
1743
1744             /* Update vectorial force */
1745             fix1             = _mm256_add_ps(fix1,tx);
1746             fiy1             = _mm256_add_ps(fiy1,ty);
1747             fiz1             = _mm256_add_ps(fiz1,tz);
1748
1749             fjx2             = _mm256_add_ps(fjx2,tx);
1750             fjy2             = _mm256_add_ps(fjy2,ty);
1751             fjz2             = _mm256_add_ps(fjz2,tz);
1752
1753             /**************************
1754              * CALCULATE INTERACTIONS *
1755              **************************/
1756
1757             r20              = _mm256_mul_ps(rsq20,rinv20);
1758
1759             /* EWALD ELECTROSTATICS */
1760             
1761             /* Analytical PME correction */
1762             zeta2            = _mm256_mul_ps(beta2,rsq20);
1763             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1764             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1765             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1766             felec            = _mm256_mul_ps(qq20,felec);
1767             
1768             fscal            = felec;
1769
1770             /* Calculate temporary vectorial force */
1771             tx               = _mm256_mul_ps(fscal,dx20);
1772             ty               = _mm256_mul_ps(fscal,dy20);
1773             tz               = _mm256_mul_ps(fscal,dz20);
1774
1775             /* Update vectorial force */
1776             fix2             = _mm256_add_ps(fix2,tx);
1777             fiy2             = _mm256_add_ps(fiy2,ty);
1778             fiz2             = _mm256_add_ps(fiz2,tz);
1779
1780             fjx0             = _mm256_add_ps(fjx0,tx);
1781             fjy0             = _mm256_add_ps(fjy0,ty);
1782             fjz0             = _mm256_add_ps(fjz0,tz);
1783
1784             /**************************
1785              * CALCULATE INTERACTIONS *
1786              **************************/
1787
1788             r21              = _mm256_mul_ps(rsq21,rinv21);
1789
1790             /* EWALD ELECTROSTATICS */
1791             
1792             /* Analytical PME correction */
1793             zeta2            = _mm256_mul_ps(beta2,rsq21);
1794             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1795             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1796             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1797             felec            = _mm256_mul_ps(qq21,felec);
1798             
1799             fscal            = felec;
1800
1801             /* Calculate temporary vectorial force */
1802             tx               = _mm256_mul_ps(fscal,dx21);
1803             ty               = _mm256_mul_ps(fscal,dy21);
1804             tz               = _mm256_mul_ps(fscal,dz21);
1805
1806             /* Update vectorial force */
1807             fix2             = _mm256_add_ps(fix2,tx);
1808             fiy2             = _mm256_add_ps(fiy2,ty);
1809             fiz2             = _mm256_add_ps(fiz2,tz);
1810
1811             fjx1             = _mm256_add_ps(fjx1,tx);
1812             fjy1             = _mm256_add_ps(fjy1,ty);
1813             fjz1             = _mm256_add_ps(fjz1,tz);
1814
1815             /**************************
1816              * CALCULATE INTERACTIONS *
1817              **************************/
1818
1819             r22              = _mm256_mul_ps(rsq22,rinv22);
1820
1821             /* EWALD ELECTROSTATICS */
1822             
1823             /* Analytical PME correction */
1824             zeta2            = _mm256_mul_ps(beta2,rsq22);
1825             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1826             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1827             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1828             felec            = _mm256_mul_ps(qq22,felec);
1829             
1830             fscal            = felec;
1831
1832             /* Calculate temporary vectorial force */
1833             tx               = _mm256_mul_ps(fscal,dx22);
1834             ty               = _mm256_mul_ps(fscal,dy22);
1835             tz               = _mm256_mul_ps(fscal,dz22);
1836
1837             /* Update vectorial force */
1838             fix2             = _mm256_add_ps(fix2,tx);
1839             fiy2             = _mm256_add_ps(fiy2,ty);
1840             fiz2             = _mm256_add_ps(fiz2,tz);
1841
1842             fjx2             = _mm256_add_ps(fjx2,tx);
1843             fjy2             = _mm256_add_ps(fjy2,ty);
1844             fjz2             = _mm256_add_ps(fjz2,tz);
1845
1846             fjptrA             = f+j_coord_offsetA;
1847             fjptrB             = f+j_coord_offsetB;
1848             fjptrC             = f+j_coord_offsetC;
1849             fjptrD             = f+j_coord_offsetD;
1850             fjptrE             = f+j_coord_offsetE;
1851             fjptrF             = f+j_coord_offsetF;
1852             fjptrG             = f+j_coord_offsetG;
1853             fjptrH             = f+j_coord_offsetH;
1854
1855             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1856                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1857
1858             /* Inner loop uses 527 flops */
1859         }
1860
        /* Epilogue: handle the last, possibly partial, batch of up to 8 j
         * entries. Identical math to the unrolled loop above, but padded
         * (negative) neighbor indices are neutralized with dummy_mask so
         * they contribute no force.
         */
1861         if(jidx<j_index_end)
1862         {
1863
1864             /* Get j neighbor index, and coordinate index */
1865             jnrlistA         = jjnr[jidx];
1866             jnrlistB         = jjnr[jidx+1];
1867             jnrlistC         = jjnr[jidx+2];
1868             jnrlistD         = jjnr[jidx+3];
1869             jnrlistE         = jjnr[jidx+4];
1870             jnrlistF         = jjnr[jidx+5];
1871             jnrlistG         = jjnr[jidx+6];
1872             jnrlistH         = jjnr[jidx+7];
1873             /* Sign of each element will be negative for non-real atoms.
1874              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1875              * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
1876              */
1877             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1878                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1879
            /* Clamp padded (negative) indices to atom 0 so the coordinate
             * loads below stay in bounds; their results are masked out later.
             */
1880             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1881             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1882             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1883             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1884             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
1885             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
1886             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
1887             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
1888             j_coord_offsetA  = DIM*jnrA;
1889             j_coord_offsetB  = DIM*jnrB;
1890             j_coord_offsetC  = DIM*jnrC;
1891             j_coord_offsetD  = DIM*jnrD;
1892             j_coord_offsetE  = DIM*jnrE;
1893             j_coord_offsetF  = DIM*jnrF;
1894             j_coord_offsetG  = DIM*jnrG;
1895             j_coord_offsetH  = DIM*jnrH;
1896
1897             /* load j atom coordinates */
1898             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1899                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1900                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1901                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1902                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1903
1904             /* Calculate displacement vector */
1905             dx00             = _mm256_sub_ps(ix0,jx0);
1906             dy00             = _mm256_sub_ps(iy0,jy0);
1907             dz00             = _mm256_sub_ps(iz0,jz0);
1908             dx01             = _mm256_sub_ps(ix0,jx1);
1909             dy01             = _mm256_sub_ps(iy0,jy1);
1910             dz01             = _mm256_sub_ps(iz0,jz1);
1911             dx02             = _mm256_sub_ps(ix0,jx2);
1912             dy02             = _mm256_sub_ps(iy0,jy2);
1913             dz02             = _mm256_sub_ps(iz0,jz2);
1914             dx10             = _mm256_sub_ps(ix1,jx0);
1915             dy10             = _mm256_sub_ps(iy1,jy0);
1916             dz10             = _mm256_sub_ps(iz1,jz0);
1917             dx11             = _mm256_sub_ps(ix1,jx1);
1918             dy11             = _mm256_sub_ps(iy1,jy1);
1919             dz11             = _mm256_sub_ps(iz1,jz1);
1920             dx12             = _mm256_sub_ps(ix1,jx2);
1921             dy12             = _mm256_sub_ps(iy1,jy2);
1922             dz12             = _mm256_sub_ps(iz1,jz2);
1923             dx20             = _mm256_sub_ps(ix2,jx0);
1924             dy20             = _mm256_sub_ps(iy2,jy0);
1925             dz20             = _mm256_sub_ps(iz2,jz0);
1926             dx21             = _mm256_sub_ps(ix2,jx1);
1927             dy21             = _mm256_sub_ps(iy2,jy1);
1928             dz21             = _mm256_sub_ps(iz2,jz1);
1929             dx22             = _mm256_sub_ps(ix2,jx2);
1930             dy22             = _mm256_sub_ps(iy2,jy2);
1931             dz22             = _mm256_sub_ps(iz2,jz2);
1932
1933             /* Calculate squared distance and things based on it */
1934             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1935             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1936             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1937             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1938             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1939             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1940             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1941             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1942             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1943
1944             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1945             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
1946             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
1947             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1948             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1949             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1950             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1951             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1952             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1953
1954             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1955             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
1956             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
1957             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1958             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1959             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1960             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1961             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1962             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1963
            /* Reset per-batch j-force accumulators */
1964             fjx0             = _mm256_setzero_ps();
1965             fjy0             = _mm256_setzero_ps();
1966             fjz0             = _mm256_setzero_ps();
1967             fjx1             = _mm256_setzero_ps();
1968             fjy1             = _mm256_setzero_ps();
1969             fjz1             = _mm256_setzero_ps();
1970             fjx2             = _mm256_setzero_ps();
1971             fjy2             = _mm256_setzero_ps();
1972             fjz2             = _mm256_setzero_ps();
1973
1974             /**************************
1975              * CALCULATE INTERACTIONS *
1976              **************************/
1977
            /* O-O pair (site 0 vs site 0): the only pair with both Ewald
             * electrostatics and LJ-PME dispersion; the other 8 site pairs
             * below are electrostatics-only.
             */
1978             r00              = _mm256_mul_ps(rsq00,rinv00);
            /* Zero r in padded lanes; presumably keeps the PME-correction
             * polynomial inputs benign for dummy entries -- generator
             * convention, confirm against kernel generator notes.
             */
1979             r00              = _mm256_andnot_ps(dummy_mask,r00);
1980
1981             /* EWALD ELECTROSTATICS */
1982
1983             /* Analytical PME correction */
1984             zeta2            = _mm256_mul_ps(beta2,rsq00);
1985             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1986             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1987             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1988             felec            = _mm256_mul_ps(qq00,felec);
1989
1990             /* Analytical LJ-PME */
1991             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1992             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
1993             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1994             exponent         = gmx_simd_exp_r(ewcljrsq);
1995             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1996             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1997             /* f6A = 6 * C6grid * (1 - poly) */
1998             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1999             /* f6B = C6grid * exponent * beta^6 */
2000             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
2001             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
2002             fvdw              = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
2003
2004             fscal            = _mm256_add_ps(felec,fvdw);
2005
            /* Clear force in dummy lanes so padding contributes nothing */
2006             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2007
2008             /* Calculate temporary vectorial force */
2009             tx               = _mm256_mul_ps(fscal,dx00);
2010             ty               = _mm256_mul_ps(fscal,dy00);
2011             tz               = _mm256_mul_ps(fscal,dz00);
2012
2013             /* Update vectorial force */
2014             fix0             = _mm256_add_ps(fix0,tx);
2015             fiy0             = _mm256_add_ps(fiy0,ty);
2016             fiz0             = _mm256_add_ps(fiz0,tz);
2017
2018             fjx0             = _mm256_add_ps(fjx0,tx);
2019             fjy0             = _mm256_add_ps(fjy0,ty);
2020             fjz0             = _mm256_add_ps(fjz0,tz);
2021
2022             /**************************
2023              * CALCULATE INTERACTIONS *
2024              **************************/
2025
2026             r01              = _mm256_mul_ps(rsq01,rinv01);
2027             r01              = _mm256_andnot_ps(dummy_mask,r01);
2028
2029             /* EWALD ELECTROSTATICS */
2030
2031             /* Analytical PME correction */
2032             zeta2            = _mm256_mul_ps(beta2,rsq01);
2033             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
2034             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2035             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2036             felec            = _mm256_mul_ps(qq01,felec);
2037
2038             fscal            = felec;
2039
2040             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2041
2042             /* Calculate temporary vectorial force */
2043             tx               = _mm256_mul_ps(fscal,dx01);
2044             ty               = _mm256_mul_ps(fscal,dy01);
2045             tz               = _mm256_mul_ps(fscal,dz01);
2046
2047             /* Update vectorial force */
2048             fix0             = _mm256_add_ps(fix0,tx);
2049             fiy0             = _mm256_add_ps(fiy0,ty);
2050             fiz0             = _mm256_add_ps(fiz0,tz);
2051
2052             fjx1             = _mm256_add_ps(fjx1,tx);
2053             fjy1             = _mm256_add_ps(fjy1,ty);
2054             fjz1             = _mm256_add_ps(fjz1,tz);
2055
2056             /**************************
2057              * CALCULATE INTERACTIONS *
2058              **************************/
2059
2060             r02              = _mm256_mul_ps(rsq02,rinv02);
2061             r02              = _mm256_andnot_ps(dummy_mask,r02);
2062
2063             /* EWALD ELECTROSTATICS */
2064
2065             /* Analytical PME correction */
2066             zeta2            = _mm256_mul_ps(beta2,rsq02);
2067             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
2068             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2069             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2070             felec            = _mm256_mul_ps(qq02,felec);
2071
2072             fscal            = felec;
2073
2074             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2075
2076             /* Calculate temporary vectorial force */
2077             tx               = _mm256_mul_ps(fscal,dx02);
2078             ty               = _mm256_mul_ps(fscal,dy02);
2079             tz               = _mm256_mul_ps(fscal,dz02);
2080
2081             /* Update vectorial force */
2082             fix0             = _mm256_add_ps(fix0,tx);
2083             fiy0             = _mm256_add_ps(fiy0,ty);
2084             fiz0             = _mm256_add_ps(fiz0,tz);
2085
2086             fjx2             = _mm256_add_ps(fjx2,tx);
2087             fjy2             = _mm256_add_ps(fjy2,ty);
2088             fjz2             = _mm256_add_ps(fjz2,tz);
2089
2090             /**************************
2091              * CALCULATE INTERACTIONS *
2092              **************************/
2093
2094             r10              = _mm256_mul_ps(rsq10,rinv10);
2095             r10              = _mm256_andnot_ps(dummy_mask,r10);
2096
2097             /* EWALD ELECTROSTATICS */
2098
2099             /* Analytical PME correction */
2100             zeta2            = _mm256_mul_ps(beta2,rsq10);
2101             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
2102             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2103             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2104             felec            = _mm256_mul_ps(qq10,felec);
2105
2106             fscal            = felec;
2107
2108             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2109
2110             /* Calculate temporary vectorial force */
2111             tx               = _mm256_mul_ps(fscal,dx10);
2112             ty               = _mm256_mul_ps(fscal,dy10);
2113             tz               = _mm256_mul_ps(fscal,dz10);
2114
2115             /* Update vectorial force */
2116             fix1             = _mm256_add_ps(fix1,tx);
2117             fiy1             = _mm256_add_ps(fiy1,ty);
2118             fiz1             = _mm256_add_ps(fiz1,tz);
2119
2120             fjx0             = _mm256_add_ps(fjx0,tx);
2121             fjy0             = _mm256_add_ps(fjy0,ty);
2122             fjz0             = _mm256_add_ps(fjz0,tz);
2123
2124             /**************************
2125              * CALCULATE INTERACTIONS *
2126              **************************/
2127
2128             r11              = _mm256_mul_ps(rsq11,rinv11);
2129             r11              = _mm256_andnot_ps(dummy_mask,r11);
2130
2131             /* EWALD ELECTROSTATICS */
2132
2133             /* Analytical PME correction */
2134             zeta2            = _mm256_mul_ps(beta2,rsq11);
2135             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2136             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2137             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2138             felec            = _mm256_mul_ps(qq11,felec);
2139
2140             fscal            = felec;
2141
2142             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2143
2144             /* Calculate temporary vectorial force */
2145             tx               = _mm256_mul_ps(fscal,dx11);
2146             ty               = _mm256_mul_ps(fscal,dy11);
2147             tz               = _mm256_mul_ps(fscal,dz11);
2148
2149             /* Update vectorial force */
2150             fix1             = _mm256_add_ps(fix1,tx);
2151             fiy1             = _mm256_add_ps(fiy1,ty);
2152             fiz1             = _mm256_add_ps(fiz1,tz);
2153
2154             fjx1             = _mm256_add_ps(fjx1,tx);
2155             fjy1             = _mm256_add_ps(fjy1,ty);
2156             fjz1             = _mm256_add_ps(fjz1,tz);
2157
2158             /**************************
2159              * CALCULATE INTERACTIONS *
2160              **************************/
2161
2162             r12              = _mm256_mul_ps(rsq12,rinv12);
2163             r12              = _mm256_andnot_ps(dummy_mask,r12);
2164
2165             /* EWALD ELECTROSTATICS */
2166
2167             /* Analytical PME correction */
2168             zeta2            = _mm256_mul_ps(beta2,rsq12);
2169             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2170             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2171             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2172             felec            = _mm256_mul_ps(qq12,felec);
2173
2174             fscal            = felec;
2175
2176             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2177
2178             /* Calculate temporary vectorial force */
2179             tx               = _mm256_mul_ps(fscal,dx12);
2180             ty               = _mm256_mul_ps(fscal,dy12);
2181             tz               = _mm256_mul_ps(fscal,dz12);
2182
2183             /* Update vectorial force */
2184             fix1             = _mm256_add_ps(fix1,tx);
2185             fiy1             = _mm256_add_ps(fiy1,ty);
2186             fiz1             = _mm256_add_ps(fiz1,tz);
2187
2188             fjx2             = _mm256_add_ps(fjx2,tx);
2189             fjy2             = _mm256_add_ps(fjy2,ty);
2190             fjz2             = _mm256_add_ps(fjz2,tz);
2191
2192             /**************************
2193              * CALCULATE INTERACTIONS *
2194              **************************/
2195
2196             r20              = _mm256_mul_ps(rsq20,rinv20);
2197             r20              = _mm256_andnot_ps(dummy_mask,r20);
2198
2199             /* EWALD ELECTROSTATICS */
2200
2201             /* Analytical PME correction */
2202             zeta2            = _mm256_mul_ps(beta2,rsq20);
2203             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
2204             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2205             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2206             felec            = _mm256_mul_ps(qq20,felec);
2207
2208             fscal            = felec;
2209
2210             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2211
2212             /* Calculate temporary vectorial force */
2213             tx               = _mm256_mul_ps(fscal,dx20);
2214             ty               = _mm256_mul_ps(fscal,dy20);
2215             tz               = _mm256_mul_ps(fscal,dz20);
2216
2217             /* Update vectorial force */
2218             fix2             = _mm256_add_ps(fix2,tx);
2219             fiy2             = _mm256_add_ps(fiy2,ty);
2220             fiz2             = _mm256_add_ps(fiz2,tz);
2221
2222             fjx0             = _mm256_add_ps(fjx0,tx);
2223             fjy0             = _mm256_add_ps(fjy0,ty);
2224             fjz0             = _mm256_add_ps(fjz0,tz);
2225
2226             /**************************
2227              * CALCULATE INTERACTIONS *
2228              **************************/
2229
2230             r21              = _mm256_mul_ps(rsq21,rinv21);
2231             r21              = _mm256_andnot_ps(dummy_mask,r21);
2232
2233             /* EWALD ELECTROSTATICS */
2234
2235             /* Analytical PME correction */
2236             zeta2            = _mm256_mul_ps(beta2,rsq21);
2237             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2238             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2239             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2240             felec            = _mm256_mul_ps(qq21,felec);
2241
2242             fscal            = felec;
2243
2244             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2245
2246             /* Calculate temporary vectorial force */
2247             tx               = _mm256_mul_ps(fscal,dx21);
2248             ty               = _mm256_mul_ps(fscal,dy21);
2249             tz               = _mm256_mul_ps(fscal,dz21);
2250
2251             /* Update vectorial force */
2252             fix2             = _mm256_add_ps(fix2,tx);
2253             fiy2             = _mm256_add_ps(fiy2,ty);
2254             fiz2             = _mm256_add_ps(fiz2,tz);
2255
2256             fjx1             = _mm256_add_ps(fjx1,tx);
2257             fjy1             = _mm256_add_ps(fjy1,ty);
2258             fjz1             = _mm256_add_ps(fjz1,tz);
2259
2260             /**************************
2261              * CALCULATE INTERACTIONS *
2262              **************************/
2263
2264             r22              = _mm256_mul_ps(rsq22,rinv22);
2265             r22              = _mm256_andnot_ps(dummy_mask,r22);
2266
2267             /* EWALD ELECTROSTATICS */
2268
2269             /* Analytical PME correction */
2270             zeta2            = _mm256_mul_ps(beta2,rsq22);
2271             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
2272             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2273             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2274             felec            = _mm256_mul_ps(qq22,felec);
2275
2276             fscal            = felec;
2277
2278             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2279
2280             /* Calculate temporary vectorial force */
2281             tx               = _mm256_mul_ps(fscal,dx22);
2282             ty               = _mm256_mul_ps(fscal,dy22);
2283             tz               = _mm256_mul_ps(fscal,dz22);
2284
2285             /* Update vectorial force */
2286             fix2             = _mm256_add_ps(fix2,tx);
2287             fiy2             = _mm256_add_ps(fiy2,ty);
2288             fiz2             = _mm256_add_ps(fiz2,tz);
2289
2290             fjx2             = _mm256_add_ps(fjx2,tx);
2291             fjy2             = _mm256_add_ps(fjy2,ty);
2292             fjz2             = _mm256_add_ps(fjz2,tz);
2293
            /* Dummy lanes write their (already-zeroed) forces to the scratch
             * buffer instead of the real force array, so atom 0's forces are
             * never corrupted by the index clamping above.
             */
2294             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2295             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2296             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2297             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2298             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2299             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2300             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2301             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
2302
2303             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2304                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2305
2306             /* Inner loop uses 536 flops */
2307         }
2308
2309             /* End of innermost loop */
2310
        /* Reduce the 8-lane i-force accumulators for all three water sites
         * into the force array, and accumulate the matching shift force.
         */
2311         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2312                                                  f+i_coord_offset,fshift+i_shift_offset);
2313
2314         /* Increment number of inner iterations */
2315         inneriter                  += j_index_end - j_index_start;
2316
2317         /* Outer loop uses 18 flops */
2318     }
2319
2320     /* Increment number of outer iterations */
2321     outeriter        += nri;
2322
2323     /* Update outer/inner flops */
2324
    /* NOTE(review): charges all inner iterations at 536 flops (the masked
     * epilogue count) even though the full unrolled loop uses 527 -- this
     * looks like a deliberate generator convention; confirm before changing.
     */
2325     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*536);
2326 }