/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "types/simple.h"
#include "gromacs/math/vec.h"
#include "nrnb.h"

#include "gromacs/simd/math_x86_avx_256_single.h"
#include "kernelutil_x86_avx_256_single.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LennardJones
 * Geometry:                   Water3-Water3
 * Calculate force/pot:        PotentialAndForce
 */
void
nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
    __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
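    /* Several of the variables above (e.g. krf, crf, the ewitab/ewtab
     * temporaries, and isai0-2/isaj0-2) are declared uniformly by the
     * kernel generator and appear unused in this analytic-Ewald variant.
     */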
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
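    /* beta = ewaldcoeff_q and its powers are broadcast once up front: the
     * analytic PME correction below needs beta for the potential and
     * beta^3 for the force. The Ewald table pointers are also set up here,
     * although the interactions in this kernel use the analytic path.
     */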

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

    jq0              = _mm256_set1_ps(charge[inr+0]);
    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    qq00             = _mm256_mul_ps(iq0,jq0);
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    qq01             = _mm256_mul_ps(iq0,jq1);
    qq02             = _mm256_mul_ps(iq0,jq2);
    qq10             = _mm256_mul_ps(iq1,jq0);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq20             = _mm256_mul_ps(iq2,jq0);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
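    /* All water molecules are identical, so the nine charge products for
     * the 3x3 atom pairs and the single O-O Lennard-Jones parameter pair
     * can be formed once here instead of inside the loops.
     */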

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

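    /* Zero the scratch buffer; following the usual generated-kernel
     * pattern, it serves as a harmless dump target for force writes
     * belonging to dummy (padding) j entries in the masked epilogue.
     */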
    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();
        vvdwsum          = _mm256_setzero_ps();

        /* Start inner kernel loop */
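        /* The loop below handles j entries in full 8-wide SIMD chunks for
         * as long as all eight indices are real (jjnr[jidx+7]>=0); the
         * padded remainder of the list is processed by the masked loop
         * that follows it.
         */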
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
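            /* pmecorrF/pmecorrV evaluate polynomial approximations, in
             * zeta2 = (beta*r)^2, of the analytic Ewald real-space
             * correction. Combined with 1/r^3 and 1/r they give the
             * erfc-based force and potential; subtracting sh_ewald shifts
             * the potential to zero at the cutoff.
             */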
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
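            /* The c6/c12 parameters are stored premultiplied by 6 and 12,
             * so the LJ force is simply (vvdw12-vvdw6)/r^2 while the
             * potential needs the one_twelfth/one_sixth factors above; the
             * sh_vdw_invrcut6 terms shift the potential to zero at rvdw.
             */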

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq01,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq02,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

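            /* Transpose the accumulated j forces back to per-atom layout
             * and subtract them from the eight j waters' force arrays
             * (the Newton's-third-law partner of the i-force additions).
             */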
            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 999 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

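            /* Clamp dummy indices to 0 so the coordinate loads below stay
             * in bounds; the bogus contributions are removed afterwards
             * with dummy_mask.
             */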
            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
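            /* In addition to the cutoff mask, dummy_mask zeroes the lanes
             * that came from list padding, so neither the potential sums
             * nor the forces pick up contributions from clamped entries.
             */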
937             velec            = _mm256_and_ps(velec,cutoff_mask);
938             velec            = _mm256_andnot_ps(dummy_mask,velec);
939             velecsum         = _mm256_add_ps(velecsum,velec);
940             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
941             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
942             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
943
944             fscal            = _mm256_add_ps(felec,fvdw);
945
946             fscal            = _mm256_and_ps(fscal,cutoff_mask);
947
948             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
949
950             /* Calculate temporary vectorial force */
951             tx               = _mm256_mul_ps(fscal,dx00);
952             ty               = _mm256_mul_ps(fscal,dy00);
953             tz               = _mm256_mul_ps(fscal,dz00);
954
955             /* Update vectorial force */
956             fix0             = _mm256_add_ps(fix0,tx);
957             fiy0             = _mm256_add_ps(fiy0,ty);
958             fiz0             = _mm256_add_ps(fiz0,tz);
959
960             fjx0             = _mm256_add_ps(fjx0,tx);
961             fjy0             = _mm256_add_ps(fjy0,ty);
962             fjz0             = _mm256_add_ps(fjz0,tz);
963
964             }
965
966             /**************************
967              * CALCULATE INTERACTIONS *
968              **************************/
969
970             if (gmx_mm256_any_lt(rsq01,rcutoff2))
971             {
972
973             r01              = _mm256_mul_ps(rsq01,rinv01);
974             r01              = _mm256_andnot_ps(dummy_mask,r01);
975
976             /* EWALD ELECTROSTATICS */
977             
978             /* Analytical PME correction */
979             zeta2            = _mm256_mul_ps(beta2,rsq01);
980             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
981             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
982             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
983             felec            = _mm256_mul_ps(qq01,felec);
984             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
985             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
986             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv01,sh_ewald),pmecorrV);
987             velec            = _mm256_mul_ps(qq01,velec);
988             
989             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
990
991             /* Update potential sum for this i atom from the interaction with this j atom. */
992             velec            = _mm256_and_ps(velec,cutoff_mask);
993             velec            = _mm256_andnot_ps(dummy_mask,velec);
994             velecsum         = _mm256_add_ps(velecsum,velec);
995
996             fscal            = felec;
997
998             fscal            = _mm256_and_ps(fscal,cutoff_mask);
999
1000             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1001
1002             /* Calculate temporary vectorial force */
1003             tx               = _mm256_mul_ps(fscal,dx01);
1004             ty               = _mm256_mul_ps(fscal,dy01);
1005             tz               = _mm256_mul_ps(fscal,dz01);
1006
1007             /* Update vectorial force */
1008             fix0             = _mm256_add_ps(fix0,tx);
1009             fiy0             = _mm256_add_ps(fiy0,ty);
1010             fiz0             = _mm256_add_ps(fiz0,tz);
1011
1012             fjx1             = _mm256_add_ps(fjx1,tx);
1013             fjy1             = _mm256_add_ps(fjy1,ty);
1014             fjz1             = _mm256_add_ps(fjz1,tz);
1015
1016             }
1017
1018             /**************************
1019              * CALCULATE INTERACTIONS *
1020              **************************/
1021
1022             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1023             {
1024
1025             r02              = _mm256_mul_ps(rsq02,rinv02);
1026             r02              = _mm256_andnot_ps(dummy_mask,r02);
1027
1028             /* EWALD ELECTROSTATICS */
1029             
1030             /* Analytical PME correction */
1031             zeta2            = _mm256_mul_ps(beta2,rsq02);
1032             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1033             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1034             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1035             felec            = _mm256_mul_ps(qq02,felec);
1036             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1037             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1038             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv02,sh_ewald),pmecorrV);
1039             velec            = _mm256_mul_ps(qq02,velec);
1040             
1041             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
1042
1043             /* Update potential sum for this i atom from the interaction with this j atom. */
1044             velec            = _mm256_and_ps(velec,cutoff_mask);
1045             velec            = _mm256_andnot_ps(dummy_mask,velec);
1046             velecsum         = _mm256_add_ps(velecsum,velec);
1047
1048             fscal            = felec;
1049
1050             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1051
1052             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1053
1054             /* Calculate temporary vectorial force */
1055             tx               = _mm256_mul_ps(fscal,dx02);
1056             ty               = _mm256_mul_ps(fscal,dy02);
1057             tz               = _mm256_mul_ps(fscal,dz02);
1058
1059             /* Update vectorial force */
1060             fix0             = _mm256_add_ps(fix0,tx);
1061             fiy0             = _mm256_add_ps(fiy0,ty);
1062             fiz0             = _mm256_add_ps(fiz0,tz);
1063
1064             fjx2             = _mm256_add_ps(fjx2,tx);
1065             fjy2             = _mm256_add_ps(fjy2,ty);
1066             fjz2             = _mm256_add_ps(fjz2,tz);
1067
1068             }
1069
1070             /**************************
1071              * CALCULATE INTERACTIONS *
1072              **************************/
1073
1074             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1075             {
1076
1077             r10              = _mm256_mul_ps(rsq10,rinv10);
1078             r10              = _mm256_andnot_ps(dummy_mask,r10);
1079
1080             /* EWALD ELECTROSTATICS */
1081             
1082             /* Analytical PME correction */
1083             zeta2            = _mm256_mul_ps(beta2,rsq10);
1084             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1085             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1086             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1087             felec            = _mm256_mul_ps(qq10,felec);
1088             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1089             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1090             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
1091             velec            = _mm256_mul_ps(qq10,velec);
1092             
1093             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1094
1095             /* Update potential sum for this i atom from the interaction with this j atom. */
1096             velec            = _mm256_and_ps(velec,cutoff_mask);
1097             velec            = _mm256_andnot_ps(dummy_mask,velec);
1098             velecsum         = _mm256_add_ps(velecsum,velec);
1099
            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv11,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv12,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);
            r20              = _mm256_andnot_ps(dummy_mask,r20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv21,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv22,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

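            /* Editorial note: accumulated j forces are scattered back with a
             * single swizzled decrement (decrement, because Newton's third law
             * gives f_j = -f_ij for the f_i contributions accumulated above).
             * Padding entries were redirected to the local scratch buffer just
             * above, so their writes land in a harmless dump area instead of
             * real force memory.
             */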
            /* Inner loop uses 1008 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        ggid                        = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);

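        /* Editorial note: gmx_mm256_update_1pot_ps performs the horizontal sum
         * over the eight SIMD lanes of velecsum/vvdwsum and adds the result to
         * the scalar accumulator for energy group ggid.
         */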
        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 20 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*1008);
}
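/* Editorial note: the flop counts passed to inc_nrnb above (20 per outer and
 * 1008 per inner iteration for this kernel) are bookkeeping for the nrnb
 * performance and load-balancing accounting; they do not affect the computed
 * forces or energies.
 */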
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LennardJones
 * Geometry:                   Water3-Water3
 * Calculate force/pot:        Force
 */
void
nb_kernel_ElecEwSh_VdwLJSh_GeomW3W3_F_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
    __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

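    /* Editorial note: beta is the Ewald splitting parameter (ewaldcoeff_q).
     * beta^2 feeds the zeta^2 = beta^2*r^2 argument of the correction
     * polynomials, and beta^3 restores the 1/r^3-like dimensions of the force
     * correction, so both powers are hoisted out of the loops here.
     */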
    ewtab            = fr->ic->tabq_coul_F;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

    jq0              = _mm256_set1_ps(charge[inr+0]);
    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    qq00             = _mm256_mul_ps(iq0,jq0);
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    qq01             = _mm256_mul_ps(iq0,jq1);
    qq02             = _mm256_mul_ps(iq0,jq2);
    qq10             = _mm256_mul_ps(iq1,jq0);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq20             = _mm256_mul_ps(iq2,jq0);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);

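    /* Editorial note: because every molecule handled by this kernel is the same
     * three-site water, all nine charge products qqXY and the single O-O
     * Lennard-Jones pair (c6_00/c12_00) can be computed once from the first
     * i atom and reused for every pair in both loops below.
     */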
    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }
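
    /* Editorial note: scratch acts as a write-only dump area. In the masked
     * tail iteration further down, force pointers for padding (negative)
     * neighbor indices are redirected here so the force scatter can always
     * store at full 8-wide width without touching real memory.
     */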

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();

        /* Start inner kernel loop */
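        /* Editorial note on the loop structure: this first loop runs only while
         * all eight neighbor entries are real (jjnr[jidx+7]>=0), so no lane
         * masking is needed; any remainder, padded with negative indices, is
         * handled by the masked tail block further down.
         */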
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx01             = _mm256_sub_ps(ix0,jx1);
            dy01             = _mm256_sub_ps(iy0,jy1);
            dz01             = _mm256_sub_ps(iz0,jz1);
            dx02             = _mm256_sub_ps(ix0,jx2);
            dy02             = _mm256_sub_ps(iy0,jy2);
            dz02             = _mm256_sub_ps(iz0,jz2);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv01           = gmx_mm256_invsqrt_ps(rsq01);
            rinv02           = gmx_mm256_invsqrt_ps(rsq02);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv11           = gmx_mm256_invsqrt_ps(rsq11);
            rinv12           = gmx_mm256_invsqrt_ps(rsq12);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);
            rinv21           = gmx_mm256_invsqrt_ps(rsq21);
            rinv22           = gmx_mm256_invsqrt_ps(rsq22);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
            rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);

            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));

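            /* Editorial note (an assumption based on the one_sixth/one_twelfth
             * constants declared earlier): no explicit factors 12 and 6 appear
             * in fvdw above because the nbfp table stores the Lennard-Jones
             * parameters premultiplied (c12 = 12*C12, c6 = 6*C6); the
             * one_twelfth/one_sixth constants undo this when energies are
             * needed, as in the VF kernel above.
             */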
            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq01,rcutoff2))
            {

            r01              = _mm256_mul_ps(rsq01,rinv01);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq01);
            rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq01,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx01);
            ty               = _mm256_mul_ps(fscal,dy01);
            tz               = _mm256_mul_ps(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq02,rcutoff2))
            {

            r02              = _mm256_mul_ps(rsq02,rinv02);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq02);
            rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq02,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx02);
            ty               = _mm256_mul_ps(fscal,dy02);
            tz               = _mm256_mul_ps(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 538 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* The sign of each jnr entry is negative for non-real (padding) atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

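            /* Editorial note: the two unaligned 128-bit loads above pick up four
             * neighbor indices each; _mm_cmplt_epi32 against zero turns each
             * negative (padding) index into an all-ones 32-bit lane, and
             * gmx_mm256_set_m128 glues the two halves into the 256-bit
             * dummy_mask used for the andnot-style clearing below.
             */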
2096             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2097             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2098             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2099             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2100             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
2101             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
2102             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
2103             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
2104             j_coord_offsetA  = DIM*jnrA;
2105             j_coord_offsetB  = DIM*jnrB;
2106             j_coord_offsetC  = DIM*jnrC;
2107             j_coord_offsetD  = DIM*jnrD;
2108             j_coord_offsetE  = DIM*jnrE;
2109             j_coord_offsetF  = DIM*jnrF;
2110             j_coord_offsetG  = DIM*jnrG;
2111             j_coord_offsetH  = DIM*jnrH;
2112
2113             /* load j atom coordinates */
2114             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2115                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2116                                                  x+j_coord_offsetE,x+j_coord_offsetF,
2117                                                  x+j_coord_offsetG,x+j_coord_offsetH,
2118                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
2119
2120             /* Calculate displacement vector */
2121             dx00             = _mm256_sub_ps(ix0,jx0);
2122             dy00             = _mm256_sub_ps(iy0,jy0);
2123             dz00             = _mm256_sub_ps(iz0,jz0);
2124             dx01             = _mm256_sub_ps(ix0,jx1);
2125             dy01             = _mm256_sub_ps(iy0,jy1);
2126             dz01             = _mm256_sub_ps(iz0,jz1);
2127             dx02             = _mm256_sub_ps(ix0,jx2);
2128             dy02             = _mm256_sub_ps(iy0,jy2);
2129             dz02             = _mm256_sub_ps(iz0,jz2);
2130             dx10             = _mm256_sub_ps(ix1,jx0);
2131             dy10             = _mm256_sub_ps(iy1,jy0);
2132             dz10             = _mm256_sub_ps(iz1,jz0);
2133             dx11             = _mm256_sub_ps(ix1,jx1);
2134             dy11             = _mm256_sub_ps(iy1,jy1);
2135             dz11             = _mm256_sub_ps(iz1,jz1);
2136             dx12             = _mm256_sub_ps(ix1,jx2);
2137             dy12             = _mm256_sub_ps(iy1,jy2);
2138             dz12             = _mm256_sub_ps(iz1,jz2);
2139             dx20             = _mm256_sub_ps(ix2,jx0);
2140             dy20             = _mm256_sub_ps(iy2,jy0);
2141             dz20             = _mm256_sub_ps(iz2,jz0);
2142             dx21             = _mm256_sub_ps(ix2,jx1);
2143             dy21             = _mm256_sub_ps(iy2,jy1);
2144             dz21             = _mm256_sub_ps(iz2,jz1);
2145             dx22             = _mm256_sub_ps(ix2,jx2);
2146             dy22             = _mm256_sub_ps(iy2,jy2);
2147             dz22             = _mm256_sub_ps(iz2,jz2);
2148
2149             /* Calculate squared distance and things based on it */
2150             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2151             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
2152             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
2153             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
2154             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2155             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2156             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
2157             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2158             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2159
2160             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
2161             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
2162             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
2163             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
2164             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
2165             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
2166             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
2167             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
2168             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
2169
2170             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
2171             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
2172             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
2173             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
2174             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2175             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2176             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
2177             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2178             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2179
2180             fjx0             = _mm256_setzero_ps();
2181             fjy0             = _mm256_setzero_ps();
2182             fjz0             = _mm256_setzero_ps();
2183             fjx1             = _mm256_setzero_ps();
2184             fjy1             = _mm256_setzero_ps();
2185             fjz1             = _mm256_setzero_ps();
2186             fjx2             = _mm256_setzero_ps();
2187             fjy2             = _mm256_setzero_ps();
2188             fjz2             = _mm256_setzero_ps();
2189
2190             /**************************
2191              * CALCULATE INTERACTIONS *
2192              **************************/
2193
2194             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2195             {
2196
2197             r00              = _mm256_mul_ps(rsq00,rinv00);
2198             r00              = _mm256_andnot_ps(dummy_mask,r00);
2199
2200             /* EWALD ELECTROSTATICS */
2201             
2202             /* Analytical PME correction */
2203             zeta2            = _mm256_mul_ps(beta2,rsq00);
2204             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
2205             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2206             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2207             felec            = _mm256_mul_ps(qq00,felec);
2208             
2209             /* LENNARD-JONES DISPERSION/REPULSION */
2210
2211             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2212             fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
2213
2214             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2215
2216             fscal            = _mm256_add_ps(felec,fvdw);
2217
2218             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2219
2220             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2221
2222             /* Calculate temporary vectorial force */
2223             tx               = _mm256_mul_ps(fscal,dx00);
2224             ty               = _mm256_mul_ps(fscal,dy00);
2225             tz               = _mm256_mul_ps(fscal,dz00);
2226
2227             /* Update vectorial force */
2228             fix0             = _mm256_add_ps(fix0,tx);
2229             fiy0             = _mm256_add_ps(fiy0,ty);
2230             fiz0             = _mm256_add_ps(fiz0,tz);
2231
2232             fjx0             = _mm256_add_ps(fjx0,tx);
2233             fjy0             = _mm256_add_ps(fjy0,ty);
2234             fjz0             = _mm256_add_ps(fjz0,tz);
2235
2236             }
2237
2238             /**************************
2239              * CALCULATE INTERACTIONS *
2240              **************************/
2241
2242             if (gmx_mm256_any_lt(rsq01,rcutoff2))
2243             {
2244
2245             r01              = _mm256_mul_ps(rsq01,rinv01);
2246             r01              = _mm256_andnot_ps(dummy_mask,r01);
2247
2248             /* EWALD ELECTROSTATICS */
2249             
2250             /* Analytical PME correction */
2251             zeta2            = _mm256_mul_ps(beta2,rsq01);
2252             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
2253             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2254             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2255             felec            = _mm256_mul_ps(qq01,felec);
2256             
2257             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
2258
2259             fscal            = felec;
2260
2261             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2262
2263             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2264
2265             /* Calculate temporary vectorial force */
2266             tx               = _mm256_mul_ps(fscal,dx01);
2267             ty               = _mm256_mul_ps(fscal,dy01);
2268             tz               = _mm256_mul_ps(fscal,dz01);
2269
2270             /* Update vectorial force */
2271             fix0             = _mm256_add_ps(fix0,tx);
2272             fiy0             = _mm256_add_ps(fiy0,ty);
2273             fiz0             = _mm256_add_ps(fiz0,tz);
2274
2275             fjx1             = _mm256_add_ps(fjx1,tx);
2276             fjy1             = _mm256_add_ps(fjy1,ty);
2277             fjz1             = _mm256_add_ps(fjz1,tz);
2278
2279             }
2280
2281             /**************************
2282              * CALCULATE INTERACTIONS *
2283              **************************/
2284
2285             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2286             {
2287
2288             r02              = _mm256_mul_ps(rsq02,rinv02);
2289             r02              = _mm256_andnot_ps(dummy_mask,r02);
2290
2291             /* EWALD ELECTROSTATICS */
2292             
2293             /* Analytical PME correction */
2294             zeta2            = _mm256_mul_ps(beta2,rsq02);
2295             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
2296             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2297             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2298             felec            = _mm256_mul_ps(qq02,felec);
2299             
2300             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
2301
2302             fscal            = felec;
2303
2304             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2305
2306             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2307
2308             /* Calculate temporary vectorial force */
2309             tx               = _mm256_mul_ps(fscal,dx02);
2310             ty               = _mm256_mul_ps(fscal,dy02);
2311             tz               = _mm256_mul_ps(fscal,dz02);
2312
2313             /* Update vectorial force */
2314             fix0             = _mm256_add_ps(fix0,tx);
2315             fiy0             = _mm256_add_ps(fiy0,ty);
2316             fiz0             = _mm256_add_ps(fiz0,tz);
2317
2318             fjx2             = _mm256_add_ps(fjx2,tx);
2319             fjy2             = _mm256_add_ps(fjy2,ty);
2320             fjz2             = _mm256_add_ps(fjz2,tz);
2321
2322             }
2323
            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);
            r10              = _mm256_andnot_ps(dummy_mask,r10);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

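            /* i atom 1 - j atom 1 (rsq11) */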
            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

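            /* i atom 1 - j atom 2 (rsq12) */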
            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

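            /* i atom 2 - j atom 0 (rsq20) */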
            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);
            r20              = _mm256_andnot_ps(dummy_mask,r20);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

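            /* i atom 2 - j atom 1 (rsq21) */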
            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

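            /* i atom 2 - j atom 2 (rsq22): last pair of the 3x3 matrix */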
            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

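            /* Scatter the accumulated j forces.  Padding lanes (jnrlist < 0)
             * are redirected to a scratch buffer so the full-width SIMD store
             * below never touches real force data. */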
            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

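            /* Transpose the nine SIMD accumulators (x/y/z for three atoms)
             * back to per-atom layout and subtract them from the eight j
             * waters in one swizzled update. */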
            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 547 flops */
        }

        /* End of innermost loop */

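        /* Reduce the i-atom force accumulators across SIMD lanes, add them to
         * the force array, and update the shift force used for the virial. */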
        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 18 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

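    /* Register the operation count with the nrnb accounting so mdrun can
     * report performance: 18 flops per outer and 547 per inner iteration,
     * as counted by the kernel generator. */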
    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*547);
}