Remove all unnecessary HAVE_CONFIG_H
[alexxy/gromacs.git] / src / gromacs / gmxlib / nonbonded / nb_kernel_avx_256_single / nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_avx_256_single.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_single kernel generator.
37  */
38 #include "config.h"
39
40 #include <math.h>
41
42 #include "../nb_kernel.h"
43 #include "types/simple.h"
44 #include "gromacs/math/vec.h"
45 #include "nrnb.h"
46
47 #include "gromacs/simd/math_x86_avx_256_single.h"
48 #include "kernelutil_x86_avx_256_single.h"
49
50 /*
51  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_single
52  * Electrostatics interaction: Ewald
53  * VdW interaction:            LennardJones
54  * Geometry:                   Water3-Water3
55  * Calculate force/pot:        PotentialAndForce
56  */
57 void
58 nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_single
59                     (t_nblist                    * gmx_restrict       nlist,
60                      rvec                        * gmx_restrict          xx,
61                      rvec                        * gmx_restrict          ff,
62                      t_forcerec                  * gmx_restrict          fr,
63                      t_mdatoms                   * gmx_restrict     mdatoms,
64                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65                      t_nrnb                      * gmx_restrict        nrnb)
66 {
67     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
68      * just 0 for non-waters.
69      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
70      * jnr indices corresponding to data put in the four positions in the SIMD register.
71      */
72     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
73     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74     int              jnrA,jnrB,jnrC,jnrD;
75     int              jnrE,jnrF,jnrG,jnrH;
76     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
77     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
78     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
79     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
84     real             scratch[4*DIM];
85     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88     real *           vdwioffsetptr1;
89     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90     real *           vdwioffsetptr2;
91     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
93     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
94     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
95     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
96     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
97     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
98     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
99     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
100     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
101     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
102     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
103     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
104     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
105     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
106     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
107     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
108     real             *charge;
109     int              nvdwtype;
110     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
111     int              *vdwtype;
112     real             *vdwparam;
113     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
114     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
115     __m256i          ewitab;
116     __m128i          ewitab_lo,ewitab_hi;
117     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
118     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
119     real             *ewtab;
120     __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
121     real             rswitch_scalar,d_scalar;
122     __m256           dummy_mask,cutoff_mask;
123     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
124     __m256           one     = _mm256_set1_ps(1.0);
125     __m256           two     = _mm256_set1_ps(2.0);
126     x                = xx[0];
127     f                = ff[0];
128
129     nri              = nlist->nri;
130     iinr             = nlist->iinr;
131     jindex           = nlist->jindex;
132     jjnr             = nlist->jjnr;
133     shiftidx         = nlist->shift;
134     gid              = nlist->gid;
135     shiftvec         = fr->shift_vec[0];
136     fshift           = fr->fshift[0];
137     facel            = _mm256_set1_ps(fr->epsfac);
138     charge           = mdatoms->chargeA;
139     nvdwtype         = fr->ntype;
140     vdwparam         = fr->nbfp;
141     vdwtype          = mdatoms->typeA;
142
143     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
144     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
145     beta2            = _mm256_mul_ps(beta,beta);
146     beta3            = _mm256_mul_ps(beta,beta2);
147
148     ewtab            = fr->ic->tabq_coul_FDV0;
149     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
150     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
151
152     /* Setup water-specific parameters */
153     inr              = nlist->iinr[0];
154     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
155     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
156     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
157     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
158
159     jq0              = _mm256_set1_ps(charge[inr+0]);
160     jq1              = _mm256_set1_ps(charge[inr+1]);
161     jq2              = _mm256_set1_ps(charge[inr+2]);
162     vdwjidx0A        = 2*vdwtype[inr+0];
163     qq00             = _mm256_mul_ps(iq0,jq0);
164     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
165     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
166     qq01             = _mm256_mul_ps(iq0,jq1);
167     qq02             = _mm256_mul_ps(iq0,jq2);
168     qq10             = _mm256_mul_ps(iq1,jq0);
169     qq11             = _mm256_mul_ps(iq1,jq1);
170     qq12             = _mm256_mul_ps(iq1,jq2);
171     qq20             = _mm256_mul_ps(iq2,jq0);
172     qq21             = _mm256_mul_ps(iq2,jq1);
173     qq22             = _mm256_mul_ps(iq2,jq2);
174
175     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
176     rcutoff_scalar   = fr->rcoulomb;
177     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
178     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
179
180     rswitch_scalar   = fr->rcoulomb_switch;
181     rswitch          = _mm256_set1_ps(rswitch_scalar);
182     /* Setup switch parameters */
183     d_scalar         = rcutoff_scalar-rswitch_scalar;
184     d                = _mm256_set1_ps(d_scalar);
185     swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
186     swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
187     swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
188     swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
189     swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
190     swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
191
192     /* Avoid stupid compiler warnings */
193     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
194     j_coord_offsetA = 0;
195     j_coord_offsetB = 0;
196     j_coord_offsetC = 0;
197     j_coord_offsetD = 0;
198     j_coord_offsetE = 0;
199     j_coord_offsetF = 0;
200     j_coord_offsetG = 0;
201     j_coord_offsetH = 0;
202
203     outeriter        = 0;
204     inneriter        = 0;
205
206     for(iidx=0;iidx<4*DIM;iidx++)
207     {
208         scratch[iidx] = 0.0;
209     }
210
211     /* Start outer loop over neighborlists */
212     for(iidx=0; iidx<nri; iidx++)
213     {
214         /* Load shift vector for this list */
215         i_shift_offset   = DIM*shiftidx[iidx];
216
217         /* Load limits for loop over neighbors */
218         j_index_start    = jindex[iidx];
219         j_index_end      = jindex[iidx+1];
220
221         /* Get outer coordinate index */
222         inr              = iinr[iidx];
223         i_coord_offset   = DIM*inr;
224
225         /* Load i particle coords and add shift vector */
226         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
227                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
228
229         fix0             = _mm256_setzero_ps();
230         fiy0             = _mm256_setzero_ps();
231         fiz0             = _mm256_setzero_ps();
232         fix1             = _mm256_setzero_ps();
233         fiy1             = _mm256_setzero_ps();
234         fiz1             = _mm256_setzero_ps();
235         fix2             = _mm256_setzero_ps();
236         fiy2             = _mm256_setzero_ps();
237         fiz2             = _mm256_setzero_ps();
238
239         /* Reset potential sums */
240         velecsum         = _mm256_setzero_ps();
241         vvdwsum          = _mm256_setzero_ps();
242
243         /* Start inner kernel loop */
244         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
245         {
246
247             /* Get j neighbor index, and coordinate index */
248             jnrA             = jjnr[jidx];
249             jnrB             = jjnr[jidx+1];
250             jnrC             = jjnr[jidx+2];
251             jnrD             = jjnr[jidx+3];
252             jnrE             = jjnr[jidx+4];
253             jnrF             = jjnr[jidx+5];
254             jnrG             = jjnr[jidx+6];
255             jnrH             = jjnr[jidx+7];
256             j_coord_offsetA  = DIM*jnrA;
257             j_coord_offsetB  = DIM*jnrB;
258             j_coord_offsetC  = DIM*jnrC;
259             j_coord_offsetD  = DIM*jnrD;
260             j_coord_offsetE  = DIM*jnrE;
261             j_coord_offsetF  = DIM*jnrF;
262             j_coord_offsetG  = DIM*jnrG;
263             j_coord_offsetH  = DIM*jnrH;
264
265             /* load j atom coordinates */
266             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
267                                                  x+j_coord_offsetC,x+j_coord_offsetD,
268                                                  x+j_coord_offsetE,x+j_coord_offsetF,
269                                                  x+j_coord_offsetG,x+j_coord_offsetH,
270                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
271
272             /* Calculate displacement vector */
273             dx00             = _mm256_sub_ps(ix0,jx0);
274             dy00             = _mm256_sub_ps(iy0,jy0);
275             dz00             = _mm256_sub_ps(iz0,jz0);
276             dx01             = _mm256_sub_ps(ix0,jx1);
277             dy01             = _mm256_sub_ps(iy0,jy1);
278             dz01             = _mm256_sub_ps(iz0,jz1);
279             dx02             = _mm256_sub_ps(ix0,jx2);
280             dy02             = _mm256_sub_ps(iy0,jy2);
281             dz02             = _mm256_sub_ps(iz0,jz2);
282             dx10             = _mm256_sub_ps(ix1,jx0);
283             dy10             = _mm256_sub_ps(iy1,jy0);
284             dz10             = _mm256_sub_ps(iz1,jz0);
285             dx11             = _mm256_sub_ps(ix1,jx1);
286             dy11             = _mm256_sub_ps(iy1,jy1);
287             dz11             = _mm256_sub_ps(iz1,jz1);
288             dx12             = _mm256_sub_ps(ix1,jx2);
289             dy12             = _mm256_sub_ps(iy1,jy2);
290             dz12             = _mm256_sub_ps(iz1,jz2);
291             dx20             = _mm256_sub_ps(ix2,jx0);
292             dy20             = _mm256_sub_ps(iy2,jy0);
293             dz20             = _mm256_sub_ps(iz2,jz0);
294             dx21             = _mm256_sub_ps(ix2,jx1);
295             dy21             = _mm256_sub_ps(iy2,jy1);
296             dz21             = _mm256_sub_ps(iz2,jz1);
297             dx22             = _mm256_sub_ps(ix2,jx2);
298             dy22             = _mm256_sub_ps(iy2,jy2);
299             dz22             = _mm256_sub_ps(iz2,jz2);
300
301             /* Calculate squared distance and things based on it */
302             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
303             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
304             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
305             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
306             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
307             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
308             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
309             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
310             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
311
312             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
313             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
314             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
315             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
316             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
317             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
318             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
319             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
320             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
321
322             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
323             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
324             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
325             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
326             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
327             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
328             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
329             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
330             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
331
332             fjx0             = _mm256_setzero_ps();
333             fjy0             = _mm256_setzero_ps();
334             fjz0             = _mm256_setzero_ps();
335             fjx1             = _mm256_setzero_ps();
336             fjy1             = _mm256_setzero_ps();
337             fjz1             = _mm256_setzero_ps();
338             fjx2             = _mm256_setzero_ps();
339             fjy2             = _mm256_setzero_ps();
340             fjz2             = _mm256_setzero_ps();
341
342             /**************************
343              * CALCULATE INTERACTIONS *
344              **************************/
345
346             if (gmx_mm256_any_lt(rsq00,rcutoff2))
347             {
348
349             r00              = _mm256_mul_ps(rsq00,rinv00);
350
351             /* EWALD ELECTROSTATICS */
352             
353             /* Analytical PME correction */
354             zeta2            = _mm256_mul_ps(beta2,rsq00);
355             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
356             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
357             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
358             felec            = _mm256_mul_ps(qq00,felec);
359             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
360             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
361             velec            = _mm256_sub_ps(rinv00,pmecorrV);
362             velec            = _mm256_mul_ps(qq00,velec);
363             
364             /* LENNARD-JONES DISPERSION/REPULSION */
365
366             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
367             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
368             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
369             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
370             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
371
372             d                = _mm256_sub_ps(r00,rswitch);
373             d                = _mm256_max_ps(d,_mm256_setzero_ps());
374             d2               = _mm256_mul_ps(d,d);
375             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
376
377             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
378
379             /* Evaluate switch function */
380             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
381             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
382             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
383             velec            = _mm256_mul_ps(velec,sw);
384             vvdw             = _mm256_mul_ps(vvdw,sw);
385             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
386
387             /* Update potential sum for this i atom from the interaction with this j atom. */
388             velec            = _mm256_and_ps(velec,cutoff_mask);
389             velecsum         = _mm256_add_ps(velecsum,velec);
390             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
391             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
392
393             fscal            = _mm256_add_ps(felec,fvdw);
394
395             fscal            = _mm256_and_ps(fscal,cutoff_mask);
396
397             /* Calculate temporary vectorial force */
398             tx               = _mm256_mul_ps(fscal,dx00);
399             ty               = _mm256_mul_ps(fscal,dy00);
400             tz               = _mm256_mul_ps(fscal,dz00);
401
402             /* Update vectorial force */
403             fix0             = _mm256_add_ps(fix0,tx);
404             fiy0             = _mm256_add_ps(fiy0,ty);
405             fiz0             = _mm256_add_ps(fiz0,tz);
406
407             fjx0             = _mm256_add_ps(fjx0,tx);
408             fjy0             = _mm256_add_ps(fjy0,ty);
409             fjz0             = _mm256_add_ps(fjz0,tz);
410
411             }
412
413             /**************************
414              * CALCULATE INTERACTIONS *
415              **************************/
416
417             if (gmx_mm256_any_lt(rsq01,rcutoff2))
418             {
419
420             r01              = _mm256_mul_ps(rsq01,rinv01);
421
422             /* EWALD ELECTROSTATICS */
423             
424             /* Analytical PME correction */
425             zeta2            = _mm256_mul_ps(beta2,rsq01);
426             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
427             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
428             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
429             felec            = _mm256_mul_ps(qq01,felec);
430             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
431             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
432             velec            = _mm256_sub_ps(rinv01,pmecorrV);
433             velec            = _mm256_mul_ps(qq01,velec);
434             
435             d                = _mm256_sub_ps(r01,rswitch);
436             d                = _mm256_max_ps(d,_mm256_setzero_ps());
437             d2               = _mm256_mul_ps(d,d);
438             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
439
440             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
441
442             /* Evaluate switch function */
443             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
444             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
445             velec            = _mm256_mul_ps(velec,sw);
446             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
447
448             /* Update potential sum for this i atom from the interaction with this j atom. */
449             velec            = _mm256_and_ps(velec,cutoff_mask);
450             velecsum         = _mm256_add_ps(velecsum,velec);
451
452             fscal            = felec;
453
454             fscal            = _mm256_and_ps(fscal,cutoff_mask);
455
456             /* Calculate temporary vectorial force */
457             tx               = _mm256_mul_ps(fscal,dx01);
458             ty               = _mm256_mul_ps(fscal,dy01);
459             tz               = _mm256_mul_ps(fscal,dz01);
460
461             /* Update vectorial force */
462             fix0             = _mm256_add_ps(fix0,tx);
463             fiy0             = _mm256_add_ps(fiy0,ty);
464             fiz0             = _mm256_add_ps(fiz0,tz);
465
466             fjx1             = _mm256_add_ps(fjx1,tx);
467             fjy1             = _mm256_add_ps(fjy1,ty);
468             fjz1             = _mm256_add_ps(fjz1,tz);
469
470             }
471
472             /**************************
473              * CALCULATE INTERACTIONS *
474              **************************/
475
476             if (gmx_mm256_any_lt(rsq02,rcutoff2))
477             {
478
479             r02              = _mm256_mul_ps(rsq02,rinv02);
480
481             /* EWALD ELECTROSTATICS */
482             
483             /* Analytical PME correction */
484             zeta2            = _mm256_mul_ps(beta2,rsq02);
485             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
486             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
487             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
488             felec            = _mm256_mul_ps(qq02,felec);
489             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
490             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
491             velec            = _mm256_sub_ps(rinv02,pmecorrV);
492             velec            = _mm256_mul_ps(qq02,velec);
493             
494             d                = _mm256_sub_ps(r02,rswitch);
495             d                = _mm256_max_ps(d,_mm256_setzero_ps());
496             d2               = _mm256_mul_ps(d,d);
497             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
498
499             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
500
501             /* Evaluate switch function */
502             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
503             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
504             velec            = _mm256_mul_ps(velec,sw);
505             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
506
507             /* Update potential sum for this i atom from the interaction with this j atom. */
508             velec            = _mm256_and_ps(velec,cutoff_mask);
509             velecsum         = _mm256_add_ps(velecsum,velec);
510
511             fscal            = felec;
512
513             fscal            = _mm256_and_ps(fscal,cutoff_mask);
514
515             /* Calculate temporary vectorial force */
516             tx               = _mm256_mul_ps(fscal,dx02);
517             ty               = _mm256_mul_ps(fscal,dy02);
518             tz               = _mm256_mul_ps(fscal,dz02);
519
520             /* Update vectorial force */
521             fix0             = _mm256_add_ps(fix0,tx);
522             fiy0             = _mm256_add_ps(fiy0,ty);
523             fiz0             = _mm256_add_ps(fiz0,tz);
524
525             fjx2             = _mm256_add_ps(fjx2,tx);
526             fjy2             = _mm256_add_ps(fjy2,ty);
527             fjz2             = _mm256_add_ps(fjz2,tz);
528
529             }
530
531             /**************************
532              * CALCULATE INTERACTIONS *
533              **************************/
534
535             if (gmx_mm256_any_lt(rsq10,rcutoff2))
536             {
537
538             r10              = _mm256_mul_ps(rsq10,rinv10);
539
540             /* EWALD ELECTROSTATICS */
541             
542             /* Analytical PME correction */
543             zeta2            = _mm256_mul_ps(beta2,rsq10);
544             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
545             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
546             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
547             felec            = _mm256_mul_ps(qq10,felec);
548             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
549             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
550             velec            = _mm256_sub_ps(rinv10,pmecorrV);
551             velec            = _mm256_mul_ps(qq10,velec);
552             
553             d                = _mm256_sub_ps(r10,rswitch);
554             d                = _mm256_max_ps(d,_mm256_setzero_ps());
555             d2               = _mm256_mul_ps(d,d);
556             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
557
558             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
559
560             /* Evaluate switch function */
561             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
562             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
563             velec            = _mm256_mul_ps(velec,sw);
564             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
565
566             /* Update potential sum for this i atom from the interaction with this j atom. */
567             velec            = _mm256_and_ps(velec,cutoff_mask);
568             velecsum         = _mm256_add_ps(velecsum,velec);
569
570             fscal            = felec;
571
572             fscal            = _mm256_and_ps(fscal,cutoff_mask);
573
574             /* Calculate temporary vectorial force */
575             tx               = _mm256_mul_ps(fscal,dx10);
576             ty               = _mm256_mul_ps(fscal,dy10);
577             tz               = _mm256_mul_ps(fscal,dz10);
578
579             /* Update vectorial force */
580             fix1             = _mm256_add_ps(fix1,tx);
581             fiy1             = _mm256_add_ps(fiy1,ty);
582             fiz1             = _mm256_add_ps(fiz1,tz);
583
584             fjx0             = _mm256_add_ps(fjx0,tx);
585             fjy0             = _mm256_add_ps(fjy0,ty);
586             fjz0             = _mm256_add_ps(fjz0,tz);
587
588             }
589
590             /**************************
591              * CALCULATE INTERACTIONS *
592              **************************/
593
594             if (gmx_mm256_any_lt(rsq11,rcutoff2))
595             {
596
597             r11              = _mm256_mul_ps(rsq11,rinv11);
598
599             /* EWALD ELECTROSTATICS */
600             
601             /* Analytical PME correction */
602             zeta2            = _mm256_mul_ps(beta2,rsq11);
603             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
604             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
605             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
606             felec            = _mm256_mul_ps(qq11,felec);
607             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
608             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
609             velec            = _mm256_sub_ps(rinv11,pmecorrV);
610             velec            = _mm256_mul_ps(qq11,velec);
611             
612             d                = _mm256_sub_ps(r11,rswitch);
613             d                = _mm256_max_ps(d,_mm256_setzero_ps());
614             d2               = _mm256_mul_ps(d,d);
615             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
616
617             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
618
619             /* Evaluate switch function */
620             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
621             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
622             velec            = _mm256_mul_ps(velec,sw);
623             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
624
625             /* Update potential sum for this i atom from the interaction with this j atom. */
626             velec            = _mm256_and_ps(velec,cutoff_mask);
627             velecsum         = _mm256_add_ps(velecsum,velec);
628
629             fscal            = felec;
630
631             fscal            = _mm256_and_ps(fscal,cutoff_mask);
632
633             /* Calculate temporary vectorial force */
634             tx               = _mm256_mul_ps(fscal,dx11);
635             ty               = _mm256_mul_ps(fscal,dy11);
636             tz               = _mm256_mul_ps(fscal,dz11);
637
638             /* Update vectorial force */
639             fix1             = _mm256_add_ps(fix1,tx);
640             fiy1             = _mm256_add_ps(fiy1,ty);
641             fiz1             = _mm256_add_ps(fiz1,tz);
642
643             fjx1             = _mm256_add_ps(fjx1,tx);
644             fjy1             = _mm256_add_ps(fjy1,ty);
645             fjz1             = _mm256_add_ps(fjz1,tz);
646
647             }
648
649             /**************************
650              * CALCULATE INTERACTIONS *
651              **************************/
652
653             if (gmx_mm256_any_lt(rsq12,rcutoff2))
654             {
655
656             r12              = _mm256_mul_ps(rsq12,rinv12);
657
658             /* EWALD ELECTROSTATICS */
659             
660             /* Analytical PME correction */
661             zeta2            = _mm256_mul_ps(beta2,rsq12);
662             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
663             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
664             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
665             felec            = _mm256_mul_ps(qq12,felec);
666             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
667             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
668             velec            = _mm256_sub_ps(rinv12,pmecorrV);
669             velec            = _mm256_mul_ps(qq12,velec);
670             
671             d                = _mm256_sub_ps(r12,rswitch);
672             d                = _mm256_max_ps(d,_mm256_setzero_ps());
673             d2               = _mm256_mul_ps(d,d);
674             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
675
676             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
677
678             /* Evaluate switch function */
679             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
680             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
681             velec            = _mm256_mul_ps(velec,sw);
682             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
683
684             /* Update potential sum for this i atom from the interaction with this j atom. */
685             velec            = _mm256_and_ps(velec,cutoff_mask);
686             velecsum         = _mm256_add_ps(velecsum,velec);
687
688             fscal            = felec;
689
690             fscal            = _mm256_and_ps(fscal,cutoff_mask);
691
692             /* Calculate temporary vectorial force */
693             tx               = _mm256_mul_ps(fscal,dx12);
694             ty               = _mm256_mul_ps(fscal,dy12);
695             tz               = _mm256_mul_ps(fscal,dz12);
696
697             /* Update vectorial force */
698             fix1             = _mm256_add_ps(fix1,tx);
699             fiy1             = _mm256_add_ps(fiy1,ty);
700             fiz1             = _mm256_add_ps(fiz1,tz);
701
702             fjx2             = _mm256_add_ps(fjx2,tx);
703             fjy2             = _mm256_add_ps(fjy2,ty);
704             fjz2             = _mm256_add_ps(fjz2,tz);
705
706             }
707
708             /**************************
709              * CALCULATE INTERACTIONS *
710              **************************/
711
712             if (gmx_mm256_any_lt(rsq20,rcutoff2))
713             {
714
715             r20              = _mm256_mul_ps(rsq20,rinv20);
716
717             /* EWALD ELECTROSTATICS */
718             
719             /* Analytical PME correction */
720             zeta2            = _mm256_mul_ps(beta2,rsq20);
721             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
722             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
723             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
724             felec            = _mm256_mul_ps(qq20,felec);
725             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
726             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
727             velec            = _mm256_sub_ps(rinv20,pmecorrV);
728             velec            = _mm256_mul_ps(qq20,velec);
729             
730             d                = _mm256_sub_ps(r20,rswitch);
731             d                = _mm256_max_ps(d,_mm256_setzero_ps());
732             d2               = _mm256_mul_ps(d,d);
733             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
734
735             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
736
737             /* Evaluate switch function */
738             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
739             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
740             velec            = _mm256_mul_ps(velec,sw);
741             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
742
743             /* Update potential sum for this i atom from the interaction with this j atom. */
744             velec            = _mm256_and_ps(velec,cutoff_mask);
745             velecsum         = _mm256_add_ps(velecsum,velec);
746
747             fscal            = felec;
748
749             fscal            = _mm256_and_ps(fscal,cutoff_mask);
750
751             /* Calculate temporary vectorial force */
752             tx               = _mm256_mul_ps(fscal,dx20);
753             ty               = _mm256_mul_ps(fscal,dy20);
754             tz               = _mm256_mul_ps(fscal,dz20);
755
756             /* Update vectorial force */
757             fix2             = _mm256_add_ps(fix2,tx);
758             fiy2             = _mm256_add_ps(fiy2,ty);
759             fiz2             = _mm256_add_ps(fiz2,tz);
760
761             fjx0             = _mm256_add_ps(fjx0,tx);
762             fjy0             = _mm256_add_ps(fjy0,ty);
763             fjz0             = _mm256_add_ps(fjz0,tz);
764
765             }
766
767             /**************************
768              * CALCULATE INTERACTIONS *
769              **************************/
770
771             if (gmx_mm256_any_lt(rsq21,rcutoff2))
772             {
773
774             r21              = _mm256_mul_ps(rsq21,rinv21);
775
776             /* EWALD ELECTROSTATICS */
777             
778             /* Analytical PME correction */
779             zeta2            = _mm256_mul_ps(beta2,rsq21);
780             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
781             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
782             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
783             felec            = _mm256_mul_ps(qq21,felec);
784             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
785             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
786             velec            = _mm256_sub_ps(rinv21,pmecorrV);
787             velec            = _mm256_mul_ps(qq21,velec);
788             
789             d                = _mm256_sub_ps(r21,rswitch);
790             d                = _mm256_max_ps(d,_mm256_setzero_ps());
791             d2               = _mm256_mul_ps(d,d);
792             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
793
794             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
795
796             /* Evaluate switch function */
797             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
798             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
799             velec            = _mm256_mul_ps(velec,sw);
800             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
801
802             /* Update potential sum for this i atom from the interaction with this j atom. */
803             velec            = _mm256_and_ps(velec,cutoff_mask);
804             velecsum         = _mm256_add_ps(velecsum,velec);
805
806             fscal            = felec;
807
808             fscal            = _mm256_and_ps(fscal,cutoff_mask);
809
810             /* Calculate temporary vectorial force */
811             tx               = _mm256_mul_ps(fscal,dx21);
812             ty               = _mm256_mul_ps(fscal,dy21);
813             tz               = _mm256_mul_ps(fscal,dz21);
814
815             /* Update vectorial force */
816             fix2             = _mm256_add_ps(fix2,tx);
817             fiy2             = _mm256_add_ps(fiy2,ty);
818             fiz2             = _mm256_add_ps(fiz2,tz);
819
820             fjx1             = _mm256_add_ps(fjx1,tx);
821             fjy1             = _mm256_add_ps(fjy1,ty);
822             fjz1             = _mm256_add_ps(fjz1,tz);
823
824             }
825
826             /**************************
827              * CALCULATE INTERACTIONS *
828              **************************/
829
830             if (gmx_mm256_any_lt(rsq22,rcutoff2))
831             {
832
833             r22              = _mm256_mul_ps(rsq22,rinv22);
834
835             /* EWALD ELECTROSTATICS */
836             
837             /* Analytical PME correction */
838             zeta2            = _mm256_mul_ps(beta2,rsq22);
839             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
840             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
841             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
842             felec            = _mm256_mul_ps(qq22,felec);
843             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
844             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
845             velec            = _mm256_sub_ps(rinv22,pmecorrV);
846             velec            = _mm256_mul_ps(qq22,velec);
847             
848             d                = _mm256_sub_ps(r22,rswitch);
849             d                = _mm256_max_ps(d,_mm256_setzero_ps());
850             d2               = _mm256_mul_ps(d,d);
851             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
852
853             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
854
855             /* Evaluate switch function */
856             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
857             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
858             velec            = _mm256_mul_ps(velec,sw);
859             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
860
861             /* Update potential sum for this i atom from the interaction with this j atom. */
862             velec            = _mm256_and_ps(velec,cutoff_mask);
863             velecsum         = _mm256_add_ps(velecsum,velec);
864
865             fscal            = felec;
866
867             fscal            = _mm256_and_ps(fscal,cutoff_mask);
868
869             /* Calculate temporary vectorial force */
870             tx               = _mm256_mul_ps(fscal,dx22);
871             ty               = _mm256_mul_ps(fscal,dy22);
872             tz               = _mm256_mul_ps(fscal,dz22);
873
874             /* Update vectorial force */
875             fix2             = _mm256_add_ps(fix2,tx);
876             fiy2             = _mm256_add_ps(fiy2,ty);
877             fiz2             = _mm256_add_ps(fiz2,tz);
878
879             fjx2             = _mm256_add_ps(fjx2,tx);
880             fjy2             = _mm256_add_ps(fjy2,ty);
881             fjz2             = _mm256_add_ps(fjz2,tz);
882
883             }
884
885             fjptrA             = f+j_coord_offsetA;
886             fjptrB             = f+j_coord_offsetB;
887             fjptrC             = f+j_coord_offsetC;
888             fjptrD             = f+j_coord_offsetD;
889             fjptrE             = f+j_coord_offsetE;
890             fjptrF             = f+j_coord_offsetF;
891             fjptrG             = f+j_coord_offsetG;
892             fjptrH             = f+j_coord_offsetH;
893
894             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
895                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
896
897             /* Inner loop uses 990 flops */
898         }
899
900         if(jidx<j_index_end)
901         {
902
903             /* Get j neighbor index, and coordinate index */
904             jnrlistA         = jjnr[jidx];
905             jnrlistB         = jjnr[jidx+1];
906             jnrlistC         = jjnr[jidx+2];
907             jnrlistD         = jjnr[jidx+3];
908             jnrlistE         = jjnr[jidx+4];
909             jnrlistF         = jjnr[jidx+5];
910             jnrlistG         = jjnr[jidx+6];
911             jnrlistH         = jjnr[jidx+7];
912             /* Sign of each element will be negative for non-real atoms.
913              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
914              * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
915              */
916             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
917                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
918                                             
919             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
920             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
921             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
922             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
923             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
924             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
925             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
926             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
927             j_coord_offsetA  = DIM*jnrA;
928             j_coord_offsetB  = DIM*jnrB;
929             j_coord_offsetC  = DIM*jnrC;
930             j_coord_offsetD  = DIM*jnrD;
931             j_coord_offsetE  = DIM*jnrE;
932             j_coord_offsetF  = DIM*jnrF;
933             j_coord_offsetG  = DIM*jnrG;
934             j_coord_offsetH  = DIM*jnrH;
935
936             /* load j atom coordinates */
937             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
938                                                  x+j_coord_offsetC,x+j_coord_offsetD,
939                                                  x+j_coord_offsetE,x+j_coord_offsetF,
940                                                  x+j_coord_offsetG,x+j_coord_offsetH,
941                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
942
943             /* Calculate displacement vector */
944             dx00             = _mm256_sub_ps(ix0,jx0);
945             dy00             = _mm256_sub_ps(iy0,jy0);
946             dz00             = _mm256_sub_ps(iz0,jz0);
947             dx01             = _mm256_sub_ps(ix0,jx1);
948             dy01             = _mm256_sub_ps(iy0,jy1);
949             dz01             = _mm256_sub_ps(iz0,jz1);
950             dx02             = _mm256_sub_ps(ix0,jx2);
951             dy02             = _mm256_sub_ps(iy0,jy2);
952             dz02             = _mm256_sub_ps(iz0,jz2);
953             dx10             = _mm256_sub_ps(ix1,jx0);
954             dy10             = _mm256_sub_ps(iy1,jy0);
955             dz10             = _mm256_sub_ps(iz1,jz0);
956             dx11             = _mm256_sub_ps(ix1,jx1);
957             dy11             = _mm256_sub_ps(iy1,jy1);
958             dz11             = _mm256_sub_ps(iz1,jz1);
959             dx12             = _mm256_sub_ps(ix1,jx2);
960             dy12             = _mm256_sub_ps(iy1,jy2);
961             dz12             = _mm256_sub_ps(iz1,jz2);
962             dx20             = _mm256_sub_ps(ix2,jx0);
963             dy20             = _mm256_sub_ps(iy2,jy0);
964             dz20             = _mm256_sub_ps(iz2,jz0);
965             dx21             = _mm256_sub_ps(ix2,jx1);
966             dy21             = _mm256_sub_ps(iy2,jy1);
967             dz21             = _mm256_sub_ps(iz2,jz1);
968             dx22             = _mm256_sub_ps(ix2,jx2);
969             dy22             = _mm256_sub_ps(iy2,jy2);
970             dz22             = _mm256_sub_ps(iz2,jz2);
971
972             /* Calculate squared distance and things based on it */
973             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
974             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
975             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
976             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
977             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
978             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
979             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
980             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
981             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
982
983             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
984             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
985             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
986             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
987             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
988             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
989             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
990             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
991             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
992
993             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
994             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
995             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
996             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
997             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
998             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
999             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1000             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1001             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1002
1003             fjx0             = _mm256_setzero_ps();
1004             fjy0             = _mm256_setzero_ps();
1005             fjz0             = _mm256_setzero_ps();
1006             fjx1             = _mm256_setzero_ps();
1007             fjy1             = _mm256_setzero_ps();
1008             fjz1             = _mm256_setzero_ps();
1009             fjx2             = _mm256_setzero_ps();
1010             fjy2             = _mm256_setzero_ps();
1011             fjz2             = _mm256_setzero_ps();
1012
1013             /**************************
1014              * CALCULATE INTERACTIONS *
1015              **************************/
1016
1017             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1018             {
1019
1020             r00              = _mm256_mul_ps(rsq00,rinv00);
1021             r00              = _mm256_andnot_ps(dummy_mask,r00);
1022
1023             /* EWALD ELECTROSTATICS */
1024             
1025             /* Analytical PME correction */
1026             zeta2            = _mm256_mul_ps(beta2,rsq00);
1027             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1028             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1029             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1030             felec            = _mm256_mul_ps(qq00,felec);
1031             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1032             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1033             velec            = _mm256_sub_ps(rinv00,pmecorrV);
1034             velec            = _mm256_mul_ps(qq00,velec);
1035             
1036             /* LENNARD-JONES DISPERSION/REPULSION */
1037
1038             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1039             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
1040             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1041             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
1042             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
1043
1044             d                = _mm256_sub_ps(r00,rswitch);
1045             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1046             d2               = _mm256_mul_ps(d,d);
1047             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1048
1049             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1050
1051             /* Evaluate switch function */
1052             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1053             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
1054             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
1055             velec            = _mm256_mul_ps(velec,sw);
1056             vvdw             = _mm256_mul_ps(vvdw,sw);
1057             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1058
1059             /* Update potential sum for this i atom from the interaction with this j atom. */
1060             velec            = _mm256_and_ps(velec,cutoff_mask);
1061             velec            = _mm256_andnot_ps(dummy_mask,velec);
1062             velecsum         = _mm256_add_ps(velecsum,velec);
1063             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
1064             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
1065             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
1066
1067             fscal            = _mm256_add_ps(felec,fvdw);
1068
1069             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1070
1071             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1072
1073             /* Calculate temporary vectorial force */
1074             tx               = _mm256_mul_ps(fscal,dx00);
1075             ty               = _mm256_mul_ps(fscal,dy00);
1076             tz               = _mm256_mul_ps(fscal,dz00);
1077
1078             /* Update vectorial force */
1079             fix0             = _mm256_add_ps(fix0,tx);
1080             fiy0             = _mm256_add_ps(fiy0,ty);
1081             fiz0             = _mm256_add_ps(fiz0,tz);
1082
1083             fjx0             = _mm256_add_ps(fjx0,tx);
1084             fjy0             = _mm256_add_ps(fjy0,ty);
1085             fjz0             = _mm256_add_ps(fjz0,tz);
1086
1087             }
1088
1089             /**************************
1090              * CALCULATE INTERACTIONS *
1091              **************************/
1092
1093             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1094             {
1095
1096             r01              = _mm256_mul_ps(rsq01,rinv01);
1097             r01              = _mm256_andnot_ps(dummy_mask,r01);
1098
1099             /* EWALD ELECTROSTATICS */
1100             
1101             /* Analytical PME correction */
1102             zeta2            = _mm256_mul_ps(beta2,rsq01);
1103             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1104             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1105             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1106             felec            = _mm256_mul_ps(qq01,felec);
1107             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1108             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1109             velec            = _mm256_sub_ps(rinv01,pmecorrV);
1110             velec            = _mm256_mul_ps(qq01,velec);
1111             
1112             d                = _mm256_sub_ps(r01,rswitch);
1113             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1114             d2               = _mm256_mul_ps(d,d);
1115             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1116
1117             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1118
1119             /* Evaluate switch function */
1120             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1121             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
1122             velec            = _mm256_mul_ps(velec,sw);
1123             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
1124
1125             /* Update potential sum for this i atom from the interaction with this j atom. */
1126             velec            = _mm256_and_ps(velec,cutoff_mask);
1127             velec            = _mm256_andnot_ps(dummy_mask,velec);
1128             velecsum         = _mm256_add_ps(velecsum,velec);
1129
1130             fscal            = felec;
1131
1132             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1133
1134             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1135
1136             /* Calculate temporary vectorial force */
1137             tx               = _mm256_mul_ps(fscal,dx01);
1138             ty               = _mm256_mul_ps(fscal,dy01);
1139             tz               = _mm256_mul_ps(fscal,dz01);
1140
1141             /* Update vectorial force */
1142             fix0             = _mm256_add_ps(fix0,tx);
1143             fiy0             = _mm256_add_ps(fiy0,ty);
1144             fiz0             = _mm256_add_ps(fiz0,tz);
1145
1146             fjx1             = _mm256_add_ps(fjx1,tx);
1147             fjy1             = _mm256_add_ps(fjy1,ty);
1148             fjz1             = _mm256_add_ps(fjz1,tz);
1149
1150             }
1151
1152             /**************************
1153              * CALCULATE INTERACTIONS *
1154              **************************/
1155
1156             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1157             {
1158
1159             r02              = _mm256_mul_ps(rsq02,rinv02);
1160             r02              = _mm256_andnot_ps(dummy_mask,r02);
1161
1162             /* EWALD ELECTROSTATICS */
1163             
1164             /* Analytical PME correction */
1165             zeta2            = _mm256_mul_ps(beta2,rsq02);
1166             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1167             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1168             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1169             felec            = _mm256_mul_ps(qq02,felec);
1170             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1171             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1172             velec            = _mm256_sub_ps(rinv02,pmecorrV);
1173             velec            = _mm256_mul_ps(qq02,velec);
1174             
1175             d                = _mm256_sub_ps(r02,rswitch);
1176             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1177             d2               = _mm256_mul_ps(d,d);
1178             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1179
1180             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1181
1182             /* Evaluate switch function */
1183             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1184             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
1185             velec            = _mm256_mul_ps(velec,sw);
1186             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
1187
1188             /* Update potential sum for this i atom from the interaction with this j atom. */
1189             velec            = _mm256_and_ps(velec,cutoff_mask);
1190             velec            = _mm256_andnot_ps(dummy_mask,velec);
1191             velecsum         = _mm256_add_ps(velecsum,velec);
1192
1193             fscal            = felec;
1194
1195             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1196
1197             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1198
1199             /* Calculate temporary vectorial force */
1200             tx               = _mm256_mul_ps(fscal,dx02);
1201             ty               = _mm256_mul_ps(fscal,dy02);
1202             tz               = _mm256_mul_ps(fscal,dz02);
1203
1204             /* Update vectorial force */
1205             fix0             = _mm256_add_ps(fix0,tx);
1206             fiy0             = _mm256_add_ps(fiy0,ty);
1207             fiz0             = _mm256_add_ps(fiz0,tz);
1208
1209             fjx2             = _mm256_add_ps(fjx2,tx);
1210             fjy2             = _mm256_add_ps(fjy2,ty);
1211             fjz2             = _mm256_add_ps(fjz2,tz);
1212
1213             }
1214
1215             /**************************
1216              * CALCULATE INTERACTIONS *
1217              **************************/
1218
1219             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1220             {
1221
1222             r10              = _mm256_mul_ps(rsq10,rinv10);
1223             r10              = _mm256_andnot_ps(dummy_mask,r10);
1224
1225             /* EWALD ELECTROSTATICS */
1226             
1227             /* Analytical PME correction */
1228             zeta2            = _mm256_mul_ps(beta2,rsq10);
1229             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1230             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1231             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1232             felec            = _mm256_mul_ps(qq10,felec);
1233             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1234             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1235             velec            = _mm256_sub_ps(rinv10,pmecorrV);
1236             velec            = _mm256_mul_ps(qq10,velec);
1237             
1238             d                = _mm256_sub_ps(r10,rswitch);
1239             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1240             d2               = _mm256_mul_ps(d,d);
1241             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1242
1243             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1244
1245             /* Evaluate switch function */
1246             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1247             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1248             velec            = _mm256_mul_ps(velec,sw);
1249             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1250
1251             /* Update potential sum for this i atom from the interaction with this j atom. */
1252             velec            = _mm256_and_ps(velec,cutoff_mask);
1253             velec            = _mm256_andnot_ps(dummy_mask,velec);
1254             velecsum         = _mm256_add_ps(velecsum,velec);
1255
1256             fscal            = felec;
1257
1258             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1259
1260             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1261
1262             /* Calculate temporary vectorial force */
1263             tx               = _mm256_mul_ps(fscal,dx10);
1264             ty               = _mm256_mul_ps(fscal,dy10);
1265             tz               = _mm256_mul_ps(fscal,dz10);
1266
1267             /* Update vectorial force */
1268             fix1             = _mm256_add_ps(fix1,tx);
1269             fiy1             = _mm256_add_ps(fiy1,ty);
1270             fiz1             = _mm256_add_ps(fiz1,tz);
1271
1272             fjx0             = _mm256_add_ps(fjx0,tx);
1273             fjy0             = _mm256_add_ps(fjy0,ty);
1274             fjz0             = _mm256_add_ps(fjz0,tz);
1275
1276             }
1277
1278             /**************************
1279              * CALCULATE INTERACTIONS *
1280              **************************/
1281
1282             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1283             {
1284
1285             r11              = _mm256_mul_ps(rsq11,rinv11);
1286             r11              = _mm256_andnot_ps(dummy_mask,r11);
1287
1288             /* EWALD ELECTROSTATICS */
1289             
1290             /* Analytical PME correction */
1291             zeta2            = _mm256_mul_ps(beta2,rsq11);
1292             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1293             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1294             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1295             felec            = _mm256_mul_ps(qq11,felec);
1296             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1297             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1298             velec            = _mm256_sub_ps(rinv11,pmecorrV);
1299             velec            = _mm256_mul_ps(qq11,velec);
1300             
1301             d                = _mm256_sub_ps(r11,rswitch);
1302             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1303             d2               = _mm256_mul_ps(d,d);
1304             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1305
1306             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1307
1308             /* Evaluate switch function */
1309             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1310             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
1311             velec            = _mm256_mul_ps(velec,sw);
1312             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
1313
1314             /* Update potential sum for this i atom from the interaction with this j atom. */
1315             velec            = _mm256_and_ps(velec,cutoff_mask);
1316             velec            = _mm256_andnot_ps(dummy_mask,velec);
1317             velecsum         = _mm256_add_ps(velecsum,velec);
1318
1319             fscal            = felec;
1320
1321             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1322
1323             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1324
1325             /* Calculate temporary vectorial force */
1326             tx               = _mm256_mul_ps(fscal,dx11);
1327             ty               = _mm256_mul_ps(fscal,dy11);
1328             tz               = _mm256_mul_ps(fscal,dz11);
1329
1330             /* Update vectorial force */
1331             fix1             = _mm256_add_ps(fix1,tx);
1332             fiy1             = _mm256_add_ps(fiy1,ty);
1333             fiz1             = _mm256_add_ps(fiz1,tz);
1334
1335             fjx1             = _mm256_add_ps(fjx1,tx);
1336             fjy1             = _mm256_add_ps(fjy1,ty);
1337             fjz1             = _mm256_add_ps(fjz1,tz);
1338
1339             }
1340
1341             /**************************
1342              * CALCULATE INTERACTIONS *
1343              **************************/
1344
1345             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1346             {
1347
1348             r12              = _mm256_mul_ps(rsq12,rinv12);
1349             r12              = _mm256_andnot_ps(dummy_mask,r12);
1350
1351             /* EWALD ELECTROSTATICS */
1352             
1353             /* Analytical PME correction */
1354             zeta2            = _mm256_mul_ps(beta2,rsq12);
1355             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1356             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1357             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1358             felec            = _mm256_mul_ps(qq12,felec);
1359             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1360             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1361             velec            = _mm256_sub_ps(rinv12,pmecorrV);
1362             velec            = _mm256_mul_ps(qq12,velec);
1363             
1364             d                = _mm256_sub_ps(r12,rswitch);
1365             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1366             d2               = _mm256_mul_ps(d,d);
1367             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1368
1369             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1370
1371             /* Evaluate switch function */
1372             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1373             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
1374             velec            = _mm256_mul_ps(velec,sw);
1375             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
1376
1377             /* Update potential sum for this i atom from the interaction with this j atom. */
1378             velec            = _mm256_and_ps(velec,cutoff_mask);
1379             velec            = _mm256_andnot_ps(dummy_mask,velec);
1380             velecsum         = _mm256_add_ps(velecsum,velec);
1381
1382             fscal            = felec;
1383
1384             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1385
1386             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1387
1388             /* Calculate temporary vectorial force */
1389             tx               = _mm256_mul_ps(fscal,dx12);
1390             ty               = _mm256_mul_ps(fscal,dy12);
1391             tz               = _mm256_mul_ps(fscal,dz12);
1392
1393             /* Update vectorial force */
1394             fix1             = _mm256_add_ps(fix1,tx);
1395             fiy1             = _mm256_add_ps(fiy1,ty);
1396             fiz1             = _mm256_add_ps(fiz1,tz);
1397
1398             fjx2             = _mm256_add_ps(fjx2,tx);
1399             fjy2             = _mm256_add_ps(fjy2,ty);
1400             fjz2             = _mm256_add_ps(fjz2,tz);
1401
1402             }
1403
1404             /**************************
1405              * CALCULATE INTERACTIONS *
1406              **************************/
1407
1408             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1409             {
1410
1411             r20              = _mm256_mul_ps(rsq20,rinv20);
1412             r20              = _mm256_andnot_ps(dummy_mask,r20);
1413
1414             /* EWALD ELECTROSTATICS */
1415             
1416             /* Analytical PME correction */
1417             zeta2            = _mm256_mul_ps(beta2,rsq20);
1418             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1419             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1420             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1421             felec            = _mm256_mul_ps(qq20,felec);
1422             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1423             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1424             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1425             velec            = _mm256_mul_ps(qq20,velec);
1426             
1427             d                = _mm256_sub_ps(r20,rswitch);
1428             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1429             d2               = _mm256_mul_ps(d,d);
1430             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1431
1432             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1433
1434             /* Evaluate switch function */
1435             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1436             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1437             velec            = _mm256_mul_ps(velec,sw);
1438             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1439
1440             /* Update potential sum for this i atom from the interaction with this j atom. */
1441             velec            = _mm256_and_ps(velec,cutoff_mask);
1442             velec            = _mm256_andnot_ps(dummy_mask,velec);
1443             velecsum         = _mm256_add_ps(velecsum,velec);
1444
1445             fscal            = felec;
1446
1447             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1448
1449             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1450
1451             /* Calculate temporary vectorial force */
1452             tx               = _mm256_mul_ps(fscal,dx20);
1453             ty               = _mm256_mul_ps(fscal,dy20);
1454             tz               = _mm256_mul_ps(fscal,dz20);
1455
1456             /* Update vectorial force */
1457             fix2             = _mm256_add_ps(fix2,tx);
1458             fiy2             = _mm256_add_ps(fiy2,ty);
1459             fiz2             = _mm256_add_ps(fiz2,tz);
1460
1461             fjx0             = _mm256_add_ps(fjx0,tx);
1462             fjy0             = _mm256_add_ps(fjy0,ty);
1463             fjz0             = _mm256_add_ps(fjz0,tz);
1464
1465             }
1466
1467             /**************************
1468              * CALCULATE INTERACTIONS *
1469              **************************/
1470
1471             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1472             {
1473
1474             r21              = _mm256_mul_ps(rsq21,rinv21);
1475             r21              = _mm256_andnot_ps(dummy_mask,r21);
1476
1477             /* EWALD ELECTROSTATICS */
1478             
1479             /* Analytical PME correction */
1480             zeta2            = _mm256_mul_ps(beta2,rsq21);
1481             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1482             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1483             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1484             felec            = _mm256_mul_ps(qq21,felec);
1485             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1486             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1487             velec            = _mm256_sub_ps(rinv21,pmecorrV);
1488             velec            = _mm256_mul_ps(qq21,velec);
1489             
1490             d                = _mm256_sub_ps(r21,rswitch);
1491             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1492             d2               = _mm256_mul_ps(d,d);
1493             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1494
1495             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1496
1497             /* Evaluate switch function */
1498             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1499             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
1500             velec            = _mm256_mul_ps(velec,sw);
1501             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
1502
1503             /* Update potential sum for this i atom from the interaction with this j atom. */
1504             velec            = _mm256_and_ps(velec,cutoff_mask);
1505             velec            = _mm256_andnot_ps(dummy_mask,velec);
1506             velecsum         = _mm256_add_ps(velecsum,velec);
1507
1508             fscal            = felec;
1509
1510             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1511
1512             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1513
1514             /* Calculate temporary vectorial force */
1515             tx               = _mm256_mul_ps(fscal,dx21);
1516             ty               = _mm256_mul_ps(fscal,dy21);
1517             tz               = _mm256_mul_ps(fscal,dz21);
1518
1519             /* Update vectorial force */
1520             fix2             = _mm256_add_ps(fix2,tx);
1521             fiy2             = _mm256_add_ps(fiy2,ty);
1522             fiz2             = _mm256_add_ps(fiz2,tz);
1523
1524             fjx1             = _mm256_add_ps(fjx1,tx);
1525             fjy1             = _mm256_add_ps(fjy1,ty);
1526             fjz1             = _mm256_add_ps(fjz1,tz);
1527
1528             }
1529
1530             /**************************
1531              * CALCULATE INTERACTIONS *
1532              **************************/
1533
1534             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1535             {
1536
1537             r22              = _mm256_mul_ps(rsq22,rinv22);
1538             r22              = _mm256_andnot_ps(dummy_mask,r22);
1539
1540             /* EWALD ELECTROSTATICS */
1541             
1542             /* Analytical PME correction */
1543             zeta2            = _mm256_mul_ps(beta2,rsq22);
1544             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1545             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1546             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1547             felec            = _mm256_mul_ps(qq22,felec);
1548             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1549             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1550             velec            = _mm256_sub_ps(rinv22,pmecorrV);
1551             velec            = _mm256_mul_ps(qq22,velec);
1552             
1553             d                = _mm256_sub_ps(r22,rswitch);
1554             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1555             d2               = _mm256_mul_ps(d,d);
1556             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1557
1558             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1559
1560             /* Evaluate switch function */
1561             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1562             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
1563             velec            = _mm256_mul_ps(velec,sw);
1564             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
1565
1566             /* Update potential sum for this i atom from the interaction with this j atom. */
1567             velec            = _mm256_and_ps(velec,cutoff_mask);
1568             velec            = _mm256_andnot_ps(dummy_mask,velec);
1569             velecsum         = _mm256_add_ps(velecsum,velec);
1570
1571             fscal            = felec;
1572
1573             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1574
1575             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1576
1577             /* Calculate temporary vectorial force */
1578             tx               = _mm256_mul_ps(fscal,dx22);
1579             ty               = _mm256_mul_ps(fscal,dy22);
1580             tz               = _mm256_mul_ps(fscal,dz22);
1581
1582             /* Update vectorial force */
1583             fix2             = _mm256_add_ps(fix2,tx);
1584             fiy2             = _mm256_add_ps(fiy2,ty);
1585             fiz2             = _mm256_add_ps(fiz2,tz);
1586
1587             fjx2             = _mm256_add_ps(fjx2,tx);
1588             fjy2             = _mm256_add_ps(fjy2,ty);
1589             fjz2             = _mm256_add_ps(fjz2,tz);
1590
1591             }
1592
1593             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1594             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1595             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1596             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1597             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1598             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1599             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1600             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1601
1602             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1603                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1604
1605             /* Inner loop uses 999 flops */
1606         }
1607
1608         /* End of innermost loop */
1609
1610         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1611                                                  f+i_coord_offset,fshift+i_shift_offset);
1612
1613         ggid                        = gid[iidx];
1614         /* Update potential energies */
1615         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1616         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
1617
1618         /* Increment number of inner iterations */
1619         inneriter                  += j_index_end - j_index_start;
1620
1621         /* Outer loop uses 20 flops */
1622     }
1623
1624     /* Increment number of outer iterations */
1625     outeriter        += nri;
1626
1627     /* Update outer/inner flops */
1628
1629     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*999);
1630 }
1631 /*
1632  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_single
1633  * Electrostatics interaction: Ewald
1634  * VdW interaction:            LennardJones
1635  * Geometry:                   Water3-Water3
1636  * Calculate force/pot:        Force
1637  */
1638 void
1639 nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_single
1640                     (t_nblist                    * gmx_restrict       nlist,
1641                      rvec                        * gmx_restrict          xx,
1642                      rvec                        * gmx_restrict          ff,
1643                      t_forcerec                  * gmx_restrict          fr,
1644                      t_mdatoms                   * gmx_restrict     mdatoms,
1645                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1646                      t_nrnb                      * gmx_restrict        nrnb)
1647 {
1648     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1649      * just 0 for non-waters.
1650      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1651      * jnr indices corresponding to data put in the four positions in the SIMD register.
1652      */
1653     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1654     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1655     int              jnrA,jnrB,jnrC,jnrD;
1656     int              jnrE,jnrF,jnrG,jnrH;
1657     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1658     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1659     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1660     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1661     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1662     real             rcutoff_scalar;
1663     real             *shiftvec,*fshift,*x,*f;
1664     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1665     real             scratch[4*DIM];
1666     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1667     real *           vdwioffsetptr0;
1668     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1669     real *           vdwioffsetptr1;
1670     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1671     real *           vdwioffsetptr2;
1672     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1673     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1674     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1675     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1676     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1677     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1678     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1679     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1680     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1681     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1682     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1683     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1684     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1685     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1686     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1687     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1688     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1689     real             *charge;
1690     int              nvdwtype;
1691     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1692     int              *vdwtype;
1693     real             *vdwparam;
1694     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
1695     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
1696     __m256i          ewitab;
1697     __m128i          ewitab_lo,ewitab_hi;
1698     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1699     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1700     real             *ewtab;
1701     __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
1702     real             rswitch_scalar,d_scalar;
1703     __m256           dummy_mask,cutoff_mask;
1704     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1705     __m256           one     = _mm256_set1_ps(1.0);
1706     __m256           two     = _mm256_set1_ps(2.0);
1707     x                = xx[0];
1708     f                = ff[0];
1709
1710     nri              = nlist->nri;
1711     iinr             = nlist->iinr;
1712     jindex           = nlist->jindex;
1713     jjnr             = nlist->jjnr;
1714     shiftidx         = nlist->shift;
1715     gid              = nlist->gid;
1716     shiftvec         = fr->shift_vec[0];
1717     fshift           = fr->fshift[0];
1718     facel            = _mm256_set1_ps(fr->epsfac);
1719     charge           = mdatoms->chargeA;
1720     nvdwtype         = fr->ntype;
1721     vdwparam         = fr->nbfp;
1722     vdwtype          = mdatoms->typeA;
1723
1724     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1725     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1726     beta2            = _mm256_mul_ps(beta,beta);
1727     beta3            = _mm256_mul_ps(beta,beta2);
1728
1729     ewtab            = fr->ic->tabq_coul_FDV0;
1730     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1731     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1732
1733     /* Setup water-specific parameters */
1734     inr              = nlist->iinr[0];
1735     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1736     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1737     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1738     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1739
1740     jq0              = _mm256_set1_ps(charge[inr+0]);
1741     jq1              = _mm256_set1_ps(charge[inr+1]);
1742     jq2              = _mm256_set1_ps(charge[inr+2]);
1743     vdwjidx0A        = 2*vdwtype[inr+0];
1744     qq00             = _mm256_mul_ps(iq0,jq0);
1745     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1746     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1747     qq01             = _mm256_mul_ps(iq0,jq1);
1748     qq02             = _mm256_mul_ps(iq0,jq2);
1749     qq10             = _mm256_mul_ps(iq1,jq0);
1750     qq11             = _mm256_mul_ps(iq1,jq1);
1751     qq12             = _mm256_mul_ps(iq1,jq2);
1752     qq20             = _mm256_mul_ps(iq2,jq0);
1753     qq21             = _mm256_mul_ps(iq2,jq1);
1754     qq22             = _mm256_mul_ps(iq2,jq2);
1755
1756     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1757     rcutoff_scalar   = fr->rcoulomb;
1758     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
1759     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
1760
1761     rswitch_scalar   = fr->rcoulomb_switch;
1762     rswitch          = _mm256_set1_ps(rswitch_scalar);
1763     /* Setup switch parameters */
1764     d_scalar         = rcutoff_scalar-rswitch_scalar;
1765     d                = _mm256_set1_ps(d_scalar);
1766     swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
1767     swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1768     swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1769     swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
1770     swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1771     swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1772
1773     /* Avoid stupid compiler warnings */
1774     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1775     j_coord_offsetA = 0;
1776     j_coord_offsetB = 0;
1777     j_coord_offsetC = 0;
1778     j_coord_offsetD = 0;
1779     j_coord_offsetE = 0;
1780     j_coord_offsetF = 0;
1781     j_coord_offsetG = 0;
1782     j_coord_offsetH = 0;
1783
1784     outeriter        = 0;
1785     inneriter        = 0;
1786
1787     for(iidx=0;iidx<4*DIM;iidx++)
1788     {
1789         scratch[iidx] = 0.0;
1790     }
1791
1792     /* Start outer loop over neighborlists */
1793     for(iidx=0; iidx<nri; iidx++)
1794     {
1795         /* Load shift vector for this list */
1796         i_shift_offset   = DIM*shiftidx[iidx];
1797
1798         /* Load limits for loop over neighbors */
1799         j_index_start    = jindex[iidx];
1800         j_index_end      = jindex[iidx+1];
1801
1802         /* Get outer coordinate index */
1803         inr              = iinr[iidx];
1804         i_coord_offset   = DIM*inr;
1805
1806         /* Load i particle coords and add shift vector */
1807         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1808                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1809
1810         fix0             = _mm256_setzero_ps();
1811         fiy0             = _mm256_setzero_ps();
1812         fiz0             = _mm256_setzero_ps();
1813         fix1             = _mm256_setzero_ps();
1814         fiy1             = _mm256_setzero_ps();
1815         fiz1             = _mm256_setzero_ps();
1816         fix2             = _mm256_setzero_ps();
1817         fiy2             = _mm256_setzero_ps();
1818         fiz2             = _mm256_setzero_ps();
1819
1820         /* Start inner kernel loop */
1821         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1822         {
1823
1824             /* Get j neighbor index, and coordinate index */
1825             jnrA             = jjnr[jidx];
1826             jnrB             = jjnr[jidx+1];
1827             jnrC             = jjnr[jidx+2];
1828             jnrD             = jjnr[jidx+3];
1829             jnrE             = jjnr[jidx+4];
1830             jnrF             = jjnr[jidx+5];
1831             jnrG             = jjnr[jidx+6];
1832             jnrH             = jjnr[jidx+7];
1833             j_coord_offsetA  = DIM*jnrA;
1834             j_coord_offsetB  = DIM*jnrB;
1835             j_coord_offsetC  = DIM*jnrC;
1836             j_coord_offsetD  = DIM*jnrD;
1837             j_coord_offsetE  = DIM*jnrE;
1838             j_coord_offsetF  = DIM*jnrF;
1839             j_coord_offsetG  = DIM*jnrG;
1840             j_coord_offsetH  = DIM*jnrH;
1841
1842             /* load j atom coordinates */
1843             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1844                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1845                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1846                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1847                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1848
1849             /* Calculate displacement vector */
1850             dx00             = _mm256_sub_ps(ix0,jx0);
1851             dy00             = _mm256_sub_ps(iy0,jy0);
1852             dz00             = _mm256_sub_ps(iz0,jz0);
1853             dx01             = _mm256_sub_ps(ix0,jx1);
1854             dy01             = _mm256_sub_ps(iy0,jy1);
1855             dz01             = _mm256_sub_ps(iz0,jz1);
1856             dx02             = _mm256_sub_ps(ix0,jx2);
1857             dy02             = _mm256_sub_ps(iy0,jy2);
1858             dz02             = _mm256_sub_ps(iz0,jz2);
1859             dx10             = _mm256_sub_ps(ix1,jx0);
1860             dy10             = _mm256_sub_ps(iy1,jy0);
1861             dz10             = _mm256_sub_ps(iz1,jz0);
1862             dx11             = _mm256_sub_ps(ix1,jx1);
1863             dy11             = _mm256_sub_ps(iy1,jy1);
1864             dz11             = _mm256_sub_ps(iz1,jz1);
1865             dx12             = _mm256_sub_ps(ix1,jx2);
1866             dy12             = _mm256_sub_ps(iy1,jy2);
1867             dz12             = _mm256_sub_ps(iz1,jz2);
1868             dx20             = _mm256_sub_ps(ix2,jx0);
1869             dy20             = _mm256_sub_ps(iy2,jy0);
1870             dz20             = _mm256_sub_ps(iz2,jz0);
1871             dx21             = _mm256_sub_ps(ix2,jx1);
1872             dy21             = _mm256_sub_ps(iy2,jy1);
1873             dz21             = _mm256_sub_ps(iz2,jz1);
1874             dx22             = _mm256_sub_ps(ix2,jx2);
1875             dy22             = _mm256_sub_ps(iy2,jy2);
1876             dz22             = _mm256_sub_ps(iz2,jz2);
1877
1878             /* Calculate squared distance and things based on it */
1879             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1880             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1881             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1882             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1883             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1884             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1885             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1886             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1887             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1888
1889             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1890             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
1891             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
1892             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1893             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1894             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1895             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1896             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1897             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1898
1899             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1900             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
1901             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
1902             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1903             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1904             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1905             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1906             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1907             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1908
1909             fjx0             = _mm256_setzero_ps();
1910             fjy0             = _mm256_setzero_ps();
1911             fjz0             = _mm256_setzero_ps();
1912             fjx1             = _mm256_setzero_ps();
1913             fjy1             = _mm256_setzero_ps();
1914             fjz1             = _mm256_setzero_ps();
1915             fjx2             = _mm256_setzero_ps();
1916             fjy2             = _mm256_setzero_ps();
1917             fjz2             = _mm256_setzero_ps();
1918
1919             /**************************
1920              * CALCULATE INTERACTIONS *
1921              **************************/
1922
1923             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1924             {
1925
1926             r00              = _mm256_mul_ps(rsq00,rinv00);
1927
1928             /* EWALD ELECTROSTATICS */
1929             
1930             /* Analytical PME correction */
1931             zeta2            = _mm256_mul_ps(beta2,rsq00);
1932             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1933             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1934             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1935             felec            = _mm256_mul_ps(qq00,felec);
1936             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1937             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1938             velec            = _mm256_sub_ps(rinv00,pmecorrV);
1939             velec            = _mm256_mul_ps(qq00,velec);
1940             
1941             /* LENNARD-JONES DISPERSION/REPULSION */
1942
1943             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1944             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
1945             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1946             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
1947             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
1948
1949             d                = _mm256_sub_ps(r00,rswitch);
1950             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1951             d2               = _mm256_mul_ps(d,d);
1952             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1953
1954             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1955
1956             /* Evaluate switch function */
1957             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1958             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
1959             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
1960             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1961
1962             fscal            = _mm256_add_ps(felec,fvdw);
1963
1964             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1965
1966             /* Calculate temporary vectorial force */
1967             tx               = _mm256_mul_ps(fscal,dx00);
1968             ty               = _mm256_mul_ps(fscal,dy00);
1969             tz               = _mm256_mul_ps(fscal,dz00);
1970
1971             /* Update vectorial force */
1972             fix0             = _mm256_add_ps(fix0,tx);
1973             fiy0             = _mm256_add_ps(fiy0,ty);
1974             fiz0             = _mm256_add_ps(fiz0,tz);
1975
1976             fjx0             = _mm256_add_ps(fjx0,tx);
1977             fjy0             = _mm256_add_ps(fjy0,ty);
1978             fjz0             = _mm256_add_ps(fjz0,tz);
1979
1980             }
1981
1982             /**************************
1983              * CALCULATE INTERACTIONS *
1984              **************************/
1985
1986             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1987             {
1988
1989             r01              = _mm256_mul_ps(rsq01,rinv01);
1990
1991             /* EWALD ELECTROSTATICS */
1992             
1993             /* Analytical PME correction */
1994             zeta2            = _mm256_mul_ps(beta2,rsq01);
1995             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1996             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1997             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1998             felec            = _mm256_mul_ps(qq01,felec);
1999             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2000             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2001             velec            = _mm256_sub_ps(rinv01,pmecorrV);
2002             velec            = _mm256_mul_ps(qq01,velec);
2003             
2004             d                = _mm256_sub_ps(r01,rswitch);
2005             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2006             d2               = _mm256_mul_ps(d,d);
2007             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2008
2009             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2010
2011             /* Evaluate switch function */
2012             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2013             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
2014             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
2015
2016             fscal            = felec;
2017
2018             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2019
2020             /* Calculate temporary vectorial force */
2021             tx               = _mm256_mul_ps(fscal,dx01);
2022             ty               = _mm256_mul_ps(fscal,dy01);
2023             tz               = _mm256_mul_ps(fscal,dz01);
2024
2025             /* Update vectorial force */
2026             fix0             = _mm256_add_ps(fix0,tx);
2027             fiy0             = _mm256_add_ps(fiy0,ty);
2028             fiz0             = _mm256_add_ps(fiz0,tz);
2029
2030             fjx1             = _mm256_add_ps(fjx1,tx);
2031             fjy1             = _mm256_add_ps(fjy1,ty);
2032             fjz1             = _mm256_add_ps(fjz1,tz);
2033
2034             }
2035
2036             /**************************
2037              * CALCULATE INTERACTIONS *
2038              **************************/
2039
2040             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2041             {
2042
2043             r02              = _mm256_mul_ps(rsq02,rinv02);
2044
2045             /* EWALD ELECTROSTATICS */
2046             
2047             /* Analytical PME correction */
2048             zeta2            = _mm256_mul_ps(beta2,rsq02);
2049             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
2050             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2051             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2052             felec            = _mm256_mul_ps(qq02,felec);
2053             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2054             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2055             velec            = _mm256_sub_ps(rinv02,pmecorrV);
2056             velec            = _mm256_mul_ps(qq02,velec);
2057             
2058             d                = _mm256_sub_ps(r02,rswitch);
2059             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2060             d2               = _mm256_mul_ps(d,d);
2061             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2062
2063             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2064
2065             /* Evaluate switch function */
2066             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2067             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
2068             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
2069
2070             fscal            = felec;
2071
2072             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2073
2074             /* Calculate temporary vectorial force */
2075             tx               = _mm256_mul_ps(fscal,dx02);
2076             ty               = _mm256_mul_ps(fscal,dy02);
2077             tz               = _mm256_mul_ps(fscal,dz02);
2078
2079             /* Update vectorial force */
2080             fix0             = _mm256_add_ps(fix0,tx);
2081             fiy0             = _mm256_add_ps(fiy0,ty);
2082             fiz0             = _mm256_add_ps(fiz0,tz);
2083
2084             fjx2             = _mm256_add_ps(fjx2,tx);
2085             fjy2             = _mm256_add_ps(fjy2,ty);
2086             fjz2             = _mm256_add_ps(fjz2,tz);
2087
2088             }
2089
2090             /**************************
2091              * CALCULATE INTERACTIONS *
2092              **************************/
2093
2094             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2095             {
2096
2097             r10              = _mm256_mul_ps(rsq10,rinv10);
2098
2099             /* EWALD ELECTROSTATICS */
2100             
2101             /* Analytical PME correction */
2102             zeta2            = _mm256_mul_ps(beta2,rsq10);
2103             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
2104             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2105             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2106             felec            = _mm256_mul_ps(qq10,felec);
2107             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2108             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2109             velec            = _mm256_sub_ps(rinv10,pmecorrV);
2110             velec            = _mm256_mul_ps(qq10,velec);
2111             
2112             d                = _mm256_sub_ps(r10,rswitch);
2113             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2114             d2               = _mm256_mul_ps(d,d);
2115             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2116
2117             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2118
2119             /* Evaluate switch function */
2120             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2121             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
2122             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
2123
2124             fscal            = felec;
2125
2126             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2127
2128             /* Calculate temporary vectorial force */
2129             tx               = _mm256_mul_ps(fscal,dx10);
2130             ty               = _mm256_mul_ps(fscal,dy10);
2131             tz               = _mm256_mul_ps(fscal,dz10);
2132
2133             /* Update vectorial force */
2134             fix1             = _mm256_add_ps(fix1,tx);
2135             fiy1             = _mm256_add_ps(fiy1,ty);
2136             fiz1             = _mm256_add_ps(fiz1,tz);
2137
2138             fjx0             = _mm256_add_ps(fjx0,tx);
2139             fjy0             = _mm256_add_ps(fjy0,ty);
2140             fjz0             = _mm256_add_ps(fjz0,tz);
2141
2142             }
2143
2144             /**************************
2145              * CALCULATE INTERACTIONS *
2146              **************************/
2147
2148             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2149             {
2150
2151             r11              = _mm256_mul_ps(rsq11,rinv11);
2152
2153             /* EWALD ELECTROSTATICS */
2154             
2155             /* Analytical PME correction */
2156             zeta2            = _mm256_mul_ps(beta2,rsq11);
2157             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2158             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2159             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2160             felec            = _mm256_mul_ps(qq11,felec);
2161             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2162             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2163             velec            = _mm256_sub_ps(rinv11,pmecorrV);
2164             velec            = _mm256_mul_ps(qq11,velec);
2165             
2166             d                = _mm256_sub_ps(r11,rswitch);
2167             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2168             d2               = _mm256_mul_ps(d,d);
2169             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2170
2171             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2172
2173             /* Evaluate switch function */
2174             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2175             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
2176             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2177
2178             fscal            = felec;
2179
2180             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2181
2182             /* Calculate temporary vectorial force */
2183             tx               = _mm256_mul_ps(fscal,dx11);
2184             ty               = _mm256_mul_ps(fscal,dy11);
2185             tz               = _mm256_mul_ps(fscal,dz11);
2186
2187             /* Update vectorial force */
2188             fix1             = _mm256_add_ps(fix1,tx);
2189             fiy1             = _mm256_add_ps(fiy1,ty);
2190             fiz1             = _mm256_add_ps(fiz1,tz);
2191
2192             fjx1             = _mm256_add_ps(fjx1,tx);
2193             fjy1             = _mm256_add_ps(fjy1,ty);
2194             fjz1             = _mm256_add_ps(fjz1,tz);
2195
2196             }
2197
2198             /**************************
2199              * CALCULATE INTERACTIONS *
2200              **************************/
2201
2202             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2203             {
2204
2205             r12              = _mm256_mul_ps(rsq12,rinv12);
2206
2207             /* EWALD ELECTROSTATICS */
2208             
2209             /* Analytical PME correction */
2210             zeta2            = _mm256_mul_ps(beta2,rsq12);
2211             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2212             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2213             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2214             felec            = _mm256_mul_ps(qq12,felec);
2215             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2216             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2217             velec            = _mm256_sub_ps(rinv12,pmecorrV);
2218             velec            = _mm256_mul_ps(qq12,velec);
2219             
2220             d                = _mm256_sub_ps(r12,rswitch);
2221             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2222             d2               = _mm256_mul_ps(d,d);
2223             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2224
2225             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2226
2227             /* Evaluate switch function */
2228             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2229             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
2230             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2231
2232             fscal            = felec;
2233
2234             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2235
2236             /* Calculate temporary vectorial force */
2237             tx               = _mm256_mul_ps(fscal,dx12);
2238             ty               = _mm256_mul_ps(fscal,dy12);
2239             tz               = _mm256_mul_ps(fscal,dz12);
2240
2241             /* Update vectorial force */
2242             fix1             = _mm256_add_ps(fix1,tx);
2243             fiy1             = _mm256_add_ps(fiy1,ty);
2244             fiz1             = _mm256_add_ps(fiz1,tz);
2245
2246             fjx2             = _mm256_add_ps(fjx2,tx);
2247             fjy2             = _mm256_add_ps(fjy2,ty);
2248             fjz2             = _mm256_add_ps(fjz2,tz);
2249
2250             }
2251
2252             /**************************
2253              * CALCULATE INTERACTIONS *
2254              **************************/
2255
2256             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2257             {
2258
2259             r20              = _mm256_mul_ps(rsq20,rinv20);
2260
2261             /* EWALD ELECTROSTATICS */
2262             
2263             /* Analytical PME correction */
2264             zeta2            = _mm256_mul_ps(beta2,rsq20);
2265             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
2266             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2267             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2268             felec            = _mm256_mul_ps(qq20,felec);
2269             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2270             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2271             velec            = _mm256_sub_ps(rinv20,pmecorrV);
2272             velec            = _mm256_mul_ps(qq20,velec);
2273             
2274             d                = _mm256_sub_ps(r20,rswitch);
2275             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2276             d2               = _mm256_mul_ps(d,d);
2277             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2278
2279             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2280
2281             /* Evaluate switch function */
2282             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2283             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
2284             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
2285
2286             fscal            = felec;
2287
2288             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2289
2290             /* Calculate temporary vectorial force */
2291             tx               = _mm256_mul_ps(fscal,dx20);
2292             ty               = _mm256_mul_ps(fscal,dy20);
2293             tz               = _mm256_mul_ps(fscal,dz20);
2294
2295             /* Update vectorial force */
2296             fix2             = _mm256_add_ps(fix2,tx);
2297             fiy2             = _mm256_add_ps(fiy2,ty);
2298             fiz2             = _mm256_add_ps(fiz2,tz);
2299
2300             fjx0             = _mm256_add_ps(fjx0,tx);
2301             fjy0             = _mm256_add_ps(fjy0,ty);
2302             fjz0             = _mm256_add_ps(fjz0,tz);
2303
2304             }
2305
2306             /**************************
2307              * CALCULATE INTERACTIONS *
2308              **************************/
2309
2310             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2311             {
2312
2313             r21              = _mm256_mul_ps(rsq21,rinv21);
2314
2315             /* EWALD ELECTROSTATICS */
2316             
2317             /* Analytical PME correction */
2318             zeta2            = _mm256_mul_ps(beta2,rsq21);
2319             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2320             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2321             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2322             felec            = _mm256_mul_ps(qq21,felec);
2323             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2324             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2325             velec            = _mm256_sub_ps(rinv21,pmecorrV);
2326             velec            = _mm256_mul_ps(qq21,velec);
2327             
2328             d                = _mm256_sub_ps(r21,rswitch);
2329             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2330             d2               = _mm256_mul_ps(d,d);
2331             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2332
2333             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2334
2335             /* Evaluate switch function */
2336             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2337             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
2338             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2339
2340             fscal            = felec;
2341
2342             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2343
2344             /* Calculate temporary vectorial force */
2345             tx               = _mm256_mul_ps(fscal,dx21);
2346             ty               = _mm256_mul_ps(fscal,dy21);
2347             tz               = _mm256_mul_ps(fscal,dz21);
2348
2349             /* Update vectorial force */
2350             fix2             = _mm256_add_ps(fix2,tx);
2351             fiy2             = _mm256_add_ps(fiy2,ty);
2352             fiz2             = _mm256_add_ps(fiz2,tz);
2353
2354             fjx1             = _mm256_add_ps(fjx1,tx);
2355             fjy1             = _mm256_add_ps(fjy1,ty);
2356             fjz1             = _mm256_add_ps(fjz1,tz);
2357
2358             }
2359
2360             /**************************
2361              * CALCULATE INTERACTIONS *
2362              **************************/
2363
2364             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2365             {
2366
2367             r22              = _mm256_mul_ps(rsq22,rinv22);
2368
2369             /* EWALD ELECTROSTATICS */
2370             
2371             /* Analytical PME correction */
2372             zeta2            = _mm256_mul_ps(beta2,rsq22);
2373             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
2374             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2375             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2376             felec            = _mm256_mul_ps(qq22,felec);
2377             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2378             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2379             velec            = _mm256_sub_ps(rinv22,pmecorrV);
2380             velec            = _mm256_mul_ps(qq22,velec);
2381             
2382             d                = _mm256_sub_ps(r22,rswitch);
2383             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2384             d2               = _mm256_mul_ps(d,d);
2385             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2386
2387             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2388
2389             /* Evaluate switch function */
2390             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2391             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
2392             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2393
2394             fscal            = felec;
2395
2396             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2397
2398             /* Calculate temporary vectorial force */
2399             tx               = _mm256_mul_ps(fscal,dx22);
2400             ty               = _mm256_mul_ps(fscal,dy22);
2401             tz               = _mm256_mul_ps(fscal,dz22);
2402
2403             /* Update vectorial force */
2404             fix2             = _mm256_add_ps(fix2,tx);
2405             fiy2             = _mm256_add_ps(fiy2,ty);
2406             fiz2             = _mm256_add_ps(fiz2,tz);
2407
2408             fjx2             = _mm256_add_ps(fjx2,tx);
2409             fjy2             = _mm256_add_ps(fjy2,ty);
2410             fjz2             = _mm256_add_ps(fjz2,tz);
2411
2412             }
2413
2414             fjptrA             = f+j_coord_offsetA;
2415             fjptrB             = f+j_coord_offsetB;
2416             fjptrC             = f+j_coord_offsetC;
2417             fjptrD             = f+j_coord_offsetD;
2418             fjptrE             = f+j_coord_offsetE;
2419             fjptrF             = f+j_coord_offsetF;
2420             fjptrG             = f+j_coord_offsetG;
2421             fjptrH             = f+j_coord_offsetH;
2422
2423             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2424                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2425
2426             /* Inner loop uses 960 flops */
2427         }
2428
2429         if(jidx<j_index_end)
2430         {
2431
            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             * (The mask is built from two 128-bit signed compares against zero,
             * merged into one 256-bit register.)
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
                                            
            /* Clamp negative (dummy) indices to atom 0 so the gather below stays
             * in bounds; the bogus lanes are zeroed later via dummy_mask.
             */
            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates (3 atoms per water, 8 j-waters per vector pass) */
            gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
2471
2472             /* Calculate displacement vector */
2473             dx00             = _mm256_sub_ps(ix0,jx0);
2474             dy00             = _mm256_sub_ps(iy0,jy0);
2475             dz00             = _mm256_sub_ps(iz0,jz0);
2476             dx01             = _mm256_sub_ps(ix0,jx1);
2477             dy01             = _mm256_sub_ps(iy0,jy1);
2478             dz01             = _mm256_sub_ps(iz0,jz1);
2479             dx02             = _mm256_sub_ps(ix0,jx2);
2480             dy02             = _mm256_sub_ps(iy0,jy2);
2481             dz02             = _mm256_sub_ps(iz0,jz2);
2482             dx10             = _mm256_sub_ps(ix1,jx0);
2483             dy10             = _mm256_sub_ps(iy1,jy0);
2484             dz10             = _mm256_sub_ps(iz1,jz0);
2485             dx11             = _mm256_sub_ps(ix1,jx1);
2486             dy11             = _mm256_sub_ps(iy1,jy1);
2487             dz11             = _mm256_sub_ps(iz1,jz1);
2488             dx12             = _mm256_sub_ps(ix1,jx2);
2489             dy12             = _mm256_sub_ps(iy1,jy2);
2490             dz12             = _mm256_sub_ps(iz1,jz2);
2491             dx20             = _mm256_sub_ps(ix2,jx0);
2492             dy20             = _mm256_sub_ps(iy2,jy0);
2493             dz20             = _mm256_sub_ps(iz2,jz0);
2494             dx21             = _mm256_sub_ps(ix2,jx1);
2495             dy21             = _mm256_sub_ps(iy2,jy1);
2496             dz21             = _mm256_sub_ps(iz2,jz1);
2497             dx22             = _mm256_sub_ps(ix2,jx2);
2498             dy22             = _mm256_sub_ps(iy2,jy2);
2499             dz22             = _mm256_sub_ps(iz2,jz2);
2500
2501             /* Calculate squared distance and things based on it */
2502             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2503             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
2504             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
2505             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
2506             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2507             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2508             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
2509             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2510             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2511
2512             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
2513             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
2514             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
2515             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
2516             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
2517             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
2518             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
2519             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
2520             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
2521
2522             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
2523             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
2524             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
2525             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
2526             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2527             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2528             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
2529             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2530             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2531
2532             fjx0             = _mm256_setzero_ps();
2533             fjy0             = _mm256_setzero_ps();
2534             fjz0             = _mm256_setzero_ps();
2535             fjx1             = _mm256_setzero_ps();
2536             fjy1             = _mm256_setzero_ps();
2537             fjz1             = _mm256_setzero_ps();
2538             fjx2             = _mm256_setzero_ps();
2539             fjy2             = _mm256_setzero_ps();
2540             fjz2             = _mm256_setzero_ps();
2541
            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* O-O pair (site 0 - site 0): the only pair with both Ewald
             * electrostatics and Lennard-Jones; all other site pairs below
             * are electrostatics-only.
             */
            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            /* Zero r in dummy (padding) lanes so the switch polynomial below
             * stays well-defined there.
             */
            r00              = _mm256_andnot_ps(dummy_mask,r00);

            /* EWALD ELECTROSTATICS */
            
            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv00,pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);
            
            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);

            /* d = max(r - rswitch, 0): distance into the switching region;
             * sw/dsw are the degree-5 switch polynomial and its derivative term.
             */
            d                = _mm256_sub_ps(r00,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
            fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = _mm256_add_ps(felec,fvdw);

            /* Zero the force in lanes beyond the cutoff and in dummy lanes. */
            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            /* j forces accumulate with the same sign here and are subtracted
             * from f[] in the decrement-scatter after the pair blocks
             * (Newton's third law).
             */
            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }
2607
2608             /**************************
2609              * CALCULATE INTERACTIONS *
2610              **************************/
2611
2612             if (gmx_mm256_any_lt(rsq01,rcutoff2))
2613             {
2614
2615             r01              = _mm256_mul_ps(rsq01,rinv01);
2616             r01              = _mm256_andnot_ps(dummy_mask,r01);
2617
2618             /* EWALD ELECTROSTATICS */
2619             
2620             /* Analytical PME correction */
2621             zeta2            = _mm256_mul_ps(beta2,rsq01);
2622             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
2623             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2624             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2625             felec            = _mm256_mul_ps(qq01,felec);
2626             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2627             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2628             velec            = _mm256_sub_ps(rinv01,pmecorrV);
2629             velec            = _mm256_mul_ps(qq01,velec);
2630             
2631             d                = _mm256_sub_ps(r01,rswitch);
2632             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2633             d2               = _mm256_mul_ps(d,d);
2634             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2635
2636             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2637
2638             /* Evaluate switch function */
2639             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2640             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv01,_mm256_mul_ps(velec,dsw)) );
2641             cutoff_mask      = _mm256_cmp_ps(rsq01,rcutoff2,_CMP_LT_OQ);
2642
2643             fscal            = felec;
2644
2645             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2646
2647             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2648
2649             /* Calculate temporary vectorial force */
2650             tx               = _mm256_mul_ps(fscal,dx01);
2651             ty               = _mm256_mul_ps(fscal,dy01);
2652             tz               = _mm256_mul_ps(fscal,dz01);
2653
2654             /* Update vectorial force */
2655             fix0             = _mm256_add_ps(fix0,tx);
2656             fiy0             = _mm256_add_ps(fiy0,ty);
2657             fiz0             = _mm256_add_ps(fiz0,tz);
2658
2659             fjx1             = _mm256_add_ps(fjx1,tx);
2660             fjy1             = _mm256_add_ps(fjy1,ty);
2661             fjz1             = _mm256_add_ps(fjz1,tz);
2662
2663             }
2664
2665             /**************************
2666              * CALCULATE INTERACTIONS *
2667              **************************/
2668
2669             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2670             {
2671
2672             r02              = _mm256_mul_ps(rsq02,rinv02);
2673             r02              = _mm256_andnot_ps(dummy_mask,r02);
2674
2675             /* EWALD ELECTROSTATICS */
2676             
2677             /* Analytical PME correction */
2678             zeta2            = _mm256_mul_ps(beta2,rsq02);
2679             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
2680             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2681             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2682             felec            = _mm256_mul_ps(qq02,felec);
2683             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2684             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2685             velec            = _mm256_sub_ps(rinv02,pmecorrV);
2686             velec            = _mm256_mul_ps(qq02,velec);
2687             
2688             d                = _mm256_sub_ps(r02,rswitch);
2689             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2690             d2               = _mm256_mul_ps(d,d);
2691             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2692
2693             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2694
2695             /* Evaluate switch function */
2696             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2697             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv02,_mm256_mul_ps(velec,dsw)) );
2698             cutoff_mask      = _mm256_cmp_ps(rsq02,rcutoff2,_CMP_LT_OQ);
2699
2700             fscal            = felec;
2701
2702             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2703
2704             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2705
2706             /* Calculate temporary vectorial force */
2707             tx               = _mm256_mul_ps(fscal,dx02);
2708             ty               = _mm256_mul_ps(fscal,dy02);
2709             tz               = _mm256_mul_ps(fscal,dz02);
2710
2711             /* Update vectorial force */
2712             fix0             = _mm256_add_ps(fix0,tx);
2713             fiy0             = _mm256_add_ps(fiy0,ty);
2714             fiz0             = _mm256_add_ps(fiz0,tz);
2715
2716             fjx2             = _mm256_add_ps(fjx2,tx);
2717             fjy2             = _mm256_add_ps(fjy2,ty);
2718             fjz2             = _mm256_add_ps(fjz2,tz);
2719
2720             }
2721
2722             /**************************
2723              * CALCULATE INTERACTIONS *
2724              **************************/
2725
2726             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2727             {
2728
2729             r10              = _mm256_mul_ps(rsq10,rinv10);
2730             r10              = _mm256_andnot_ps(dummy_mask,r10);
2731
2732             /* EWALD ELECTROSTATICS */
2733             
2734             /* Analytical PME correction */
2735             zeta2            = _mm256_mul_ps(beta2,rsq10);
2736             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
2737             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2738             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2739             felec            = _mm256_mul_ps(qq10,felec);
2740             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2741             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2742             velec            = _mm256_sub_ps(rinv10,pmecorrV);
2743             velec            = _mm256_mul_ps(qq10,velec);
2744             
2745             d                = _mm256_sub_ps(r10,rswitch);
2746             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2747             d2               = _mm256_mul_ps(d,d);
2748             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2749
2750             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2751
2752             /* Evaluate switch function */
2753             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2754             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
2755             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
2756
2757             fscal            = felec;
2758
2759             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2760
2761             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2762
2763             /* Calculate temporary vectorial force */
2764             tx               = _mm256_mul_ps(fscal,dx10);
2765             ty               = _mm256_mul_ps(fscal,dy10);
2766             tz               = _mm256_mul_ps(fscal,dz10);
2767
2768             /* Update vectorial force */
2769             fix1             = _mm256_add_ps(fix1,tx);
2770             fiy1             = _mm256_add_ps(fiy1,ty);
2771             fiz1             = _mm256_add_ps(fiz1,tz);
2772
2773             fjx0             = _mm256_add_ps(fjx0,tx);
2774             fjy0             = _mm256_add_ps(fjy0,ty);
2775             fjz0             = _mm256_add_ps(fjz0,tz);
2776
2777             }
2778
2779             /**************************
2780              * CALCULATE INTERACTIONS *
2781              **************************/
2782
2783             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2784             {
2785
2786             r11              = _mm256_mul_ps(rsq11,rinv11);
2787             r11              = _mm256_andnot_ps(dummy_mask,r11);
2788
2789             /* EWALD ELECTROSTATICS */
2790             
2791             /* Analytical PME correction */
2792             zeta2            = _mm256_mul_ps(beta2,rsq11);
2793             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2794             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2795             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2796             felec            = _mm256_mul_ps(qq11,felec);
2797             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2798             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2799             velec            = _mm256_sub_ps(rinv11,pmecorrV);
2800             velec            = _mm256_mul_ps(qq11,velec);
2801             
2802             d                = _mm256_sub_ps(r11,rswitch);
2803             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2804             d2               = _mm256_mul_ps(d,d);
2805             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2806
2807             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2808
2809             /* Evaluate switch function */
2810             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2811             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
2812             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2813
2814             fscal            = felec;
2815
2816             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2817
2818             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2819
2820             /* Calculate temporary vectorial force */
2821             tx               = _mm256_mul_ps(fscal,dx11);
2822             ty               = _mm256_mul_ps(fscal,dy11);
2823             tz               = _mm256_mul_ps(fscal,dz11);
2824
2825             /* Update vectorial force */
2826             fix1             = _mm256_add_ps(fix1,tx);
2827             fiy1             = _mm256_add_ps(fiy1,ty);
2828             fiz1             = _mm256_add_ps(fiz1,tz);
2829
2830             fjx1             = _mm256_add_ps(fjx1,tx);
2831             fjy1             = _mm256_add_ps(fjy1,ty);
2832             fjz1             = _mm256_add_ps(fjz1,tz);
2833
2834             }
2835
2836             /**************************
2837              * CALCULATE INTERACTIONS *
2838              **************************/
2839
2840             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2841             {
2842
2843             r12              = _mm256_mul_ps(rsq12,rinv12);
2844             r12              = _mm256_andnot_ps(dummy_mask,r12);
2845
2846             /* EWALD ELECTROSTATICS */
2847             
2848             /* Analytical PME correction */
2849             zeta2            = _mm256_mul_ps(beta2,rsq12);
2850             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2851             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2852             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2853             felec            = _mm256_mul_ps(qq12,felec);
2854             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2855             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2856             velec            = _mm256_sub_ps(rinv12,pmecorrV);
2857             velec            = _mm256_mul_ps(qq12,velec);
2858             
2859             d                = _mm256_sub_ps(r12,rswitch);
2860             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2861             d2               = _mm256_mul_ps(d,d);
2862             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2863
2864             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2865
2866             /* Evaluate switch function */
2867             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2868             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
2869             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2870
2871             fscal            = felec;
2872
2873             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2874
2875             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2876
2877             /* Calculate temporary vectorial force */
2878             tx               = _mm256_mul_ps(fscal,dx12);
2879             ty               = _mm256_mul_ps(fscal,dy12);
2880             tz               = _mm256_mul_ps(fscal,dz12);
2881
2882             /* Update vectorial force */
2883             fix1             = _mm256_add_ps(fix1,tx);
2884             fiy1             = _mm256_add_ps(fiy1,ty);
2885             fiz1             = _mm256_add_ps(fiz1,tz);
2886
2887             fjx2             = _mm256_add_ps(fjx2,tx);
2888             fjy2             = _mm256_add_ps(fjy2,ty);
2889             fjz2             = _mm256_add_ps(fjz2,tz);
2890
2891             }
2892
2893             /**************************
2894              * CALCULATE INTERACTIONS *
2895              **************************/
2896
2897             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2898             {
2899
2900             r20              = _mm256_mul_ps(rsq20,rinv20);
2901             r20              = _mm256_andnot_ps(dummy_mask,r20);
2902
2903             /* EWALD ELECTROSTATICS */
2904             
2905             /* Analytical PME correction */
2906             zeta2            = _mm256_mul_ps(beta2,rsq20);
2907             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
2908             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2909             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2910             felec            = _mm256_mul_ps(qq20,felec);
2911             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2912             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2913             velec            = _mm256_sub_ps(rinv20,pmecorrV);
2914             velec            = _mm256_mul_ps(qq20,velec);
2915             
2916             d                = _mm256_sub_ps(r20,rswitch);
2917             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2918             d2               = _mm256_mul_ps(d,d);
2919             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2920
2921             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2922
2923             /* Evaluate switch function */
2924             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2925             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
2926             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
2927
2928             fscal            = felec;
2929
2930             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2931
2932             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2933
2934             /* Calculate temporary vectorial force */
2935             tx               = _mm256_mul_ps(fscal,dx20);
2936             ty               = _mm256_mul_ps(fscal,dy20);
2937             tz               = _mm256_mul_ps(fscal,dz20);
2938
2939             /* Update vectorial force */
2940             fix2             = _mm256_add_ps(fix2,tx);
2941             fiy2             = _mm256_add_ps(fiy2,ty);
2942             fiz2             = _mm256_add_ps(fiz2,tz);
2943
2944             fjx0             = _mm256_add_ps(fjx0,tx);
2945             fjy0             = _mm256_add_ps(fjy0,ty);
2946             fjz0             = _mm256_add_ps(fjz0,tz);
2947
2948             }
2949
2950             /**************************
2951              * CALCULATE INTERACTIONS *
2952              **************************/
2953
2954             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2955             {
2956
2957             r21              = _mm256_mul_ps(rsq21,rinv21);
2958             r21              = _mm256_andnot_ps(dummy_mask,r21);
2959
2960             /* EWALD ELECTROSTATICS */
2961             
2962             /* Analytical PME correction */
2963             zeta2            = _mm256_mul_ps(beta2,rsq21);
2964             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2965             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2966             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2967             felec            = _mm256_mul_ps(qq21,felec);
2968             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
2969             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2970             velec            = _mm256_sub_ps(rinv21,pmecorrV);
2971             velec            = _mm256_mul_ps(qq21,velec);
2972             
2973             d                = _mm256_sub_ps(r21,rswitch);
2974             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2975             d2               = _mm256_mul_ps(d,d);
2976             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2977
2978             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2979
2980             /* Evaluate switch function */
2981             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2982             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
2983             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2984
2985             fscal            = felec;
2986
2987             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2988
2989             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2990
2991             /* Calculate temporary vectorial force */
2992             tx               = _mm256_mul_ps(fscal,dx21);
2993             ty               = _mm256_mul_ps(fscal,dy21);
2994             tz               = _mm256_mul_ps(fscal,dz21);
2995
2996             /* Update vectorial force */
2997             fix2             = _mm256_add_ps(fix2,tx);
2998             fiy2             = _mm256_add_ps(fiy2,ty);
2999             fiz2             = _mm256_add_ps(fiz2,tz);
3000
3001             fjx1             = _mm256_add_ps(fjx1,tx);
3002             fjy1             = _mm256_add_ps(fjy1,ty);
3003             fjz1             = _mm256_add_ps(fjz1,tz);
3004
3005             }
3006
3007             /**************************
3008              * CALCULATE INTERACTIONS *
3009              **************************/
3010
3011             if (gmx_mm256_any_lt(rsq22,rcutoff2))
3012             {
3013
3014             r22              = _mm256_mul_ps(rsq22,rinv22);
3015             r22              = _mm256_andnot_ps(dummy_mask,r22);
3016
3017             /* EWALD ELECTROSTATICS */
3018             
3019             /* Analytical PME correction */
3020             zeta2            = _mm256_mul_ps(beta2,rsq22);
3021             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
3022             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
3023             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3024             felec            = _mm256_mul_ps(qq22,felec);
3025             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
3026             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
3027             velec            = _mm256_sub_ps(rinv22,pmecorrV);
3028             velec            = _mm256_mul_ps(qq22,velec);
3029             
3030             d                = _mm256_sub_ps(r22,rswitch);
3031             d                = _mm256_max_ps(d,_mm256_setzero_ps());
3032             d2               = _mm256_mul_ps(d,d);
3033             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3034
3035             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3036
3037             /* Evaluate switch function */
3038             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
3039             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
3040             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
3041
3042             fscal            = felec;
3043
3044             fscal            = _mm256_and_ps(fscal,cutoff_mask);
3045
3046             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
3047
3048             /* Calculate temporary vectorial force */
3049             tx               = _mm256_mul_ps(fscal,dx22);
3050             ty               = _mm256_mul_ps(fscal,dy22);
3051             tz               = _mm256_mul_ps(fscal,dz22);
3052
3053             /* Update vectorial force */
3054             fix2             = _mm256_add_ps(fix2,tx);
3055             fiy2             = _mm256_add_ps(fiy2,ty);
3056             fiz2             = _mm256_add_ps(fiz2,tz);
3057
3058             fjx2             = _mm256_add_ps(fjx2,tx);
3059             fjy2             = _mm256_add_ps(fjy2,ty);
3060             fjz2             = _mm256_add_ps(fjz2,tz);
3061
3062             }
3063
            /* Redirect dummy (padding) lanes to the scratch buffer so the
             * decrement-scatter below never touches force entries of real atoms
             * for those lanes.
             */
            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

            /* Subtract the accumulated j forces from the force array (reaction
             * to the i-force updates above).
             */
            gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
3076             /* Inner loop uses 969 flops */
3077         }
3078
3079         /* End of innermost loop */
3080
3081         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
3082                                                  f+i_coord_offset,fshift+i_shift_offset);
3083
3084         /* Increment number of inner iterations */
3085         inneriter                  += j_index_end - j_index_start;
3086
3087         /* Outer loop uses 18 flops */
3088     }
3089
3090     /* Increment number of outer iterations */
3091     outeriter        += nri;
3092
3093     /* Update outer/inner flops */
3094
3095     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*969);
3096 }