/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014,2015,2017,2018, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "gmxpre.h"

#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "gromacs/gmxlib/nrnb.h"

#include "kernelutil_x86_avx_256_single.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LennardJones
 * Geometry:                   Water4-Water4
 * Calculate force/pot:        PotentialAndForce
 */
void
nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     struct t_forcerec           * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
     * jnr indices corresponding to data put in the eight lanes of the SIMD register.
     */
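    /* Annotation (illustrative, not generated code): with 8-wide single-precision
     * AVX, each __m256 in the inner loop packs one coordinate component for the
     * eight j waters A..H, e.g. roughly
     *   jx1 = { x[3*jnrA+3], x[3*jnrB+3], ..., x[3*jnrH+3] }
     * so every intrinsic below evaluates eight i-j atom pairs at once.
     */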
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real *           vdwioffsetptr3;
    __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
    __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
    __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
    __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
    __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
    real             rswitch_scalar,d_scalar;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->ic->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    jq3              = _mm256_set1_ps(charge[inr+3]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
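    /* Annotation: fr->nbfp stores the Lennard-Jones parameters as a flattened
     * ntype x ntype matrix with two reals (c6,c12) per type pair, hence the
     * 2*nvdwtype*vdwtype[i] row offset and 2*vdwtype[j] column index above.
     * In this water-water kernel only site 0 carries LJ, so the single 0-0
     * (c6,c12) pair can be broadcast once outside the loops.
     */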
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq13             = _mm256_mul_ps(iq1,jq3);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
    qq23             = _mm256_mul_ps(iq2,jq3);
    qq31             = _mm256_mul_ps(iq3,jq1);
    qq32             = _mm256_mul_ps(iq3,jq2);
    qq33             = _mm256_mul_ps(iq3,jq3);

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->ic->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    rswitch_scalar   = fr->ic->rcoulomb_switch;
    rswitch          = _mm256_set1_ps(rswitch_scalar);
    /* Setup switch parameters */
    d_scalar         = rcutoff_scalar-rswitch_scalar;
    d                = _mm256_set1_ps(d_scalar);
    swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
    swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
    swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
    swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
    swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
    swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
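    /* Annotation (standard switch-function algebra, not generated code): with
     * d = r - rswitch and D = rcutoff - rswitch, the coefficients above build
     *   sw(d)  = 1 - 10*(d/D)^3 + 15*(d/D)^4 - 6*(d/D)^5
     *   dsw(d) =   -30*d^2/D^3 + 60*d^3/D^4 - 30*d^4/D^5   ( = d(sw)/dd )
     * so sw runs smoothly from 1 at rswitch to 0 at rcutoff, with first and
     * second derivatives vanishing at both ends of the switching region.
     */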

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();
        fix3             = _mm256_setzero_ps();
        fiy3             = _mm256_setzero_ps();
        fiz3             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();
        vvdwsum          = _mm256_setzero_ps();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {
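            /* Annotation: this unrolled body only runs on full batches of eight
             * real j entries (jjnr[jidx+7]>=0); the tail of the list, padded
             * with negative indices, is handled by the masked epilogue below.
             */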

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
                                                 &jy2,&jz2,&jx3,&jy3,&jz3);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx11             = _mm256_sub_ps(ix1,jx1);
            dy11             = _mm256_sub_ps(iy1,jy1);
            dz11             = _mm256_sub_ps(iz1,jz1);
            dx12             = _mm256_sub_ps(ix1,jx2);
            dy12             = _mm256_sub_ps(iy1,jy2);
            dz12             = _mm256_sub_ps(iz1,jz2);
            dx13             = _mm256_sub_ps(ix1,jx3);
            dy13             = _mm256_sub_ps(iy1,jy3);
            dz13             = _mm256_sub_ps(iz1,jz3);
            dx21             = _mm256_sub_ps(ix2,jx1);
            dy21             = _mm256_sub_ps(iy2,jy1);
            dz21             = _mm256_sub_ps(iz2,jz1);
            dx22             = _mm256_sub_ps(ix2,jx2);
            dy22             = _mm256_sub_ps(iy2,jy2);
            dz22             = _mm256_sub_ps(iz2,jz2);
            dx23             = _mm256_sub_ps(ix2,jx3);
            dy23             = _mm256_sub_ps(iy2,jy3);
            dz23             = _mm256_sub_ps(iz2,jz3);
            dx31             = _mm256_sub_ps(ix3,jx1);
            dy31             = _mm256_sub_ps(iy3,jy1);
            dz31             = _mm256_sub_ps(iz3,jz1);
            dx32             = _mm256_sub_ps(ix3,jx2);
            dy32             = _mm256_sub_ps(iy3,jy2);
            dz32             = _mm256_sub_ps(iz3,jz2);
            dx33             = _mm256_sub_ps(ix3,jx3);
            dy33             = _mm256_sub_ps(iy3,jy3);
            dz33             = _mm256_sub_ps(iz3,jz3);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
            rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
            rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
            rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
            rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
            rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
            rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);

            rinv00           = avx256_invsqrt_f(rsq00);
            rinv11           = avx256_invsqrt_f(rsq11);
            rinv12           = avx256_invsqrt_f(rsq12);
            rinv13           = avx256_invsqrt_f(rsq13);
            rinv21           = avx256_invsqrt_f(rsq21);
            rinv22           = avx256_invsqrt_f(rsq22);
            rinv23           = avx256_invsqrt_f(rsq23);
            rinv31           = avx256_invsqrt_f(rsq31);
            rinv32           = avx256_invsqrt_f(rsq32);
            rinv33           = avx256_invsqrt_f(rsq33);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
            rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
            rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
            rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
            rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
            rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
            rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
            rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
            rinvsq33         = _mm256_mul_ps(rinv33,rinv33);

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();
            fjx1             = _mm256_setzero_ps();
            fjy1             = _mm256_setzero_ps();
            fjz1             = _mm256_setzero_ps();
            fjx2             = _mm256_setzero_ps();
            fjy2             = _mm256_setzero_ps();
            fjz2             = _mm256_setzero_ps();
            fjx3             = _mm256_setzero_ps();
            fjy3             = _mm256_setzero_ps();
            fjz3             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);

            d                = _mm256_sub_ps(r00,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
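            /* Annotation: fscal stores F/r. For the switched potential
             * Vsw = V*sw we have Fsw = -dVsw/dr = F*sw - V*dsw, and dividing
             * by r gives the update on the next line: fvdw*sw - rinv*(vvdw*dsw). */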
            fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
            vvdw             = _mm256_mul_ps(vvdw,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = fvdw;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv11,pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);
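            /* Annotation (real-space Ewald identities, not generated code):
             * with z = beta*r, pmecorrV(z^2) approximates erf(z)/z, so
             *   velec = qq*(1/r - beta*erf(z)/z) = qq*erfc(beta*r)/r,
             * and felec likewise assembles the matching force term
             *   F/r = qq*(erfc(z)/r^3 + 2*beta*exp(-z^2)/(sqrt(pi)*r^2))
             * from the polynomial fit pmecorrF(z^2) plus the bare 1/r^3. */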

            d                = _mm256_sub_ps(r11,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv12,pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            d                = _mm256_sub_ps(r12,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv13,pmecorrV);
            velec            = _mm256_mul_ps(qq13,velec);

            d                = _mm256_sub_ps(r13,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv21,pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            d                = _mm256_sub_ps(r21,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv22,pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            d                = _mm256_sub_ps(r22,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv23,pmecorrV);
            velec            = _mm256_mul_ps(qq23,velec);

            d                = _mm256_sub_ps(r23,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv31,pmecorrV);
            velec            = _mm256_mul_ps(qq31,velec);

            d                = _mm256_sub_ps(r31,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv32,pmecorrV);
            velec            = _mm256_mul_ps(qq32,velec);

            d                = _mm256_sub_ps(r32,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv33,pmecorrV);
            velec            = _mm256_mul_ps(qq33,velec);

            d                = _mm256_sub_ps(r33,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);

            /* Inner loop uses 1034 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
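            /* Annotation (illustrative): _mm_cmplt_epi32 against zero maps each
             * negative padding index to 0xFFFFFFFF and each real index to 0x0;
             * e.g. a tail {12,7,-1,-1,-1,-1,-1,-1} gives lanes {0,0,~0,~0,...},
             * and _mm256_andnot_ps(dummy_mask,val) then zeroes exactly the
             * padded lanes while leaving the real ones untouched. */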
978                                             
979             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
980             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
981             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
982             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
983             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
984             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
985             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
986             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
987             j_coord_offsetA  = DIM*jnrA;
988             j_coord_offsetB  = DIM*jnrB;
989             j_coord_offsetC  = DIM*jnrC;
990             j_coord_offsetD  = DIM*jnrD;
991             j_coord_offsetE  = DIM*jnrE;
992             j_coord_offsetF  = DIM*jnrF;
993             j_coord_offsetG  = DIM*jnrG;
994             j_coord_offsetH  = DIM*jnrH;
995
996             /* load j atom coordinates */
997             gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
998                                                  x+j_coord_offsetC,x+j_coord_offsetD,
999                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1000                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1001                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1002                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
1003
1004             /* Calculate displacement vector */
1005             dx00             = _mm256_sub_ps(ix0,jx0);
1006             dy00             = _mm256_sub_ps(iy0,jy0);
1007             dz00             = _mm256_sub_ps(iz0,jz0);
1008             dx11             = _mm256_sub_ps(ix1,jx1);
1009             dy11             = _mm256_sub_ps(iy1,jy1);
1010             dz11             = _mm256_sub_ps(iz1,jz1);
1011             dx12             = _mm256_sub_ps(ix1,jx2);
1012             dy12             = _mm256_sub_ps(iy1,jy2);
1013             dz12             = _mm256_sub_ps(iz1,jz2);
1014             dx13             = _mm256_sub_ps(ix1,jx3);
1015             dy13             = _mm256_sub_ps(iy1,jy3);
1016             dz13             = _mm256_sub_ps(iz1,jz3);
1017             dx21             = _mm256_sub_ps(ix2,jx1);
1018             dy21             = _mm256_sub_ps(iy2,jy1);
1019             dz21             = _mm256_sub_ps(iz2,jz1);
1020             dx22             = _mm256_sub_ps(ix2,jx2);
1021             dy22             = _mm256_sub_ps(iy2,jy2);
1022             dz22             = _mm256_sub_ps(iz2,jz2);
1023             dx23             = _mm256_sub_ps(ix2,jx3);
1024             dy23             = _mm256_sub_ps(iy2,jy3);
1025             dz23             = _mm256_sub_ps(iz2,jz3);
1026             dx31             = _mm256_sub_ps(ix3,jx1);
1027             dy31             = _mm256_sub_ps(iy3,jy1);
1028             dz31             = _mm256_sub_ps(iz3,jz1);
1029             dx32             = _mm256_sub_ps(ix3,jx2);
1030             dy32             = _mm256_sub_ps(iy3,jy2);
1031             dz32             = _mm256_sub_ps(iz3,jz2);
1032             dx33             = _mm256_sub_ps(ix3,jx3);
1033             dy33             = _mm256_sub_ps(iy3,jy3);
1034             dz33             = _mm256_sub_ps(iz3,jz3);
1035
1036             /* Calculate squared distance and things based on it */
1037             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1038             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1039             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1040             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
1041             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1042             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1043             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
1044             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
1045             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
1046             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
1047
1048             rinv00           = avx256_invsqrt_f(rsq00);
1049             rinv11           = avx256_invsqrt_f(rsq11);
1050             rinv12           = avx256_invsqrt_f(rsq12);
1051             rinv13           = avx256_invsqrt_f(rsq13);
1052             rinv21           = avx256_invsqrt_f(rsq21);
1053             rinv22           = avx256_invsqrt_f(rsq22);
1054             rinv23           = avx256_invsqrt_f(rsq23);
1055             rinv31           = avx256_invsqrt_f(rsq31);
1056             rinv32           = avx256_invsqrt_f(rsq32);
1057             rinv33           = avx256_invsqrt_f(rsq33);
1058
1059             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1060             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1061             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1062             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
1063             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1064             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1065             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
1066             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
1067             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
1068             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
1069
1070             fjx0             = _mm256_setzero_ps();
1071             fjy0             = _mm256_setzero_ps();
1072             fjz0             = _mm256_setzero_ps();
1073             fjx1             = _mm256_setzero_ps();
1074             fjy1             = _mm256_setzero_ps();
1075             fjz1             = _mm256_setzero_ps();
1076             fjx2             = _mm256_setzero_ps();
1077             fjy2             = _mm256_setzero_ps();
1078             fjz2             = _mm256_setzero_ps();
1079             fjx3             = _mm256_setzero_ps();
1080             fjy3             = _mm256_setzero_ps();
1081             fjz3             = _mm256_setzero_ps();
1082
1083             /**************************
1084              * CALCULATE INTERACTIONS *
1085              **************************/
1086
1087             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1088             {
1089
1090             r00              = _mm256_mul_ps(rsq00,rinv00);
1091             r00              = _mm256_andnot_ps(dummy_mask,r00);

            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
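            /* The c6/c12 parameters loaded from fr->nbfp include the 6.0/12.0
             * derivative prefactors, so multiplying by one_twelfth/one_sixth
             * recovers the plain potential C12/r^12 - C6/r^6, while
             * (vvdw12-vvdw6)*rinvsq directly gives the force scale F/r.
             */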

            d                = _mm256_sub_ps(r00,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
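            /* Horner evaluation of the quintic switch and its derivative.
             * With t = d/(rcutoff-rswitch):
             *   sw  = 1 - 10*t^3 + 15*t^4 - 6*t^5
             *   dsw = d(sw)/d(d)
             * so sw goes smoothly from 1 at rswitch to 0 at rcutoff, with
             * first and second derivatives vanishing at both ends.
             */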

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
            vvdw             = _mm256_mul_ps(vvdw,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = fvdw;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq11,rcutoff2))
            {

            r11              = _mm256_mul_ps(rsq11,rinv11);
            r11              = _mm256_andnot_ps(dummy_mask,r11);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq11);
            rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq11,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv11,pmecorrV);
            velec            = _mm256_mul_ps(qq11,velec);
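            /* Real-space Ewald: velec = qq*erfc(beta*r)/r, obtained by
             * subtracting a polynomial approximation of erf(beta*r)/r
             * (beta*pmecorrV as a function of zeta2 = (beta*r)^2) from the
             * plain Coulomb 1/r; felec analogously adds the correction
             * beta^3*pmecorrF to the bare 1/r^3 force scale.
             */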

            d                = _mm256_sub_ps(r11,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx11);
            ty               = _mm256_mul_ps(fscal,dy11);
            tz               = _mm256_mul_ps(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq12,rcutoff2))
            {

            r12              = _mm256_mul_ps(rsq12,rinv12);
            r12              = _mm256_andnot_ps(dummy_mask,r12);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq12);
            rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq12,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv12,pmecorrV);
            velec            = _mm256_mul_ps(qq12,velec);

            d                = _mm256_sub_ps(r12,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx12);
            ty               = _mm256_mul_ps(fscal,dy12);
            tz               = _mm256_mul_ps(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq13,rcutoff2))
            {

            r13              = _mm256_mul_ps(rsq13,rinv13);
            r13              = _mm256_andnot_ps(dummy_mask,r13);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq13);
            rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq13,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv13,pmecorrV);
            velec            = _mm256_mul_ps(qq13,velec);

            d                = _mm256_sub_ps(r13,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx13);
            ty               = _mm256_mul_ps(fscal,dy13);
            tz               = _mm256_mul_ps(fscal,dz13);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq21,rcutoff2))
            {

            r21              = _mm256_mul_ps(rsq21,rinv21);
            r21              = _mm256_andnot_ps(dummy_mask,r21);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq21);
            rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq21,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv21,pmecorrV);
            velec            = _mm256_mul_ps(qq21,velec);

            d                = _mm256_sub_ps(r21,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx21);
            ty               = _mm256_mul_ps(fscal,dy21);
            tz               = _mm256_mul_ps(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_ps(rsq22,rinv22);
            r22              = _mm256_andnot_ps(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq22);
            rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq22,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv22,pmecorrV);
            velec            = _mm256_mul_ps(qq22,velec);

            d                = _mm256_sub_ps(r22,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx22);
            ty               = _mm256_mul_ps(fscal,dy22);
            tz               = _mm256_mul_ps(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_ps(rsq23,rinv23);
            r23              = _mm256_andnot_ps(dummy_mask,r23);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq23);
            rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq23,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv23,pmecorrV);
            velec            = _mm256_mul_ps(qq23,velec);

            d                = _mm256_sub_ps(r23,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx23);
            ty               = _mm256_mul_ps(fscal,dy23);
            tz               = _mm256_mul_ps(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_ps(rsq31,rinv31);
            r31              = _mm256_andnot_ps(dummy_mask,r31);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq31);
            rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq31,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv31,pmecorrV);
            velec            = _mm256_mul_ps(qq31,velec);

            d                = _mm256_sub_ps(r31,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx31);
            ty               = _mm256_mul_ps(fscal,dy31);
            tz               = _mm256_mul_ps(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx1             = _mm256_add_ps(fjx1,tx);
            fjy1             = _mm256_add_ps(fjy1,ty);
            fjz1             = _mm256_add_ps(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_ps(rsq32,rinv32);
            r32              = _mm256_andnot_ps(dummy_mask,r32);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq32);
            rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq32,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv32,pmecorrV);
            velec            = _mm256_mul_ps(qq32,velec);

            d                = _mm256_sub_ps(r32,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx32);
            ty               = _mm256_mul_ps(fscal,dy32);
            tz               = _mm256_mul_ps(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx2             = _mm256_add_ps(fjx2,tx);
            fjy2             = _mm256_add_ps(fjy2,ty);
            fjz2             = _mm256_add_ps(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_ps(rsq33,rinv33);
            r33              = _mm256_andnot_ps(dummy_mask,r33);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq33);
            rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
            pmecorrF         = avx256_pmecorrF_f(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq33,felec);
            pmecorrV         = avx256_pmecorrV_f(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(rinv33,pmecorrV);
            velec            = _mm256_mul_ps(qq33,velec);

            d                = _mm256_sub_ps(r33,rswitch);
            d                = _mm256_max_ps(d,_mm256_setzero_ps());
            d2               = _mm256_mul_ps(d,d);
            sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));

            dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
            velec            = _mm256_mul_ps(velec,sw);
            cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx33);
            ty               = _mm256_mul_ps(fscal,dy33);
            tz               = _mm256_mul_ps(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_ps(fix3,tx);
            fiy3             = _mm256_add_ps(fiy3,ty);
            fiz3             = _mm256_add_ps(fiz3,tz);

            fjx3             = _mm256_add_ps(fjx3,tx);
            fjy3             = _mm256_add_ps(fjy3,ty);
            fjz3             = _mm256_add_ps(fjz3,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

            gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
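            /* Newton's third law: the accumulated j forces are subtracted
             * from the force array via an 8-way transposed scatter; lanes
             * whose jnr index was padding point at the local scratch buffer,
             * so their contributions are discarded.
             */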

            /* Inner loop uses 1044 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
                                                 f+i_coord_offset,fshift+i_shift_offset);
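        /* The transposed reduction adds the per-lane i forces into f[] and
         * also accumulates them into fshift[], which is used for the virial.
         */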

        ggid                        = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
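        /* Each update is a horizontal sum over the eight SIMD lanes, added
         * to the energy-group pair bucket selected by ggid.
         */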

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 26 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*1044);
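    /* The iteration counts, weighted by the per-loop flop estimates noted
     * above, feed mdrun's flop accounting (nrnb) for the performance summary.
     */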
}
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LennardJones
 * Geometry:                   Water4-Water4
 * Calculate force/pot:        Force
 */
void
nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     struct t_forcerec           * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
     * jnr indices corresponding to data put in the eight positions in the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    real *           vdwioffsetptr3;
    __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
    __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
    __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D,vdwjidx3E,vdwjidx3F,vdwjidx3G,vdwjidx3H;
    __m256           jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256           dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
    __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256           dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
    __m256           dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
    __m256           dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
    __m256           dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
    real             rswitch_scalar,d_scalar;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->ic->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

    jq1              = _mm256_set1_ps(charge[inr+1]);
    jq2              = _mm256_set1_ps(charge[inr+2]);
    jq3              = _mm256_set1_ps(charge[inr+3]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
    qq11             = _mm256_mul_ps(iq1,jq1);
    qq12             = _mm256_mul_ps(iq1,jq2);
    qq13             = _mm256_mul_ps(iq1,jq3);
    qq21             = _mm256_mul_ps(iq2,jq1);
    qq22             = _mm256_mul_ps(iq2,jq2);
    qq23             = _mm256_mul_ps(iq2,jq3);
    qq31             = _mm256_mul_ps(iq3,jq1);
    qq32             = _mm256_mul_ps(iq3,jq2);
    qq33             = _mm256_mul_ps(iq3,jq3);
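    /* All waters share one topology, so the parameters are broadcast once
     * outside the loops: atom 0 interacts only through Lennard-Jones
     * (c6_00/c12_00), while atoms 1-3 carry the charges, giving the nine
     * precomputed charge products qq11..qq33.
     */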

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->ic->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    rswitch_scalar   = fr->ic->rcoulomb_switch;
    rswitch          = _mm256_set1_ps(rswitch_scalar);
    /* Setup switch parameters */
    d_scalar         = rcutoff_scalar-rswitch_scalar;
    d                = _mm256_set1_ps(d_scalar);
    swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
    swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
    swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
    swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
    swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
    swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
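    /* swV3..swV5 are the coefficients of the quintic switch
     * sw(d) = 1 + swV3*d^3 + swV4*d^4 + swV5*d^5 over d in [0, rcutoff-rswitch];
     * swF2..swF4 are the coefficients of its derivative, used for the force
     * contribution of the switch.
     */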

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();
        fix3             = _mm256_setzero_ps();
        fiy3             = _mm256_setzero_ps();
        fiz3             = _mm256_setzero_ps();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {
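
            /* This unmasked loop runs while a full batch of eight real j
             * atoms is available (jjnr[jidx+7]>=0); the remainder of the
             * list is handled by a separate, masked epilogue loop.
             */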
1947
1948             /* Get j neighbor index, and coordinate index */
1949             jnrA             = jjnr[jidx];
1950             jnrB             = jjnr[jidx+1];
1951             jnrC             = jjnr[jidx+2];
1952             jnrD             = jjnr[jidx+3];
1953             jnrE             = jjnr[jidx+4];
1954             jnrF             = jjnr[jidx+5];
1955             jnrG             = jjnr[jidx+6];
1956             jnrH             = jjnr[jidx+7];
1957             j_coord_offsetA  = DIM*jnrA;
1958             j_coord_offsetB  = DIM*jnrB;
1959             j_coord_offsetC  = DIM*jnrC;
1960             j_coord_offsetD  = DIM*jnrD;
1961             j_coord_offsetE  = DIM*jnrE;
1962             j_coord_offsetF  = DIM*jnrF;
1963             j_coord_offsetG  = DIM*jnrG;
1964             j_coord_offsetH  = DIM*jnrH;
1965
1966             /* load j atom coordinates */
1967             gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1968                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1969                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1970                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1971                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1972                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
1973
1974             /* Calculate displacement vector */
1975             dx00             = _mm256_sub_ps(ix0,jx0);
1976             dy00             = _mm256_sub_ps(iy0,jy0);
1977             dz00             = _mm256_sub_ps(iz0,jz0);
1978             dx11             = _mm256_sub_ps(ix1,jx1);
1979             dy11             = _mm256_sub_ps(iy1,jy1);
1980             dz11             = _mm256_sub_ps(iz1,jz1);
1981             dx12             = _mm256_sub_ps(ix1,jx2);
1982             dy12             = _mm256_sub_ps(iy1,jy2);
1983             dz12             = _mm256_sub_ps(iz1,jz2);
1984             dx13             = _mm256_sub_ps(ix1,jx3);
1985             dy13             = _mm256_sub_ps(iy1,jy3);
1986             dz13             = _mm256_sub_ps(iz1,jz3);
1987             dx21             = _mm256_sub_ps(ix2,jx1);
1988             dy21             = _mm256_sub_ps(iy2,jy1);
1989             dz21             = _mm256_sub_ps(iz2,jz1);
1990             dx22             = _mm256_sub_ps(ix2,jx2);
1991             dy22             = _mm256_sub_ps(iy2,jy2);
1992             dz22             = _mm256_sub_ps(iz2,jz2);
1993             dx23             = _mm256_sub_ps(ix2,jx3);
1994             dy23             = _mm256_sub_ps(iy2,jy3);
1995             dz23             = _mm256_sub_ps(iz2,jz3);
1996             dx31             = _mm256_sub_ps(ix3,jx1);
1997             dy31             = _mm256_sub_ps(iy3,jy1);
1998             dz31             = _mm256_sub_ps(iz3,jz1);
1999             dx32             = _mm256_sub_ps(ix3,jx2);
2000             dy32             = _mm256_sub_ps(iy3,jy2);
2001             dz32             = _mm256_sub_ps(iz3,jz2);
2002             dx33             = _mm256_sub_ps(ix3,jx3);
2003             dy33             = _mm256_sub_ps(iy3,jy3);
2004             dz33             = _mm256_sub_ps(iz3,jz3);
2005
2006             /* Calculate squared distance and things based on it */
2007             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2008             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2009             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2010             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
2011             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2012             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2013             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
2014             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
2015             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
2016             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
2017
2018             rinv00           = avx256_invsqrt_f(rsq00);
2019             rinv11           = avx256_invsqrt_f(rsq11);
2020             rinv12           = avx256_invsqrt_f(rsq12);
2021             rinv13           = avx256_invsqrt_f(rsq13);
2022             rinv21           = avx256_invsqrt_f(rsq21);
2023             rinv22           = avx256_invsqrt_f(rsq22);
2024             rinv23           = avx256_invsqrt_f(rsq23);
2025             rinv31           = avx256_invsqrt_f(rsq31);
2026             rinv32           = avx256_invsqrt_f(rsq32);
2027             rinv33           = avx256_invsqrt_f(rsq33);
2028
2029             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
2030             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2031             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2032             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
2033             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2034             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2035             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
2036             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
2037             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
2038             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
2039
2040             fjx0             = _mm256_setzero_ps();
2041             fjy0             = _mm256_setzero_ps();
2042             fjz0             = _mm256_setzero_ps();
2043             fjx1             = _mm256_setzero_ps();
2044             fjy1             = _mm256_setzero_ps();
2045             fjz1             = _mm256_setzero_ps();
2046             fjx2             = _mm256_setzero_ps();
2047             fjy2             = _mm256_setzero_ps();
2048             fjz2             = _mm256_setzero_ps();
2049             fjx3             = _mm256_setzero_ps();
2050             fjy3             = _mm256_setzero_ps();
2051             fjz3             = _mm256_setzero_ps();
2052
2053             /**************************
2054              * CALCULATE INTERACTIONS *
2055              **************************/
2056
2057             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2058             {
2059
2060             r00              = _mm256_mul_ps(rsq00,rinv00);
2061
2062             /* LENNARD-JONES DISPERSION/REPULSION */
2063
2064             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2065             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
2066             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
2067             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
2068             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
2069
2070             d                = _mm256_sub_ps(r00,rswitch);
2071             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2072             d2               = _mm256_mul_ps(d,d);
2073             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2074
2075             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2076
2077             /* Evaluate switch function */
2078             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2079             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
2080             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2081
2082             fscal            = fvdw;
2083
2084             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2085
2086             /* Calculate temporary vectorial force */
2087             tx               = _mm256_mul_ps(fscal,dx00);
2088             ty               = _mm256_mul_ps(fscal,dy00);
2089             tz               = _mm256_mul_ps(fscal,dz00);
2090
2091             /* Update vectorial force */
2092             fix0             = _mm256_add_ps(fix0,tx);
2093             fiy0             = _mm256_add_ps(fiy0,ty);
2094             fiz0             = _mm256_add_ps(fiz0,tz);
2095
2096             fjx0             = _mm256_add_ps(fjx0,tx);
2097             fjy0             = _mm256_add_ps(fjy0,ty);
2098             fjz0             = _mm256_add_ps(fjz0,tz);
2099
2100             }
2101
2102             /**************************
2103              * CALCULATE INTERACTIONS *
2104              **************************/
2105
2106             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2107             {
2108
2109             r11              = _mm256_mul_ps(rsq11,rinv11);
2110
2111             /* EWALD ELECTROSTATICS */
2112             
2113             /* Analytical PME correction */
2114             zeta2            = _mm256_mul_ps(beta2,rsq11);
2115             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2116             pmecorrF         = avx256_pmecorrF_f(zeta2);
2117             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2118             felec            = _mm256_mul_ps(qq11,felec);
2119             pmecorrV         = avx256_pmecorrV_f(zeta2);
2120             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2121             velec            = _mm256_sub_ps(rinv11,pmecorrV);
2122             velec            = _mm256_mul_ps(qq11,velec);
2123             
2124             d                = _mm256_sub_ps(r11,rswitch);
2125             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2126             d2               = _mm256_mul_ps(d,d);
2127             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2128
2129             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2130
2131             /* Evaluate switch function */
2132             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2133             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
2134             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2135
2136             fscal            = felec;
2137
2138             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2139
2140             /* Calculate temporary vectorial force */
2141             tx               = _mm256_mul_ps(fscal,dx11);
2142             ty               = _mm256_mul_ps(fscal,dy11);
2143             tz               = _mm256_mul_ps(fscal,dz11);
2144
2145             /* Update vectorial force */
2146             fix1             = _mm256_add_ps(fix1,tx);
2147             fiy1             = _mm256_add_ps(fiy1,ty);
2148             fiz1             = _mm256_add_ps(fiz1,tz);
2149
2150             fjx1             = _mm256_add_ps(fjx1,tx);
2151             fjy1             = _mm256_add_ps(fjy1,ty);
2152             fjz1             = _mm256_add_ps(fjz1,tz);
2153
2154             }
2155
2156             /**************************
2157              * CALCULATE INTERACTIONS *
2158              **************************/
2159
2160             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2161             {
2162
2163             r12              = _mm256_mul_ps(rsq12,rinv12);
2164
2165             /* EWALD ELECTROSTATICS */
2166             
2167             /* Analytical PME correction */
2168             zeta2            = _mm256_mul_ps(beta2,rsq12);
2169             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2170             pmecorrF         = avx256_pmecorrF_f(zeta2);
2171             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2172             felec            = _mm256_mul_ps(qq12,felec);
2173             pmecorrV         = avx256_pmecorrV_f(zeta2);
2174             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2175             velec            = _mm256_sub_ps(rinv12,pmecorrV);
2176             velec            = _mm256_mul_ps(qq12,velec);
2177             
2178             d                = _mm256_sub_ps(r12,rswitch);
2179             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2180             d2               = _mm256_mul_ps(d,d);
2181             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2182
2183             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2184
2185             /* Evaluate switch function */
2186             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2187             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
2188             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2189
2190             fscal            = felec;
2191
2192             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2193
2194             /* Calculate temporary vectorial force */
2195             tx               = _mm256_mul_ps(fscal,dx12);
2196             ty               = _mm256_mul_ps(fscal,dy12);
2197             tz               = _mm256_mul_ps(fscal,dz12);
2198
2199             /* Update vectorial force */
2200             fix1             = _mm256_add_ps(fix1,tx);
2201             fiy1             = _mm256_add_ps(fiy1,ty);
2202             fiz1             = _mm256_add_ps(fiz1,tz);
2203
2204             fjx2             = _mm256_add_ps(fjx2,tx);
2205             fjy2             = _mm256_add_ps(fjy2,ty);
2206             fjz2             = _mm256_add_ps(fjz2,tz);
2207
2208             }
2209
2210             /**************************
2211              * CALCULATE INTERACTIONS *
2212              **************************/
2213
2214             if (gmx_mm256_any_lt(rsq13,rcutoff2))
2215             {
2216
2217             r13              = _mm256_mul_ps(rsq13,rinv13);
2218
2219             /* EWALD ELECTROSTATICS */
2220             
2221             /* Analytical PME correction */
2222             zeta2            = _mm256_mul_ps(beta2,rsq13);
2223             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
2224             pmecorrF         = avx256_pmecorrF_f(zeta2);
2225             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2226             felec            = _mm256_mul_ps(qq13,felec);
2227             pmecorrV         = avx256_pmecorrV_f(zeta2);
2228             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2229             velec            = _mm256_sub_ps(rinv13,pmecorrV);
2230             velec            = _mm256_mul_ps(qq13,velec);
2231             
2232             d                = _mm256_sub_ps(r13,rswitch);
2233             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2234             d2               = _mm256_mul_ps(d,d);
2235             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2236
2237             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2238
2239             /* Evaluate switch function */
2240             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2241             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
2242             cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
2243
2244             fscal            = felec;
2245
2246             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2247
2248             /* Calculate temporary vectorial force */
2249             tx               = _mm256_mul_ps(fscal,dx13);
2250             ty               = _mm256_mul_ps(fscal,dy13);
2251             tz               = _mm256_mul_ps(fscal,dz13);
2252
2253             /* Update vectorial force */
2254             fix1             = _mm256_add_ps(fix1,tx);
2255             fiy1             = _mm256_add_ps(fiy1,ty);
2256             fiz1             = _mm256_add_ps(fiz1,tz);
2257
2258             fjx3             = _mm256_add_ps(fjx3,tx);
2259             fjy3             = _mm256_add_ps(fjy3,ty);
2260             fjz3             = _mm256_add_ps(fjz3,tz);
2261
2262             }
2263
2264             /**************************
2265              * CALCULATE INTERACTIONS *
2266              **************************/
2267
2268             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2269             {
2270
2271             r21              = _mm256_mul_ps(rsq21,rinv21);
2272
2273             /* EWALD ELECTROSTATICS */
2274             
2275             /* Analytical PME correction */
2276             zeta2            = _mm256_mul_ps(beta2,rsq21);
2277             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2278             pmecorrF         = avx256_pmecorrF_f(zeta2);
2279             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2280             felec            = _mm256_mul_ps(qq21,felec);
2281             pmecorrV         = avx256_pmecorrV_f(zeta2);
2282             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2283             velec            = _mm256_sub_ps(rinv21,pmecorrV);
2284             velec            = _mm256_mul_ps(qq21,velec);
2285             
2286             d                = _mm256_sub_ps(r21,rswitch);
2287             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2288             d2               = _mm256_mul_ps(d,d);
2289             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2290
2291             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2292
2293             /* Evaluate switch function */
2294             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2295             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
2296             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2297
2298             fscal            = felec;
2299
2300             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2301
2302             /* Calculate temporary vectorial force */
2303             tx               = _mm256_mul_ps(fscal,dx21);
2304             ty               = _mm256_mul_ps(fscal,dy21);
2305             tz               = _mm256_mul_ps(fscal,dz21);
2306
2307             /* Update vectorial force */
2308             fix2             = _mm256_add_ps(fix2,tx);
2309             fiy2             = _mm256_add_ps(fiy2,ty);
2310             fiz2             = _mm256_add_ps(fiz2,tz);
2311
2312             fjx1             = _mm256_add_ps(fjx1,tx);
2313             fjy1             = _mm256_add_ps(fjy1,ty);
2314             fjz1             = _mm256_add_ps(fjz1,tz);
2315
2316             }
2317
2318             /**************************
2319              * CALCULATE INTERACTIONS *
2320              **************************/
2321
2322             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2323             {
2324
2325             r22              = _mm256_mul_ps(rsq22,rinv22);
2326
2327             /* EWALD ELECTROSTATICS */
2328             
2329             /* Analytical PME correction */
2330             zeta2            = _mm256_mul_ps(beta2,rsq22);
2331             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
2332             pmecorrF         = avx256_pmecorrF_f(zeta2);
2333             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2334             felec            = _mm256_mul_ps(qq22,felec);
2335             pmecorrV         = avx256_pmecorrV_f(zeta2);
2336             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2337             velec            = _mm256_sub_ps(rinv22,pmecorrV);
2338             velec            = _mm256_mul_ps(qq22,velec);
2339             
2340             d                = _mm256_sub_ps(r22,rswitch);
2341             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2342             d2               = _mm256_mul_ps(d,d);
2343             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2344
2345             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2346
2347             /* Evaluate switch function */
2348             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2349             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
2350             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
2351
2352             fscal            = felec;
2353
2354             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2355
2356             /* Calculate temporary vectorial force */
2357             tx               = _mm256_mul_ps(fscal,dx22);
2358             ty               = _mm256_mul_ps(fscal,dy22);
2359             tz               = _mm256_mul_ps(fscal,dz22);
2360
2361             /* Update vectorial force */
2362             fix2             = _mm256_add_ps(fix2,tx);
2363             fiy2             = _mm256_add_ps(fiy2,ty);
2364             fiz2             = _mm256_add_ps(fiz2,tz);
2365
2366             fjx2             = _mm256_add_ps(fjx2,tx);
2367             fjy2             = _mm256_add_ps(fjy2,ty);
2368             fjz2             = _mm256_add_ps(fjz2,tz);
2369
2370             }
2371
2372             /**************************
2373              * CALCULATE INTERACTIONS *
2374              **************************/
2375
2376             if (gmx_mm256_any_lt(rsq23,rcutoff2))
2377             {
2378
2379             r23              = _mm256_mul_ps(rsq23,rinv23);
2380
2381             /* EWALD ELECTROSTATICS */
2382             
2383             /* Analytical PME correction */
2384             zeta2            = _mm256_mul_ps(beta2,rsq23);
2385             rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
2386             pmecorrF         = avx256_pmecorrF_f(zeta2);
2387             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2388             felec            = _mm256_mul_ps(qq23,felec);
2389             pmecorrV         = avx256_pmecorrV_f(zeta2);
2390             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2391             velec            = _mm256_sub_ps(rinv23,pmecorrV);
2392             velec            = _mm256_mul_ps(qq23,velec);
2393             
2394             d                = _mm256_sub_ps(r23,rswitch);
2395             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2396             d2               = _mm256_mul_ps(d,d);
2397             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2398
2399             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2400
2401             /* Evaluate switch function */
2402             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2403             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
2404             cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
2405
2406             fscal            = felec;
2407
2408             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2409
2410             /* Calculate temporary vectorial force */
2411             tx               = _mm256_mul_ps(fscal,dx23);
2412             ty               = _mm256_mul_ps(fscal,dy23);
2413             tz               = _mm256_mul_ps(fscal,dz23);
2414
2415             /* Update vectorial force */
2416             fix2             = _mm256_add_ps(fix2,tx);
2417             fiy2             = _mm256_add_ps(fiy2,ty);
2418             fiz2             = _mm256_add_ps(fiz2,tz);
2419
2420             fjx3             = _mm256_add_ps(fjx3,tx);
2421             fjy3             = _mm256_add_ps(fjy3,ty);
2422             fjz3             = _mm256_add_ps(fjz3,tz);
2423
2424             }
2425
2426             /**************************
2427              * CALCULATE INTERACTIONS *
2428              **************************/
2429
2430             if (gmx_mm256_any_lt(rsq31,rcutoff2))
2431             {
2432
2433             r31              = _mm256_mul_ps(rsq31,rinv31);
2434
2435             /* EWALD ELECTROSTATICS */
2436             
2437             /* Analytical PME correction */
2438             zeta2            = _mm256_mul_ps(beta2,rsq31);
2439             rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
2440             pmecorrF         = avx256_pmecorrF_f(zeta2);
2441             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2442             felec            = _mm256_mul_ps(qq31,felec);
2443             pmecorrV         = avx256_pmecorrV_f(zeta2);
2444             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2445             velec            = _mm256_sub_ps(rinv31,pmecorrV);
2446             velec            = _mm256_mul_ps(qq31,velec);
2447             
2448             d                = _mm256_sub_ps(r31,rswitch);
2449             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2450             d2               = _mm256_mul_ps(d,d);
2451             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2452
2453             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2454
2455             /* Evaluate switch function */
2456             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2457             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
2458             cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
2459
2460             fscal            = felec;
2461
2462             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2463
2464             /* Calculate temporary vectorial force */
2465             tx               = _mm256_mul_ps(fscal,dx31);
2466             ty               = _mm256_mul_ps(fscal,dy31);
2467             tz               = _mm256_mul_ps(fscal,dz31);
2468
2469             /* Update vectorial force */
2470             fix3             = _mm256_add_ps(fix3,tx);
2471             fiy3             = _mm256_add_ps(fiy3,ty);
2472             fiz3             = _mm256_add_ps(fiz3,tz);
2473
2474             fjx1             = _mm256_add_ps(fjx1,tx);
2475             fjy1             = _mm256_add_ps(fjy1,ty);
2476             fjz1             = _mm256_add_ps(fjz1,tz);
2477
2478             }
2479
2480             /**************************
2481              * CALCULATE INTERACTIONS *
2482              **************************/
2483
2484             if (gmx_mm256_any_lt(rsq32,rcutoff2))
2485             {
2486
2487             r32              = _mm256_mul_ps(rsq32,rinv32);
2488
2489             /* EWALD ELECTROSTATICS */
2490             
2491             /* Analytical PME correction */
2492             zeta2            = _mm256_mul_ps(beta2,rsq32);
2493             rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
2494             pmecorrF         = avx256_pmecorrF_f(zeta2);
2495             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2496             felec            = _mm256_mul_ps(qq32,felec);
2497             pmecorrV         = avx256_pmecorrV_f(zeta2);
2498             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2499             velec            = _mm256_sub_ps(rinv32,pmecorrV);
2500             velec            = _mm256_mul_ps(qq32,velec);
2501             
2502             d                = _mm256_sub_ps(r32,rswitch);
2503             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2504             d2               = _mm256_mul_ps(d,d);
2505             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2506
2507             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2508
2509             /* Evaluate switch function */
2510             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2511             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
2512             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
2513
2514             fscal            = felec;
2515
2516             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2517
2518             /* Calculate temporary vectorial force */
2519             tx               = _mm256_mul_ps(fscal,dx32);
2520             ty               = _mm256_mul_ps(fscal,dy32);
2521             tz               = _mm256_mul_ps(fscal,dz32);
2522
2523             /* Update vectorial force */
2524             fix3             = _mm256_add_ps(fix3,tx);
2525             fiy3             = _mm256_add_ps(fiy3,ty);
2526             fiz3             = _mm256_add_ps(fiz3,tz);
2527
2528             fjx2             = _mm256_add_ps(fjx2,tx);
2529             fjy2             = _mm256_add_ps(fjy2,ty);
2530             fjz2             = _mm256_add_ps(fjz2,tz);
2531
2532             }
2533
2534             /**************************
2535              * CALCULATE INTERACTIONS *
2536              **************************/
2537
2538             if (gmx_mm256_any_lt(rsq33,rcutoff2))
2539             {
2540
2541             r33              = _mm256_mul_ps(rsq33,rinv33);
2542
2543             /* EWALD ELECTROSTATICS */
2544             
2545             /* Analytical PME correction */
2546             zeta2            = _mm256_mul_ps(beta2,rsq33);
2547             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
2548             pmecorrF         = avx256_pmecorrF_f(zeta2);
2549             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2550             felec            = _mm256_mul_ps(qq33,felec);
2551             pmecorrV         = avx256_pmecorrV_f(zeta2);
2552             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2553             velec            = _mm256_sub_ps(rinv33,pmecorrV);
2554             velec            = _mm256_mul_ps(qq33,velec);
2555             
2556             d                = _mm256_sub_ps(r33,rswitch);
2557             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2558             d2               = _mm256_mul_ps(d,d);
2559             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2560
2561             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2562
2563             /* Evaluate switch function */
2564             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2565             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
2566             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
2567
2568             fscal            = felec;
2569
2570             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2571
2572             /* Calculate temporary vectorial force */
2573             tx               = _mm256_mul_ps(fscal,dx33);
2574             ty               = _mm256_mul_ps(fscal,dy33);
2575             tz               = _mm256_mul_ps(fscal,dz33);
2576
2577             /* Update vectorial force */
2578             fix3             = _mm256_add_ps(fix3,tx);
2579             fiy3             = _mm256_add_ps(fiy3,ty);
2580             fiz3             = _mm256_add_ps(fiz3,tz);
2581
2582             fjx3             = _mm256_add_ps(fjx3,tx);
2583             fjy3             = _mm256_add_ps(fjy3,ty);
2584             fjz3             = _mm256_add_ps(fjz3,tz);
2585
2586             }
2587
2588             fjptrA             = f+j_coord_offsetA;
2589             fjptrB             = f+j_coord_offsetB;
2590             fjptrC             = f+j_coord_offsetC;
2591             fjptrD             = f+j_coord_offsetD;
2592             fjptrE             = f+j_coord_offsetE;
2593             fjptrF             = f+j_coord_offsetF;
2594             fjptrG             = f+j_coord_offsetG;
2595             fjptrH             = f+j_coord_offsetH;
2596
2597             gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2598                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2599                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
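            /* The accumulated j forces are transposed from the SoA registers back
             * to per-atom rvec layout and subtracted from f[] for all eight
             * j waters at once (Newton's third law: the j atoms receive -f_ij).
             */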
2600
2601             /* Inner loop uses 1004 flops */
2602         }
2603
2604         if(jidx<j_index_end)
2605         {
2606
2607             /* Get j neighbor index, and coordinate index */
2608             jnrlistA         = jjnr[jidx];
2609             jnrlistB         = jjnr[jidx+1];
2610             jnrlistC         = jjnr[jidx+2];
2611             jnrlistD         = jjnr[jidx+3];
2612             jnrlistE         = jjnr[jidx+4];
2613             jnrlistF         = jjnr[jidx+5];
2614             jnrlistG         = jjnr[jidx+6];
2615             jnrlistH         = jjnr[jidx+7];
2616             /* The sign of each jnr element is negative for non-real (padding) atoms.
2617              * The mask below is 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2618              * so it can be used as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
2619              */
2620             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
2621                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
2622                                             
2623             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2624             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2625             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2626             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2627             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
2628             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
2629             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
2630             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
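            /* Negative jnr values mark padding entries; clamping them to atom 0
             * keeps the gathered loads in bounds, while dummy_mask (and the
             * scratch force buffer used at the end of this block) discards
             * whatever gets computed for those lanes.
             */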
2631             j_coord_offsetA  = DIM*jnrA;
2632             j_coord_offsetB  = DIM*jnrB;
2633             j_coord_offsetC  = DIM*jnrC;
2634             j_coord_offsetD  = DIM*jnrD;
2635             j_coord_offsetE  = DIM*jnrE;
2636             j_coord_offsetF  = DIM*jnrF;
2637             j_coord_offsetG  = DIM*jnrG;
2638             j_coord_offsetH  = DIM*jnrH;
2639
2640             /* load j atom coordinates */
2641             gmx_mm256_load_4rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
2642                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2643                                                  x+j_coord_offsetE,x+j_coord_offsetF,
2644                                                  x+j_coord_offsetG,x+j_coord_offsetH,
2645                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
2646                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
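            /* This gathers x/y/z for all four sites of the eight j waters and
             * transposes them into structure-of-arrays AVX registers, one
             * register per coordinate component per site.
             */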
2647
2648             /* Calculate displacement vector */
2649             dx00             = _mm256_sub_ps(ix0,jx0);
2650             dy00             = _mm256_sub_ps(iy0,jy0);
2651             dz00             = _mm256_sub_ps(iz0,jz0);
2652             dx11             = _mm256_sub_ps(ix1,jx1);
2653             dy11             = _mm256_sub_ps(iy1,jy1);
2654             dz11             = _mm256_sub_ps(iz1,jz1);
2655             dx12             = _mm256_sub_ps(ix1,jx2);
2656             dy12             = _mm256_sub_ps(iy1,jy2);
2657             dz12             = _mm256_sub_ps(iz1,jz2);
2658             dx13             = _mm256_sub_ps(ix1,jx3);
2659             dy13             = _mm256_sub_ps(iy1,jy3);
2660             dz13             = _mm256_sub_ps(iz1,jz3);
2661             dx21             = _mm256_sub_ps(ix2,jx1);
2662             dy21             = _mm256_sub_ps(iy2,jy1);
2663             dz21             = _mm256_sub_ps(iz2,jz1);
2664             dx22             = _mm256_sub_ps(ix2,jx2);
2665             dy22             = _mm256_sub_ps(iy2,jy2);
2666             dz22             = _mm256_sub_ps(iz2,jz2);
2667             dx23             = _mm256_sub_ps(ix2,jx3);
2668             dy23             = _mm256_sub_ps(iy2,jy3);
2669             dz23             = _mm256_sub_ps(iz2,jz3);
2670             dx31             = _mm256_sub_ps(ix3,jx1);
2671             dy31             = _mm256_sub_ps(iy3,jy1);
2672             dz31             = _mm256_sub_ps(iz3,jz1);
2673             dx32             = _mm256_sub_ps(ix3,jx2);
2674             dy32             = _mm256_sub_ps(iy3,jy2);
2675             dz32             = _mm256_sub_ps(iz3,jz2);
2676             dx33             = _mm256_sub_ps(ix3,jx3);
2677             dy33             = _mm256_sub_ps(iy3,jy3);
2678             dz33             = _mm256_sub_ps(iz3,jz3);
2679
2680             /* Calculate squared distance and things based on it */
2681             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
2682             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
2683             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
2684             rsq13            = gmx_mm256_calc_rsq_ps(dx13,dy13,dz13);
2685             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
2686             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
2687             rsq23            = gmx_mm256_calc_rsq_ps(dx23,dy23,dz23);
2688             rsq31            = gmx_mm256_calc_rsq_ps(dx31,dy31,dz31);
2689             rsq32            = gmx_mm256_calc_rsq_ps(dx32,dy32,dz32);
2690             rsq33            = gmx_mm256_calc_rsq_ps(dx33,dy33,dz33);
2691
2692             rinv00           = avx256_invsqrt_f(rsq00);
2693             rinv11           = avx256_invsqrt_f(rsq11);
2694             rinv12           = avx256_invsqrt_f(rsq12);
2695             rinv13           = avx256_invsqrt_f(rsq13);
2696             rinv21           = avx256_invsqrt_f(rsq21);
2697             rinv22           = avx256_invsqrt_f(rsq22);
2698             rinv23           = avx256_invsqrt_f(rsq23);
2699             rinv31           = avx256_invsqrt_f(rsq31);
2700             rinv32           = avx256_invsqrt_f(rsq32);
2701             rinv33           = avx256_invsqrt_f(rsq33);
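            /* avx256_invsqrt_f returns 1/sqrt(rsq); in single precision the SIMD
             * support layer typically uses the hardware rsqrt estimate refined by
             * a Newton-Raphson iteration (an implementation detail of that layer,
             * not part of this generated kernel).
             */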
2702
2703             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
2704             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
2705             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
2706             rinvsq13         = _mm256_mul_ps(rinv13,rinv13);
2707             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
2708             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
2709             rinvsq23         = _mm256_mul_ps(rinv23,rinv23);
2710             rinvsq31         = _mm256_mul_ps(rinv31,rinv31);
2711             rinvsq32         = _mm256_mul_ps(rinv32,rinv32);
2712             rinvsq33         = _mm256_mul_ps(rinv33,rinv33);
2713
2714             fjx0             = _mm256_setzero_ps();
2715             fjy0             = _mm256_setzero_ps();
2716             fjz0             = _mm256_setzero_ps();
2717             fjx1             = _mm256_setzero_ps();
2718             fjy1             = _mm256_setzero_ps();
2719             fjz1             = _mm256_setzero_ps();
2720             fjx2             = _mm256_setzero_ps();
2721             fjy2             = _mm256_setzero_ps();
2722             fjz2             = _mm256_setzero_ps();
2723             fjx3             = _mm256_setzero_ps();
2724             fjy3             = _mm256_setzero_ps();
2725             fjz3             = _mm256_setzero_ps();
2726
2727             /**************************
2728              * CALCULATE INTERACTIONS *
2729              **************************/
2730
2731             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2732             {
2733
2734             r00              = _mm256_mul_ps(rsq00,rinv00);
2735             r00              = _mm256_andnot_ps(dummy_mask,r00);
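            /* Zeroing r in dummy lanes matters here: padded entries can have
             * rsq = 0, giving rinv = inf and r = 0*inf = NaN, which would
             * otherwise propagate through the switch polynomial below.
             */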
2736
2737             /* LENNARD-JONES DISPERSION/REPULSION */
2738
2739             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
2740             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
2741             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
2742             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
2743             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
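            /* Plain 12-6 Lennard-Jones. The c6/c12 kernel parameters are stored
             * by GROMACS with the derivative prefactors folded in (factors 6 and
             * 12), so vvdw = vvdw12/12 - vvdw6/6 recovers V = C12/r^12 - C6/r^6
             * and fvdw = (vvdw12 - vvdw6)/r^2 is the matching F/r.
             */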
2744
2745             d                = _mm256_sub_ps(r00,rswitch);
2746             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2747             d2               = _mm256_mul_ps(d,d);
2748             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2749
2750             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2751
2752             /* Evaluate switch function */
2753             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2754             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
2755             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
2756
2757             fscal            = fvdw;
2758
2759             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2760
2761             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2762
2763             /* Calculate temporary vectorial force */
2764             tx               = _mm256_mul_ps(fscal,dx00);
2765             ty               = _mm256_mul_ps(fscal,dy00);
2766             tz               = _mm256_mul_ps(fscal,dz00);
2767
2768             /* Update vectorial force */
2769             fix0             = _mm256_add_ps(fix0,tx);
2770             fiy0             = _mm256_add_ps(fiy0,ty);
2771             fiz0             = _mm256_add_ps(fiz0,tz);
2772
2773             fjx0             = _mm256_add_ps(fjx0,tx);
2774             fjy0             = _mm256_add_ps(fjy0,ty);
2775             fjz0             = _mm256_add_ps(fjz0,tz);
2776
2777             }
2778
2779             /**************************
2780              * CALCULATE INTERACTIONS *
2781              **************************/
2782
2783             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2784             {
2785
2786             r11              = _mm256_mul_ps(rsq11,rinv11);
2787             r11              = _mm256_andnot_ps(dummy_mask,r11);
2788
2789             /* EWALD ELECTROSTATICS */
2790             
2791             /* Analytical PME correction */
2792             zeta2            = _mm256_mul_ps(beta2,rsq11);
2793             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2794             pmecorrF         = avx256_pmecorrF_f(zeta2);
2795             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2796             felec            = _mm256_mul_ps(qq11,felec);
2797             pmecorrV         = avx256_pmecorrV_f(zeta2);
2798             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2799             velec            = _mm256_sub_ps(rinv11,pmecorrV);
2800             velec            = _mm256_mul_ps(qq11,velec);
2801             
2802             d                = _mm256_sub_ps(r11,rswitch);
2803             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2804             d2               = _mm256_mul_ps(d,d);
2805             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2806
2807             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2808
2809             /* Evaluate switch function */
2810             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2811             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv11,_mm256_mul_ps(velec,dsw)) );
2812             cutoff_mask      = _mm256_cmp_ps(rsq11,rcutoff2,_CMP_LT_OQ);
2813
2814             fscal            = felec;
2815
2816             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2817
2818             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2819
2820             /* Calculate temporary vectorial force */
2821             tx               = _mm256_mul_ps(fscal,dx11);
2822             ty               = _mm256_mul_ps(fscal,dy11);
2823             tz               = _mm256_mul_ps(fscal,dz11);
2824
2825             /* Update vectorial force */
2826             fix1             = _mm256_add_ps(fix1,tx);
2827             fiy1             = _mm256_add_ps(fiy1,ty);
2828             fiz1             = _mm256_add_ps(fiz1,tz);
2829
2830             fjx1             = _mm256_add_ps(fjx1,tx);
2831             fjy1             = _mm256_add_ps(fjy1,ty);
2832             fjz1             = _mm256_add_ps(fjz1,tz);
2833
2834             }
2835
2836             /**************************
2837              * CALCULATE INTERACTIONS *
2838              **************************/
2839
2840             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2841             {
2842
2843             r12              = _mm256_mul_ps(rsq12,rinv12);
2844             r12              = _mm256_andnot_ps(dummy_mask,r12);
2845
2846             /* EWALD ELECTROSTATICS */
2847             
2848             /* Analytical PME correction */
2849             zeta2            = _mm256_mul_ps(beta2,rsq12);
2850             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2851             pmecorrF         = avx256_pmecorrF_f(zeta2);
2852             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2853             felec            = _mm256_mul_ps(qq12,felec);
2854             pmecorrV         = avx256_pmecorrV_f(zeta2);
2855             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2856             velec            = _mm256_sub_ps(rinv12,pmecorrV);
2857             velec            = _mm256_mul_ps(qq12,velec);
2858             
2859             d                = _mm256_sub_ps(r12,rswitch);
2860             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2861             d2               = _mm256_mul_ps(d,d);
2862             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2863
2864             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2865
2866             /* Evaluate switch function */
2867             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2868             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv12,_mm256_mul_ps(velec,dsw)) );
2869             cutoff_mask      = _mm256_cmp_ps(rsq12,rcutoff2,_CMP_LT_OQ);
2870
2871             fscal            = felec;
2872
2873             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2874
2875             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2876
2877             /* Calculate temporary vectorial force */
2878             tx               = _mm256_mul_ps(fscal,dx12);
2879             ty               = _mm256_mul_ps(fscal,dy12);
2880             tz               = _mm256_mul_ps(fscal,dz12);
2881
2882             /* Update vectorial force */
2883             fix1             = _mm256_add_ps(fix1,tx);
2884             fiy1             = _mm256_add_ps(fiy1,ty);
2885             fiz1             = _mm256_add_ps(fiz1,tz);
2886
2887             fjx2             = _mm256_add_ps(fjx2,tx);
2888             fjy2             = _mm256_add_ps(fjy2,ty);
2889             fjz2             = _mm256_add_ps(fjz2,tz);
2890
2891             }
2892
2893             /**************************
2894              * CALCULATE INTERACTIONS *
2895              **************************/
2896
2897             if (gmx_mm256_any_lt(rsq13,rcutoff2))
2898             {
2899
2900             r13              = _mm256_mul_ps(rsq13,rinv13);
2901             r13              = _mm256_andnot_ps(dummy_mask,r13);
2902
2903             /* EWALD ELECTROSTATICS */
2904             
2905             /* Analytical PME correction */
2906             zeta2            = _mm256_mul_ps(beta2,rsq13);
2907             rinv3            = _mm256_mul_ps(rinvsq13,rinv13);
2908             pmecorrF         = avx256_pmecorrF_f(zeta2);
2909             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2910             felec            = _mm256_mul_ps(qq13,felec);
2911             pmecorrV         = avx256_pmecorrV_f(zeta2);
2912             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2913             velec            = _mm256_sub_ps(rinv13,pmecorrV);
2914             velec            = _mm256_mul_ps(qq13,velec);
2915             
2916             d                = _mm256_sub_ps(r13,rswitch);
2917             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2918             d2               = _mm256_mul_ps(d,d);
2919             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2920
2921             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2922
2923             /* Evaluate switch function */
2924             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2925             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv13,_mm256_mul_ps(velec,dsw)) );
2926             cutoff_mask      = _mm256_cmp_ps(rsq13,rcutoff2,_CMP_LT_OQ);
2927
2928             fscal            = felec;
2929
2930             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2931
2932             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2933
2934             /* Calculate temporary vectorial force */
2935             tx               = _mm256_mul_ps(fscal,dx13);
2936             ty               = _mm256_mul_ps(fscal,dy13);
2937             tz               = _mm256_mul_ps(fscal,dz13);
2938
2939             /* Update vectorial force */
2940             fix1             = _mm256_add_ps(fix1,tx);
2941             fiy1             = _mm256_add_ps(fiy1,ty);
2942             fiz1             = _mm256_add_ps(fiz1,tz);
2943
2944             fjx3             = _mm256_add_ps(fjx3,tx);
2945             fjy3             = _mm256_add_ps(fjy3,ty);
2946             fjz3             = _mm256_add_ps(fjz3,tz);
2947
2948             }
2949
2950             /**************************
2951              * CALCULATE INTERACTIONS *
2952              **************************/
2953
2954             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2955             {
2956
2957             r21              = _mm256_mul_ps(rsq21,rinv21);
2958             r21              = _mm256_andnot_ps(dummy_mask,r21);
2959
2960             /* EWALD ELECTROSTATICS */
2961             
2962             /* Analytical PME correction */
2963             zeta2            = _mm256_mul_ps(beta2,rsq21);
2964             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2965             pmecorrF         = avx256_pmecorrF_f(zeta2);
2966             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2967             felec            = _mm256_mul_ps(qq21,felec);
2968             pmecorrV         = avx256_pmecorrV_f(zeta2);
2969             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
2970             velec            = _mm256_sub_ps(rinv21,pmecorrV);
2971             velec            = _mm256_mul_ps(qq21,velec);
2972             
2973             d                = _mm256_sub_ps(r21,rswitch);
2974             d                = _mm256_max_ps(d,_mm256_setzero_ps());
2975             d2               = _mm256_mul_ps(d,d);
2976             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
2977
2978             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
2979
2980             /* Evaluate switch function */
2981             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
2982             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv21,_mm256_mul_ps(velec,dsw)) );
2983             cutoff_mask      = _mm256_cmp_ps(rsq21,rcutoff2,_CMP_LT_OQ);
2984
2985             fscal            = felec;
2986
2987             fscal            = _mm256_and_ps(fscal,cutoff_mask);
2988
2989             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2990
2991             /* Calculate temporary vectorial force */
2992             tx               = _mm256_mul_ps(fscal,dx21);
2993             ty               = _mm256_mul_ps(fscal,dy21);
2994             tz               = _mm256_mul_ps(fscal,dz21);
2995
2996             /* Update vectorial force */
2997             fix2             = _mm256_add_ps(fix2,tx);
2998             fiy2             = _mm256_add_ps(fiy2,ty);
2999             fiz2             = _mm256_add_ps(fiz2,tz);
3000
3001             fjx1             = _mm256_add_ps(fjx1,tx);
3002             fjy1             = _mm256_add_ps(fjy1,ty);
3003             fjz1             = _mm256_add_ps(fjz1,tz);
3004
3005             }
3006
3007             /**************************
3008              * CALCULATE INTERACTIONS *
3009              **************************/
3010
3011             if (gmx_mm256_any_lt(rsq22,rcutoff2))
3012             {
3013
3014             r22              = _mm256_mul_ps(rsq22,rinv22);
3015             r22              = _mm256_andnot_ps(dummy_mask,r22);
3016
3017             /* EWALD ELECTROSTATICS */
3018             
3019             /* Analytical PME correction */
3020             zeta2            = _mm256_mul_ps(beta2,rsq22);
3021             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
3022             pmecorrF         = avx256_pmecorrF_f(zeta2);
3023             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3024             felec            = _mm256_mul_ps(qq22,felec);
3025             pmecorrV         = avx256_pmecorrV_f(zeta2);
3026             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
3027             velec            = _mm256_sub_ps(rinv22,pmecorrV);
3028             velec            = _mm256_mul_ps(qq22,velec);
3029             
3030             d                = _mm256_sub_ps(r22,rswitch);
3031             d                = _mm256_max_ps(d,_mm256_setzero_ps());
3032             d2               = _mm256_mul_ps(d,d);
3033             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3034
3035             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3036
3037             /* Evaluate switch function */
3038             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
3039             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv22,_mm256_mul_ps(velec,dsw)) );
3040             cutoff_mask      = _mm256_cmp_ps(rsq22,rcutoff2,_CMP_LT_OQ);
3041
3042             fscal            = felec;
3043
3044             fscal            = _mm256_and_ps(fscal,cutoff_mask);
3045
3046             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
3047
3048             /* Calculate temporary vectorial force */
3049             tx               = _mm256_mul_ps(fscal,dx22);
3050             ty               = _mm256_mul_ps(fscal,dy22);
3051             tz               = _mm256_mul_ps(fscal,dz22);
3052
3053             /* Update vectorial force */
3054             fix2             = _mm256_add_ps(fix2,tx);
3055             fiy2             = _mm256_add_ps(fiy2,ty);
3056             fiz2             = _mm256_add_ps(fiz2,tz);
3057
3058             fjx2             = _mm256_add_ps(fjx2,tx);
3059             fjy2             = _mm256_add_ps(fjy2,ty);
3060             fjz2             = _mm256_add_ps(fjz2,tz);
3061
3062             }
3063
3064             /**************************
3065              * CALCULATE INTERACTIONS *
3066              **************************/
3067
3068             if (gmx_mm256_any_lt(rsq23,rcutoff2))
3069             {
3070
3071             r23              = _mm256_mul_ps(rsq23,rinv23);
3072             r23              = _mm256_andnot_ps(dummy_mask,r23);
3073
3074             /* EWALD ELECTROSTATICS */
3075             
3076             /* Analytical PME correction */
3077             zeta2            = _mm256_mul_ps(beta2,rsq23);
3078             rinv3            = _mm256_mul_ps(rinvsq23,rinv23);
3079             pmecorrF         = avx256_pmecorrF_f(zeta2);
3080             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3081             felec            = _mm256_mul_ps(qq23,felec);
3082             pmecorrV         = avx256_pmecorrV_f(zeta2);
3083             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
3084             velec            = _mm256_sub_ps(rinv23,pmecorrV);
3085             velec            = _mm256_mul_ps(qq23,velec);
3086             
3087             d                = _mm256_sub_ps(r23,rswitch);
3088             d                = _mm256_max_ps(d,_mm256_setzero_ps());
3089             d2               = _mm256_mul_ps(d,d);
3090             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3091
3092             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3093
3094             /* Evaluate switch function */
3095             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
3096             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv23,_mm256_mul_ps(velec,dsw)) );
3097             cutoff_mask      = _mm256_cmp_ps(rsq23,rcutoff2,_CMP_LT_OQ);
3098
3099             fscal            = felec;
3100
3101             fscal            = _mm256_and_ps(fscal,cutoff_mask);
3102
3103             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
3104
3105             /* Calculate temporary vectorial force */
3106             tx               = _mm256_mul_ps(fscal,dx23);
3107             ty               = _mm256_mul_ps(fscal,dy23);
3108             tz               = _mm256_mul_ps(fscal,dz23);
3109
3110             /* Update vectorial force */
3111             fix2             = _mm256_add_ps(fix2,tx);
3112             fiy2             = _mm256_add_ps(fiy2,ty);
3113             fiz2             = _mm256_add_ps(fiz2,tz);
3114
3115             fjx3             = _mm256_add_ps(fjx3,tx);
3116             fjy3             = _mm256_add_ps(fjy3,ty);
3117             fjz3             = _mm256_add_ps(fjz3,tz);
3118
3119             }
3120
3121             /**************************
3122              * CALCULATE INTERACTIONS *
3123              **************************/
3124
3125             if (gmx_mm256_any_lt(rsq31,rcutoff2))
3126             {
3127
3128             r31              = _mm256_mul_ps(rsq31,rinv31);
3129             r31              = _mm256_andnot_ps(dummy_mask,r31);
3130
3131             /* EWALD ELECTROSTATICS */
3132             
3133             /* Analytical PME correction */
3134             zeta2            = _mm256_mul_ps(beta2,rsq31);
3135             rinv3            = _mm256_mul_ps(rinvsq31,rinv31);
3136             pmecorrF         = avx256_pmecorrF_f(zeta2);
3137             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3138             felec            = _mm256_mul_ps(qq31,felec);
3139             pmecorrV         = avx256_pmecorrV_f(zeta2);
3140             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
3141             velec            = _mm256_sub_ps(rinv31,pmecorrV);
3142             velec            = _mm256_mul_ps(qq31,velec);
3143             
3144             d                = _mm256_sub_ps(r31,rswitch);
3145             d                = _mm256_max_ps(d,_mm256_setzero_ps());
3146             d2               = _mm256_mul_ps(d,d);
3147             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3148
3149             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3150
3151             /* Evaluate switch function */
3152             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
3153             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv31,_mm256_mul_ps(velec,dsw)) );
3154             cutoff_mask      = _mm256_cmp_ps(rsq31,rcutoff2,_CMP_LT_OQ);
3155
3156             fscal            = felec;
3157
3158             fscal            = _mm256_and_ps(fscal,cutoff_mask);
3159
3160             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
3161
3162             /* Calculate temporary vectorial force */
3163             tx               = _mm256_mul_ps(fscal,dx31);
3164             ty               = _mm256_mul_ps(fscal,dy31);
3165             tz               = _mm256_mul_ps(fscal,dz31);
3166
3167             /* Update vectorial force */
3168             fix3             = _mm256_add_ps(fix3,tx);
3169             fiy3             = _mm256_add_ps(fiy3,ty);
3170             fiz3             = _mm256_add_ps(fiz3,tz);
3171
3172             fjx1             = _mm256_add_ps(fjx1,tx);
3173             fjy1             = _mm256_add_ps(fjy1,ty);
3174             fjz1             = _mm256_add_ps(fjz1,tz);
3175
3176             }
3177
3178             /**************************
3179              * CALCULATE INTERACTIONS *
3180              **************************/
3181
3182             if (gmx_mm256_any_lt(rsq32,rcutoff2))
3183             {
3184
3185             r32              = _mm256_mul_ps(rsq32,rinv32);
3186             r32              = _mm256_andnot_ps(dummy_mask,r32);
3187
3188             /* EWALD ELECTROSTATICS */
3189             
3190             /* Analytical PME correction */
3191             zeta2            = _mm256_mul_ps(beta2,rsq32);
3192             rinv3            = _mm256_mul_ps(rinvsq32,rinv32);
3193             pmecorrF         = avx256_pmecorrF_f(zeta2);
3194             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3195             felec            = _mm256_mul_ps(qq32,felec);
3196             pmecorrV         = avx256_pmecorrV_f(zeta2);
3197             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
3198             velec            = _mm256_sub_ps(rinv32,pmecorrV);
3199             velec            = _mm256_mul_ps(qq32,velec);
3200             
3201             d                = _mm256_sub_ps(r32,rswitch);
3202             d                = _mm256_max_ps(d,_mm256_setzero_ps());
3203             d2               = _mm256_mul_ps(d,d);
3204             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3205
3206             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3207
3208             /* Evaluate switch function */
3209             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
3210             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv32,_mm256_mul_ps(velec,dsw)) );
3211             cutoff_mask      = _mm256_cmp_ps(rsq32,rcutoff2,_CMP_LT_OQ);
3212
3213             fscal            = felec;
3214
3215             fscal            = _mm256_and_ps(fscal,cutoff_mask);
3216
3217             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
3218
3219             /* Calculate temporary vectorial force */
3220             tx               = _mm256_mul_ps(fscal,dx32);
3221             ty               = _mm256_mul_ps(fscal,dy32);
3222             tz               = _mm256_mul_ps(fscal,dz32);
3223
3224             /* Update vectorial force */
3225             fix3             = _mm256_add_ps(fix3,tx);
3226             fiy3             = _mm256_add_ps(fiy3,ty);
3227             fiz3             = _mm256_add_ps(fiz3,tz);
3228
3229             fjx2             = _mm256_add_ps(fjx2,tx);
3230             fjy2             = _mm256_add_ps(fjy2,ty);
3231             fjz2             = _mm256_add_ps(fjz2,tz);
3232
3233             }
3234
3235             /**************************
3236              * CALCULATE INTERACTIONS *
3237              **************************/
3238
3239             if (gmx_mm256_any_lt(rsq33,rcutoff2))
3240             {
3241
3242             r33              = _mm256_mul_ps(rsq33,rinv33);
3243             r33              = _mm256_andnot_ps(dummy_mask,r33);
3244
3245             /* EWALD ELECTROSTATICS */
3246             
3247             /* Analytical PME correction */
3248             zeta2            = _mm256_mul_ps(beta2,rsq33);
3249             rinv3            = _mm256_mul_ps(rinvsq33,rinv33);
3250             pmecorrF         = avx256_pmecorrF_f(zeta2);
3251             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
3252             felec            = _mm256_mul_ps(qq33,felec);
3253             pmecorrV         = avx256_pmecorrV_f(zeta2);
3254             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
3255             velec            = _mm256_sub_ps(rinv33,pmecorrV);
3256             velec            = _mm256_mul_ps(qq33,velec);
3257             
3258             d                = _mm256_sub_ps(r33,rswitch);
3259             d                = _mm256_max_ps(d,_mm256_setzero_ps());
3260             d2               = _mm256_mul_ps(d,d);
3261             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
3262
3263             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
3264
3265             /* Evaluate switch function */
3266             /* fscal = f/r = -(v*sw)'/r = -(v'*sw + v*dsw)/r = (unswitched fscal)*sw - v*dsw/r */
3267             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv33,_mm256_mul_ps(velec,dsw)) );
3268             cutoff_mask      = _mm256_cmp_ps(rsq33,rcutoff2,_CMP_LT_OQ);
3269
3270             fscal            = felec;
3271
3272             fscal            = _mm256_and_ps(fscal,cutoff_mask);
3273
3274             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
3275
3276             /* Calculate temporary vectorial force */
3277             tx               = _mm256_mul_ps(fscal,dx33);
3278             ty               = _mm256_mul_ps(fscal,dy33);
3279             tz               = _mm256_mul_ps(fscal,dz33);
3280
3281             /* Update vectorial force */
3282             fix3             = _mm256_add_ps(fix3,tx);
3283             fiy3             = _mm256_add_ps(fiy3,ty);
3284             fiz3             = _mm256_add_ps(fiz3,tz);
3285
3286             fjx3             = _mm256_add_ps(fjx3,tx);
3287             fjy3             = _mm256_add_ps(fjy3,ty);
3288             fjz3             = _mm256_add_ps(fjz3,tz);
3289
3290             }
3291
3292             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
3293             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
3294             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
3295             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
3296             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
3297             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
3298             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
3299             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
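            /* For dummy lanes the j-force pointer is redirected to a scratch
             * buffer, so the swizzled decrement below can store unconditionally
             * without corrupting the forces of real atoms.
             */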
3300
3301             gmx_mm256_decrement_4rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
3302                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
3303                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
3304
3305             /* Inner loop uses 1014 flops */
3306         }
3307
3308         /* End of innermost loop */
3309
3310         gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
3311                                                  f+i_coord_offset,fshift+i_shift_offset);
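        /* Reduce the i-atom force accumulators across SIMD lanes and add them to
         * f[], also updating fshift[] so the virial can later be computed from
         * the shift forces.
         */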
3312
3313         /* Increment number of inner iterations */
3314         inneriter                  += j_index_end - j_index_start;
3315
3316         /* Outer loop uses 24 flops */
3317     }
3318
3319     /* Increment number of outer iterations */
3320     outeriter        += nri;
3321
3322     /* Update outer/inner flops */
3323
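    /* inc_nrnb records this kernel's operation count for mdrun's flop
     * accounting; the constants correspond to the per-iteration flop comments
     * above (24 flops per outer iteration, 1014 per inner iteration, using the
     * larger masked-loop count for all inner iterations).
     */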
3324     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*1014);
3325 }