1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
38 #include "config.h"
39
40 #include <math.h>
41
42 #include "../nb_kernel.h"
43 #include "gromacs/legacyheaders/types/simple.h"
44 #include "gromacs/math/vec.h"
45 #include "gromacs/legacyheaders/nrnb.h"
46
47 #include "gromacs/simd/math_x86_avx_256_double.h"
48 #include "kernelutil_x86_avx_256_double.h"
49
50 /*
51  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_double
52  * Electrostatics interaction: Ewald
53  * VdW interaction:            LennardJones
54  * Geometry:                   Water4-Water4
55  * Calculate force/pot:        PotentialAndForce
56  */
57 void
58 nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_VF_avx_256_double
59                     (t_nblist                    * gmx_restrict       nlist,
60                      rvec                        * gmx_restrict          xx,
61                      rvec                        * gmx_restrict          ff,
62                      t_forcerec                  * gmx_restrict          fr,
63                      t_mdatoms                   * gmx_restrict     mdatoms,
64                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65                      t_nrnb                      * gmx_restrict        nrnb)
66 {
67     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
68      * just 0 for non-waters.
69      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
70      * jnr indices corresponding to data put in the four positions in the SIMD register.
71      */
72     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
73     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74     int              jnrA,jnrB,jnrC,jnrD;
75     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
79     real             rcutoff_scalar;
80     real             *shiftvec,*fshift,*x,*f;
81     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
82     real             scratch[4*DIM];
83     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
84     real *           vdwioffsetptr0;
85     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
86     real *           vdwioffsetptr1;
87     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
88     real *           vdwioffsetptr2;
89     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
90     real *           vdwioffsetptr3;
91     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
92     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
93     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
94     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
95     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
96     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
97     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
98     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
99     __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
100     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
101     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
102     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
103     __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
104     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
105     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
106     __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
107     __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
108     __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
109     __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
110     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
111     real             *charge;
112     int              nvdwtype;
113     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
114     int              *vdwtype;
115     real             *vdwparam;
116     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
117     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
118     __m128i          ewitab;
119     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
120     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
121     real             *ewtab;
122     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
123     real             rswitch_scalar,d_scalar;
124     __m256d          dummy_mask,cutoff_mask;
125     __m128           tmpmask0,tmpmask1;
126     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
127     __m256d          one     = _mm256_set1_pd(1.0);
128     __m256d          two     = _mm256_set1_pd(2.0);
129     x                = xx[0];
130     f                = ff[0];
131
132     nri              = nlist->nri;
133     iinr             = nlist->iinr;
134     jindex           = nlist->jindex;
135     jjnr             = nlist->jjnr;
136     shiftidx         = nlist->shift;
137     gid              = nlist->gid;
138     shiftvec         = fr->shift_vec[0];
139     fshift           = fr->fshift[0];
140     facel            = _mm256_set1_pd(fr->epsfac);
141     charge           = mdatoms->chargeA;
142     nvdwtype         = fr->ntype;
143     vdwparam         = fr->nbfp;
144     vdwtype          = mdatoms->typeA;
145
146     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
147     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
148     beta2            = _mm256_mul_pd(beta,beta);
149     beta3            = _mm256_mul_pd(beta,beta2);
150
151     ewtab            = fr->ic->tabq_coul_FDV0;
152     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
153     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
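    /* The Ewald table (tabq_coul_FDV0) packs four doubles per table point:
     * the force value F, the increment D to the next F, the potential V, and
     * a padding zero. ewtabscale converts a distance r into a fractional
     * table index, and ewtabhalfspace is half the table spacing, used below
     * to reconstruct the potential consistently from the interpolated force.
     */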
154
155     /* Setup water-specific parameters */
156     inr              = nlist->iinr[0];
157     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
158     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
159     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
160     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
161
162     jq1              = _mm256_set1_pd(charge[inr+1]);
163     jq2              = _mm256_set1_pd(charge[inr+2]);
164     jq3              = _mm256_set1_pd(charge[inr+3]);
165     vdwjidx0A        = 2*vdwtype[inr+0];
166     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
167     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
168     qq11             = _mm256_mul_pd(iq1,jq1);
169     qq12             = _mm256_mul_pd(iq1,jq2);
170     qq13             = _mm256_mul_pd(iq1,jq3);
171     qq21             = _mm256_mul_pd(iq2,jq1);
172     qq22             = _mm256_mul_pd(iq2,jq2);
173     qq23             = _mm256_mul_pd(iq2,jq3);
174     qq31             = _mm256_mul_pd(iq3,jq1);
175     qq32             = _mm256_mul_pd(iq3,jq2);
176     qq33             = _mm256_mul_pd(iq3,jq3);
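    /* This kernel is specialized for pairs of identical 4-site waters, so all
     * nine charge-charge products (sites 1-3 of i with sites 1-3 of j) and the
     * single C6/C12 pair for the site 0 - site 0 LJ interaction can be formed
     * once here, outside both loops. Site 0 enters only through LJ and sites
     * 1-3 only through electrostatics, as in e.g. a TIP4P-style water model.
     */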
177
178     /* With explicit cutoffs the electrostatics and VdW cutoff values must be identical, so we use the electrostatics cutoff (an arbitrary choice) */
179     rcutoff_scalar   = fr->rcoulomb;
180     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
181     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
182
183     rswitch_scalar   = fr->rcoulomb_switch;
184     rswitch          = _mm256_set1_pd(rswitch_scalar);
185     /* Setup switch parameters */
186     d_scalar         = rcutoff_scalar-rswitch_scalar;
187     d                = _mm256_set1_pd(d_scalar);
188     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
189     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
190     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
191     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
192     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
193     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
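    /* With D = rcutoff - rswitch and d = r - rswitch clamped to [0,D], the
     * coefficients above give the standard fifth-order switch
     *   sw(d) = 1 + swV3*d^3 + swV4*d^4 + swV5*d^5
     *         = 1 - 10*(d/D)^3 + 15*(d/D)^4 - 6*(d/D)^5,
     * which falls smoothly from 1 at d=0 to 0 at d=D with zero first and
     * second derivatives at both ends. swF2..swF4 are the coefficients of the
     * derivative dsw/dd = swF2*d^2 + swF3*d^3 + swF4*d^4 used for the force.
     */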
194
195     /* Avoid stupid compiler warnings */
196     jnrA = jnrB = jnrC = jnrD = 0;
197     j_coord_offsetA = 0;
198     j_coord_offsetB = 0;
199     j_coord_offsetC = 0;
200     j_coord_offsetD = 0;
201
202     outeriter        = 0;
203     inneriter        = 0;
204
205     for(iidx=0;iidx<4*DIM;iidx++)
206     {
207         scratch[iidx] = 0.0;
208     }
209
210     /* Start outer loop over neighborlists */
211     for(iidx=0; iidx<nri; iidx++)
212     {
213         /* Load shift vector for this list */
214         i_shift_offset   = DIM*shiftidx[iidx];
215
216         /* Load limits for loop over neighbors */
217         j_index_start    = jindex[iidx];
218         j_index_end      = jindex[iidx+1];
219
220         /* Get outer coordinate index */
221         inr              = iinr[iidx];
222         i_coord_offset   = DIM*inr;
223
224         /* Load i particle coords and add shift vector */
225         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
226                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
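        /* The outer (i) water is identical in all four SIMD lanes, so its four
         * sites are shifted by the periodic shift vector and broadcast across
         * the lanes; only the j coordinates loaded in the inner loop differ
         * per lane.
         */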
227
228         fix0             = _mm256_setzero_pd();
229         fiy0             = _mm256_setzero_pd();
230         fiz0             = _mm256_setzero_pd();
231         fix1             = _mm256_setzero_pd();
232         fiy1             = _mm256_setzero_pd();
233         fiz1             = _mm256_setzero_pd();
234         fix2             = _mm256_setzero_pd();
235         fiy2             = _mm256_setzero_pd();
236         fiz2             = _mm256_setzero_pd();
237         fix3             = _mm256_setzero_pd();
238         fiy3             = _mm256_setzero_pd();
239         fiz3             = _mm256_setzero_pd();
240
241         /* Reset potential sums */
242         velecsum         = _mm256_setzero_pd();
243         vvdwsum          = _mm256_setzero_pd();
244
245         /* Start inner kernel loop */
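        /* This loop handles complete quadruplets of j waters and stops as soon
         * as the fourth entry is negative, i.e. a padding entry. Any partially
         * filled quadruplet at the end of the list is processed after the
         * loop, where a dummy mask zeroes the contributions of the padding
         * lanes.
         */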
246         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
247         {
248
249             /* Get j neighbor index, and coordinate index */
250             jnrA             = jjnr[jidx];
251             jnrB             = jjnr[jidx+1];
252             jnrC             = jjnr[jidx+2];
253             jnrD             = jjnr[jidx+3];
254             j_coord_offsetA  = DIM*jnrA;
255             j_coord_offsetB  = DIM*jnrB;
256             j_coord_offsetC  = DIM*jnrC;
257             j_coord_offsetD  = DIM*jnrD;
258
259             /* load j atom coordinates */
260             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
261                                                  x+j_coord_offsetC,x+j_coord_offsetD,
262                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
263                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
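            /* The gather above reads the four sites of each of the four j
             * waters from their rvec (AoS) storage and transposes them into
             * SoA registers, so that e.g. jx1 holds the x coordinate of site 1
             * of all four j molecules, one per SIMD lane.
             */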
264
265             /* Calculate displacement vector */
266             dx00             = _mm256_sub_pd(ix0,jx0);
267             dy00             = _mm256_sub_pd(iy0,jy0);
268             dz00             = _mm256_sub_pd(iz0,jz0);
269             dx11             = _mm256_sub_pd(ix1,jx1);
270             dy11             = _mm256_sub_pd(iy1,jy1);
271             dz11             = _mm256_sub_pd(iz1,jz1);
272             dx12             = _mm256_sub_pd(ix1,jx2);
273             dy12             = _mm256_sub_pd(iy1,jy2);
274             dz12             = _mm256_sub_pd(iz1,jz2);
275             dx13             = _mm256_sub_pd(ix1,jx3);
276             dy13             = _mm256_sub_pd(iy1,jy3);
277             dz13             = _mm256_sub_pd(iz1,jz3);
278             dx21             = _mm256_sub_pd(ix2,jx1);
279             dy21             = _mm256_sub_pd(iy2,jy1);
280             dz21             = _mm256_sub_pd(iz2,jz1);
281             dx22             = _mm256_sub_pd(ix2,jx2);
282             dy22             = _mm256_sub_pd(iy2,jy2);
283             dz22             = _mm256_sub_pd(iz2,jz2);
284             dx23             = _mm256_sub_pd(ix2,jx3);
285             dy23             = _mm256_sub_pd(iy2,jy3);
286             dz23             = _mm256_sub_pd(iz2,jz3);
287             dx31             = _mm256_sub_pd(ix3,jx1);
288             dy31             = _mm256_sub_pd(iy3,jy1);
289             dz31             = _mm256_sub_pd(iz3,jz1);
290             dx32             = _mm256_sub_pd(ix3,jx2);
291             dy32             = _mm256_sub_pd(iy3,jy2);
292             dz32             = _mm256_sub_pd(iz3,jz2);
293             dx33             = _mm256_sub_pd(ix3,jx3);
294             dy33             = _mm256_sub_pd(iy3,jy3);
295             dz33             = _mm256_sub_pd(iz3,jz3);
296
297             /* Calculate squared distance and things based on it */
298             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
299             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
300             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
301             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
302             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
303             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
304             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
305             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
306             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
307             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
308
309             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
310             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
311             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
312             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
313             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
314             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
315             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
316             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
317             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
318             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
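            /* gmx_mm256_invsqrt_pd returns 1/sqrt(rsq); as far as this kernel
             * is concerned it is simply an accurate reciprocal square root,
             * typically a hardware rsqrt estimate refined by Newton-Raphson
             * iterations to double precision.
             */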
319
320             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
321             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
322             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
323             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
324             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
325             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
326             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
327             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
328             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
329             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
330
331             fjx0             = _mm256_setzero_pd();
332             fjy0             = _mm256_setzero_pd();
333             fjz0             = _mm256_setzero_pd();
334             fjx1             = _mm256_setzero_pd();
335             fjy1             = _mm256_setzero_pd();
336             fjz1             = _mm256_setzero_pd();
337             fjx2             = _mm256_setzero_pd();
338             fjy2             = _mm256_setzero_pd();
339             fjz2             = _mm256_setzero_pd();
340             fjx3             = _mm256_setzero_pd();
341             fjy3             = _mm256_setzero_pd();
342             fjz3             = _mm256_setzero_pd();
343
344             /**************************
345              * CALCULATE INTERACTIONS *
346              **************************/
347
348             if (gmx_mm256_any_lt(rsq00,rcutoff2))
349             {
350
351             r00              = _mm256_mul_pd(rsq00,rinv00);
352
353             /* LENNARD-JONES DISPERSION/REPULSION */
354
355             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
356             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
357             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
358             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
359             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
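            /* The 1/12 and 1/6 prefactors above, together with the bare
             * (vvdw12-vvdw6)*rinvsq force, rely on the GROMACS convention that
             * nbfp stores 6*C6 and 12*C12; with that convention vvdw is the
             * plain Lennard-Jones potential and fvdw is F/r.
             */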
360
361             d                = _mm256_sub_pd(r00,rswitch);
362             d                = _mm256_max_pd(d,_mm256_setzero_pd());
363             d2               = _mm256_mul_pd(d,d);
364             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
365
366             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
367
368             /* Evaluate switch function */
369             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
370             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
371             vvdw             = _mm256_mul_pd(vvdw,sw);
372             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
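            /* cutoff_mask is all-ones in lanes with rsq < rcutoff^2 and zero
             * elsewhere; ANDing the potential and the scalar force with it
             * discards contributions from lanes beyond the cutoff, since this
             * block is only skipped when all four lanes are outside it.
             */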
373
374             /* Update potential sum for this i atom from the interaction with this j atom. */
375             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
376             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
377
378             fscal            = fvdw;
379
380             fscal            = _mm256_and_pd(fscal,cutoff_mask);
381
382             /* Calculate temporary vectorial force */
383             tx               = _mm256_mul_pd(fscal,dx00);
384             ty               = _mm256_mul_pd(fscal,dy00);
385             tz               = _mm256_mul_pd(fscal,dz00);
386
387             /* Update vectorial force */
388             fix0             = _mm256_add_pd(fix0,tx);
389             fiy0             = _mm256_add_pd(fiy0,ty);
390             fiz0             = _mm256_add_pd(fiz0,tz);
391
392             fjx0             = _mm256_add_pd(fjx0,tx);
393             fjy0             = _mm256_add_pd(fjy0,ty);
394             fjz0             = _mm256_add_pd(fjz0,tz);
395
396             }
397
398             /**************************
399              * CALCULATE INTERACTIONS *
400              **************************/
401
402             if (gmx_mm256_any_lt(rsq11,rcutoff2))
403             {
404
405             r11              = _mm256_mul_pd(rsq11,rinv11);
406
407             /* EWALD ELECTROSTATICS */
408
409             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
410             ewrt             = _mm256_mul_pd(r11,ewtabscale);
411             ewitab           = _mm256_cvttpd_epi32(ewrt);
412             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
413             ewitab           = _mm_slli_epi32(ewitab,2);
414             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
415             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
416             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
417             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
418             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
419             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
420             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
421             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
422             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
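            /* The four loads above fetch one F/D/V/0 quartet per lane (ewitab
             * was shifted left by 2, i.e. multiplied by 4, to index quartets),
             * and the transpose turns them into per-quantity vectors.
             * felec = F + eps*D linearly interpolates the tabulated force, and
             * velec integrates that interpolated force over the fraction eps
             * of a table interval so the potential stays consistent with it.
             * The table holds the long-range correction that, subtracted from
             * the plain Coulomb 1/r and its force analogue, leaves the
             * short-range real-space Ewald (erfc) interaction.
             */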
423
424             d                = _mm256_sub_pd(r11,rswitch);
425             d                = _mm256_max_pd(d,_mm256_setzero_pd());
426             d2               = _mm256_mul_pd(d,d);
427             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
428
429             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
430
431             /* Evaluate switch function */
432             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
433             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
434             velec            = _mm256_mul_pd(velec,sw);
435             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
436
437             /* Update potential sum for this i atom from the interaction with this j atom. */
438             velec            = _mm256_and_pd(velec,cutoff_mask);
439             velecsum         = _mm256_add_pd(velecsum,velec);
440
441             fscal            = felec;
442
443             fscal            = _mm256_and_pd(fscal,cutoff_mask);
444
445             /* Calculate temporary vectorial force */
446             tx               = _mm256_mul_pd(fscal,dx11);
447             ty               = _mm256_mul_pd(fscal,dy11);
448             tz               = _mm256_mul_pd(fscal,dz11);
449
450             /* Update vectorial force */
451             fix1             = _mm256_add_pd(fix1,tx);
452             fiy1             = _mm256_add_pd(fiy1,ty);
453             fiz1             = _mm256_add_pd(fiz1,tz);
454
455             fjx1             = _mm256_add_pd(fjx1,tx);
456             fjy1             = _mm256_add_pd(fjy1,ty);
457             fjz1             = _mm256_add_pd(fjz1,tz);
458
459             }
460
461             /**************************
462              * CALCULATE INTERACTIONS *
463              **************************/
464
465             if (gmx_mm256_any_lt(rsq12,rcutoff2))
466             {
467
468             r12              = _mm256_mul_pd(rsq12,rinv12);
469
470             /* EWALD ELECTROSTATICS */
471
472             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
473             ewrt             = _mm256_mul_pd(r12,ewtabscale);
474             ewitab           = _mm256_cvttpd_epi32(ewrt);
475             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
476             ewitab           = _mm_slli_epi32(ewitab,2);
477             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
478             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
479             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
480             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
481             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
482             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
483             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
484             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
485             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
486
487             d                = _mm256_sub_pd(r12,rswitch);
488             d                = _mm256_max_pd(d,_mm256_setzero_pd());
489             d2               = _mm256_mul_pd(d,d);
490             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
491
492             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
493
494             /* Evaluate switch function */
495             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
496             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
497             velec            = _mm256_mul_pd(velec,sw);
498             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
499
500             /* Update potential sum for this i atom from the interaction with this j atom. */
501             velec            = _mm256_and_pd(velec,cutoff_mask);
502             velecsum         = _mm256_add_pd(velecsum,velec);
503
504             fscal            = felec;
505
506             fscal            = _mm256_and_pd(fscal,cutoff_mask);
507
508             /* Calculate temporary vectorial force */
509             tx               = _mm256_mul_pd(fscal,dx12);
510             ty               = _mm256_mul_pd(fscal,dy12);
511             tz               = _mm256_mul_pd(fscal,dz12);
512
513             /* Update vectorial force */
514             fix1             = _mm256_add_pd(fix1,tx);
515             fiy1             = _mm256_add_pd(fiy1,ty);
516             fiz1             = _mm256_add_pd(fiz1,tz);
517
518             fjx2             = _mm256_add_pd(fjx2,tx);
519             fjy2             = _mm256_add_pd(fjy2,ty);
520             fjz2             = _mm256_add_pd(fjz2,tz);
521
522             }
523
524             /**************************
525              * CALCULATE INTERACTIONS *
526              **************************/
527
528             if (gmx_mm256_any_lt(rsq13,rcutoff2))
529             {
530
531             r13              = _mm256_mul_pd(rsq13,rinv13);
532
533             /* EWALD ELECTROSTATICS */
534
535             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
536             ewrt             = _mm256_mul_pd(r13,ewtabscale);
537             ewitab           = _mm256_cvttpd_epi32(ewrt);
538             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
539             ewitab           = _mm_slli_epi32(ewitab,2);
540             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
541             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
542             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
543             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
544             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
545             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
546             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
547             velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
548             felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
549
550             d                = _mm256_sub_pd(r13,rswitch);
551             d                = _mm256_max_pd(d,_mm256_setzero_pd());
552             d2               = _mm256_mul_pd(d,d);
553             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
554
555             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
556
557             /* Evaluate switch function */
558             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
559             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
560             velec            = _mm256_mul_pd(velec,sw);
561             cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
562
563             /* Update potential sum for this i atom from the interaction with this j atom. */
564             velec            = _mm256_and_pd(velec,cutoff_mask);
565             velecsum         = _mm256_add_pd(velecsum,velec);
566
567             fscal            = felec;
568
569             fscal            = _mm256_and_pd(fscal,cutoff_mask);
570
571             /* Calculate temporary vectorial force */
572             tx               = _mm256_mul_pd(fscal,dx13);
573             ty               = _mm256_mul_pd(fscal,dy13);
574             tz               = _mm256_mul_pd(fscal,dz13);
575
576             /* Update vectorial force */
577             fix1             = _mm256_add_pd(fix1,tx);
578             fiy1             = _mm256_add_pd(fiy1,ty);
579             fiz1             = _mm256_add_pd(fiz1,tz);
580
581             fjx3             = _mm256_add_pd(fjx3,tx);
582             fjy3             = _mm256_add_pd(fjy3,ty);
583             fjz3             = _mm256_add_pd(fjz3,tz);
584
585             }
586
587             /**************************
588              * CALCULATE INTERACTIONS *
589              **************************/
590
591             if (gmx_mm256_any_lt(rsq21,rcutoff2))
592             {
593
594             r21              = _mm256_mul_pd(rsq21,rinv21);
595
596             /* EWALD ELECTROSTATICS */
597
598             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
599             ewrt             = _mm256_mul_pd(r21,ewtabscale);
600             ewitab           = _mm256_cvttpd_epi32(ewrt);
601             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
602             ewitab           = _mm_slli_epi32(ewitab,2);
603             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
604             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
605             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
606             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
607             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
608             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
609             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
610             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
611             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
612
613             d                = _mm256_sub_pd(r21,rswitch);
614             d                = _mm256_max_pd(d,_mm256_setzero_pd());
615             d2               = _mm256_mul_pd(d,d);
616             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
617
618             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
619
620             /* Evaluate switch function */
621             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
622             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
623             velec            = _mm256_mul_pd(velec,sw);
624             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
625
626             /* Update potential sum for this i atom from the interaction with this j atom. */
627             velec            = _mm256_and_pd(velec,cutoff_mask);
628             velecsum         = _mm256_add_pd(velecsum,velec);
629
630             fscal            = felec;
631
632             fscal            = _mm256_and_pd(fscal,cutoff_mask);
633
634             /* Calculate temporary vectorial force */
635             tx               = _mm256_mul_pd(fscal,dx21);
636             ty               = _mm256_mul_pd(fscal,dy21);
637             tz               = _mm256_mul_pd(fscal,dz21);
638
639             /* Update vectorial force */
640             fix2             = _mm256_add_pd(fix2,tx);
641             fiy2             = _mm256_add_pd(fiy2,ty);
642             fiz2             = _mm256_add_pd(fiz2,tz);
643
644             fjx1             = _mm256_add_pd(fjx1,tx);
645             fjy1             = _mm256_add_pd(fjy1,ty);
646             fjz1             = _mm256_add_pd(fjz1,tz);
647
648             }
649
650             /**************************
651              * CALCULATE INTERACTIONS *
652              **************************/
653
654             if (gmx_mm256_any_lt(rsq22,rcutoff2))
655             {
656
657             r22              = _mm256_mul_pd(rsq22,rinv22);
658
659             /* EWALD ELECTROSTATICS */
660
661             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
662             ewrt             = _mm256_mul_pd(r22,ewtabscale);
663             ewitab           = _mm256_cvttpd_epi32(ewrt);
664             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
665             ewitab           = _mm_slli_epi32(ewitab,2);
666             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
667             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
668             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
669             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
670             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
671             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
672             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
673             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
674             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
675
676             d                = _mm256_sub_pd(r22,rswitch);
677             d                = _mm256_max_pd(d,_mm256_setzero_pd());
678             d2               = _mm256_mul_pd(d,d);
679             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
680
681             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
682
683             /* Evaluate switch function */
684             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
685             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
686             velec            = _mm256_mul_pd(velec,sw);
687             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
688
689             /* Update potential sum for this i atom from the interaction with this j atom. */
690             velec            = _mm256_and_pd(velec,cutoff_mask);
691             velecsum         = _mm256_add_pd(velecsum,velec);
692
693             fscal            = felec;
694
695             fscal            = _mm256_and_pd(fscal,cutoff_mask);
696
697             /* Calculate temporary vectorial force */
698             tx               = _mm256_mul_pd(fscal,dx22);
699             ty               = _mm256_mul_pd(fscal,dy22);
700             tz               = _mm256_mul_pd(fscal,dz22);
701
702             /* Update vectorial force */
703             fix2             = _mm256_add_pd(fix2,tx);
704             fiy2             = _mm256_add_pd(fiy2,ty);
705             fiz2             = _mm256_add_pd(fiz2,tz);
706
707             fjx2             = _mm256_add_pd(fjx2,tx);
708             fjy2             = _mm256_add_pd(fjy2,ty);
709             fjz2             = _mm256_add_pd(fjz2,tz);
710
711             }
712
713             /**************************
714              * CALCULATE INTERACTIONS *
715              **************************/
716
717             if (gmx_mm256_any_lt(rsq23,rcutoff2))
718             {
719
720             r23              = _mm256_mul_pd(rsq23,rinv23);
721
722             /* EWALD ELECTROSTATICS */
723
724             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
725             ewrt             = _mm256_mul_pd(r23,ewtabscale);
726             ewitab           = _mm256_cvttpd_epi32(ewrt);
727             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
728             ewitab           = _mm_slli_epi32(ewitab,2);
729             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
730             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
731             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
732             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
733             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
734             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
735             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
736             velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
737             felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
738
739             d                = _mm256_sub_pd(r23,rswitch);
740             d                = _mm256_max_pd(d,_mm256_setzero_pd());
741             d2               = _mm256_mul_pd(d,d);
742             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
743
744             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
745
746             /* Evaluate switch function */
747             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
748             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
749             velec            = _mm256_mul_pd(velec,sw);
750             cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
751
752             /* Update potential sum for this i atom from the interaction with this j atom. */
753             velec            = _mm256_and_pd(velec,cutoff_mask);
754             velecsum         = _mm256_add_pd(velecsum,velec);
755
756             fscal            = felec;
757
758             fscal            = _mm256_and_pd(fscal,cutoff_mask);
759
760             /* Calculate temporary vectorial force */
761             tx               = _mm256_mul_pd(fscal,dx23);
762             ty               = _mm256_mul_pd(fscal,dy23);
763             tz               = _mm256_mul_pd(fscal,dz23);
764
765             /* Update vectorial force */
766             fix2             = _mm256_add_pd(fix2,tx);
767             fiy2             = _mm256_add_pd(fiy2,ty);
768             fiz2             = _mm256_add_pd(fiz2,tz);
769
770             fjx3             = _mm256_add_pd(fjx3,tx);
771             fjy3             = _mm256_add_pd(fjy3,ty);
772             fjz3             = _mm256_add_pd(fjz3,tz);
773
774             }
775
776             /**************************
777              * CALCULATE INTERACTIONS *
778              **************************/
779
780             if (gmx_mm256_any_lt(rsq31,rcutoff2))
781             {
782
783             r31              = _mm256_mul_pd(rsq31,rinv31);
784
785             /* EWALD ELECTROSTATICS */
786
787             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
788             ewrt             = _mm256_mul_pd(r31,ewtabscale);
789             ewitab           = _mm256_cvttpd_epi32(ewrt);
790             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
791             ewitab           = _mm_slli_epi32(ewitab,2);
792             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
793             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
794             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
795             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
796             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
797             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
798             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
799             velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
800             felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
801
802             d                = _mm256_sub_pd(r31,rswitch);
803             d                = _mm256_max_pd(d,_mm256_setzero_pd());
804             d2               = _mm256_mul_pd(d,d);
805             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
806
807             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
808
809             /* Evaluate switch function */
810             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
811             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
812             velec            = _mm256_mul_pd(velec,sw);
813             cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
814
815             /* Update potential sum for this i atom from the interaction with this j atom. */
816             velec            = _mm256_and_pd(velec,cutoff_mask);
817             velecsum         = _mm256_add_pd(velecsum,velec);
818
819             fscal            = felec;
820
821             fscal            = _mm256_and_pd(fscal,cutoff_mask);
822
823             /* Calculate temporary vectorial force */
824             tx               = _mm256_mul_pd(fscal,dx31);
825             ty               = _mm256_mul_pd(fscal,dy31);
826             tz               = _mm256_mul_pd(fscal,dz31);
827
828             /* Update vectorial force */
829             fix3             = _mm256_add_pd(fix3,tx);
830             fiy3             = _mm256_add_pd(fiy3,ty);
831             fiz3             = _mm256_add_pd(fiz3,tz);
832
833             fjx1             = _mm256_add_pd(fjx1,tx);
834             fjy1             = _mm256_add_pd(fjy1,ty);
835             fjz1             = _mm256_add_pd(fjz1,tz);
836
837             }
838
839             /**************************
840              * CALCULATE INTERACTIONS *
841              **************************/
842
843             if (gmx_mm256_any_lt(rsq32,rcutoff2))
844             {
845
846             r32              = _mm256_mul_pd(rsq32,rinv32);
847
848             /* EWALD ELECTROSTATICS */
849
850             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
851             ewrt             = _mm256_mul_pd(r32,ewtabscale);
852             ewitab           = _mm256_cvttpd_epi32(ewrt);
853             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
854             ewitab           = _mm_slli_epi32(ewitab,2);
855             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
856             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
857             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
858             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
859             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
860             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
861             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
862             velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
863             felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
864
865             d                = _mm256_sub_pd(r32,rswitch);
866             d                = _mm256_max_pd(d,_mm256_setzero_pd());
867             d2               = _mm256_mul_pd(d,d);
868             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
869
870             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
871
872             /* Evaluate switch function */
873             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
874             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
875             velec            = _mm256_mul_pd(velec,sw);
876             cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
877
878             /* Update potential sum for this i atom from the interaction with this j atom. */
879             velec            = _mm256_and_pd(velec,cutoff_mask);
880             velecsum         = _mm256_add_pd(velecsum,velec);
881
882             fscal            = felec;
883
884             fscal            = _mm256_and_pd(fscal,cutoff_mask);
885
886             /* Calculate temporary vectorial force */
887             tx               = _mm256_mul_pd(fscal,dx32);
888             ty               = _mm256_mul_pd(fscal,dy32);
889             tz               = _mm256_mul_pd(fscal,dz32);
890
891             /* Update vectorial force */
892             fix3             = _mm256_add_pd(fix3,tx);
893             fiy3             = _mm256_add_pd(fiy3,ty);
894             fiz3             = _mm256_add_pd(fiz3,tz);
895
896             fjx2             = _mm256_add_pd(fjx2,tx);
897             fjy2             = _mm256_add_pd(fjy2,ty);
898             fjz2             = _mm256_add_pd(fjz2,tz);
899
900             }
901
902             /**************************
903              * CALCULATE INTERACTIONS *
904              **************************/
905
906             if (gmx_mm256_any_lt(rsq33,rcutoff2))
907             {
908
909             r33              = _mm256_mul_pd(rsq33,rinv33);
910
911             /* EWALD ELECTROSTATICS */
912
913             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
914             ewrt             = _mm256_mul_pd(r33,ewtabscale);
915             ewitab           = _mm256_cvttpd_epi32(ewrt);
916             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
917             ewitab           = _mm_slli_epi32(ewitab,2);
918             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
919             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
920             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
921             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
922             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
923             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
924             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
925             velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
926             felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
927
928             d                = _mm256_sub_pd(r33,rswitch);
929             d                = _mm256_max_pd(d,_mm256_setzero_pd());
930             d2               = _mm256_mul_pd(d,d);
931             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
932
933             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
934
935             /* Evaluate switch function */
936             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
937             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
938             velec            = _mm256_mul_pd(velec,sw);
939             cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
940
941             /* Update potential sum for this i atom from the interaction with this j atom. */
942             velec            = _mm256_and_pd(velec,cutoff_mask);
943             velecsum         = _mm256_add_pd(velecsum,velec);
944
945             fscal            = felec;
946
947             fscal            = _mm256_and_pd(fscal,cutoff_mask);
948
949             /* Calculate temporary vectorial force */
950             tx               = _mm256_mul_pd(fscal,dx33);
951             ty               = _mm256_mul_pd(fscal,dy33);
952             tz               = _mm256_mul_pd(fscal,dz33);
953
954             /* Update vectorial force */
955             fix3             = _mm256_add_pd(fix3,tx);
956             fiy3             = _mm256_add_pd(fiy3,ty);
957             fiz3             = _mm256_add_pd(fiz3,tz);
958
959             fjx3             = _mm256_add_pd(fjx3,tx);
960             fjy3             = _mm256_add_pd(fjy3,ty);
961             fjz3             = _mm256_add_pd(fjz3,tz);
962
963             }
964
965             fjptrA             = f+j_coord_offsetA;
966             fjptrB             = f+j_coord_offsetB;
967             fjptrC             = f+j_coord_offsetC;
968             fjptrD             = f+j_coord_offsetD;
969
970             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
971                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
972                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
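            /* Scatter the accumulated j forces back to memory. By Newton's
             * third law each pair force added to the i accumulators must be
             * subtracted from the corresponding j atom, which is why the j
             * sums are decremented into f[] rather than added.
             */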
973
974             /* Inner loop uses 647 flops */
975         }
976
977         if(jidx<j_index_end)
978         {
979
980             /* Get j neighbor index, and coordinate index */
981             jnrlistA         = jjnr[jidx];
982             jnrlistB         = jjnr[jidx+1];
983             jnrlistC         = jjnr[jidx+2];
984             jnrlistD         = jjnr[jidx+3];
985             /* Sign of each element will be negative for non-real atoms.
986              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
987              * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
988              */
989             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
990
991             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
992             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
993             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
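            /* The 32-bit comparison yields one mask element per j index;
             * duplicating each element with the permutes and concatenating the
             * two halves widens the masks to the 64-bit lanes of the __m256d
             * arithmetic, so dummy_mask lines up with the double-precision
             * data it will clear.
             */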
994
995             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
996             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
997             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
998             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
999             j_coord_offsetA  = DIM*jnrA;
1000             j_coord_offsetB  = DIM*jnrB;
1001             j_coord_offsetC  = DIM*jnrC;
1002             j_coord_offsetD  = DIM*jnrD;
1003
1004             /* load j atom coordinates */
1005             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1006                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1007                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1008                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
1009
1010             /* Calculate displacement vector */
1011             dx00             = _mm256_sub_pd(ix0,jx0);
1012             dy00             = _mm256_sub_pd(iy0,jy0);
1013             dz00             = _mm256_sub_pd(iz0,jz0);
1014             dx11             = _mm256_sub_pd(ix1,jx1);
1015             dy11             = _mm256_sub_pd(iy1,jy1);
1016             dz11             = _mm256_sub_pd(iz1,jz1);
1017             dx12             = _mm256_sub_pd(ix1,jx2);
1018             dy12             = _mm256_sub_pd(iy1,jy2);
1019             dz12             = _mm256_sub_pd(iz1,jz2);
1020             dx13             = _mm256_sub_pd(ix1,jx3);
1021             dy13             = _mm256_sub_pd(iy1,jy3);
1022             dz13             = _mm256_sub_pd(iz1,jz3);
1023             dx21             = _mm256_sub_pd(ix2,jx1);
1024             dy21             = _mm256_sub_pd(iy2,jy1);
1025             dz21             = _mm256_sub_pd(iz2,jz1);
1026             dx22             = _mm256_sub_pd(ix2,jx2);
1027             dy22             = _mm256_sub_pd(iy2,jy2);
1028             dz22             = _mm256_sub_pd(iz2,jz2);
1029             dx23             = _mm256_sub_pd(ix2,jx3);
1030             dy23             = _mm256_sub_pd(iy2,jy3);
1031             dz23             = _mm256_sub_pd(iz2,jz3);
1032             dx31             = _mm256_sub_pd(ix3,jx1);
1033             dy31             = _mm256_sub_pd(iy3,jy1);
1034             dz31             = _mm256_sub_pd(iz3,jz1);
1035             dx32             = _mm256_sub_pd(ix3,jx2);
1036             dy32             = _mm256_sub_pd(iy3,jy2);
1037             dz32             = _mm256_sub_pd(iz3,jz2);
1038             dx33             = _mm256_sub_pd(ix3,jx3);
1039             dy33             = _mm256_sub_pd(iy3,jy3);
1040             dz33             = _mm256_sub_pd(iz3,jz3);
1041
1042             /* Calculate squared distance and things based on it */
1043             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1044             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1045             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1046             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
1047             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1048             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1049             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
1050             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
1051             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
1052             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
1053
1054             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1055             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1056             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1057             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
1058             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1059             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1060             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
1061             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
1062             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
1063             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
1064
1065             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1066             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1067             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1068             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
1069             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1070             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1071             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
1072             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
1073             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
1074             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
1075
1076             fjx0             = _mm256_setzero_pd();
1077             fjy0             = _mm256_setzero_pd();
1078             fjz0             = _mm256_setzero_pd();
1079             fjx1             = _mm256_setzero_pd();
1080             fjy1             = _mm256_setzero_pd();
1081             fjz1             = _mm256_setzero_pd();
1082             fjx2             = _mm256_setzero_pd();
1083             fjy2             = _mm256_setzero_pd();
1084             fjz2             = _mm256_setzero_pd();
1085             fjx3             = _mm256_setzero_pd();
1086             fjy3             = _mm256_setzero_pd();
1087             fjz3             = _mm256_setzero_pd();
1088
1089             /**************************
1090              * CALCULATE INTERACTIONS *
1091              **************************/
1092
1093             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1094             {
1095
1096             r00              = _mm256_mul_pd(rsq00,rinv00);
1097             r00              = _mm256_andnot_pd(dummy_mask,r00);
1098
1099             /* LENNARD-JONES DISPERSION/REPULSION */
1100
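            /* Note: c6_00/c12_00 loaded from fr->nbfp appear to be the LJ coefficients
             * pre-scaled by 6 and 12 (the usual GROMACS nbfp convention), so
             * vvdw12/12 - vvdw6/6 below is the plain C12/r^12 - C6/r^6 potential and
             * (vvdw12 - vvdw6)/r^2 is the corresponding -dV/dr divided by r.
             */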
1101             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1102             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
1103             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
1104             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
1105             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
1106
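            /* Potential-switch region: with d = max(r - rswitch, 0) and D = rcutoff - rswitch,
             * the swV3..swV5/swF2..swF4 constants set up in the kernel preamble evaluate
             *   sw  = 1 - 10(d/D)^3 + 15(d/D)^4 - 6(d/D)^5
             *   dsw = d(sw)/dr = (-30(d/D)^2 + 60(d/D)^3 - 30(d/D)^4)/D
             * so both the potential and the force go smoothly to zero at the cutoff.
             */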
1107             d                = _mm256_sub_pd(r00,rswitch);
1108             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1109             d2               = _mm256_mul_pd(d,d);
1110             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1111
1112             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1113
1114             /* Evaluate switch function */
1115             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1116             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
1117             vvdw             = _mm256_mul_pd(vvdw,sw);
1118             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1119
1120             /* Update potential sum for this i atom from the interaction with this j atom. */
1121             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
1122             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
1123             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
1124
1125             fscal            = fvdw;
1126
1127             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1128
1129             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1130
1131             /* Calculate temporary vectorial force */
1132             tx               = _mm256_mul_pd(fscal,dx00);
1133             ty               = _mm256_mul_pd(fscal,dy00);
1134             tz               = _mm256_mul_pd(fscal,dz00);
1135
1136             /* Update vectorial force */
1137             fix0             = _mm256_add_pd(fix0,tx);
1138             fiy0             = _mm256_add_pd(fiy0,ty);
1139             fiz0             = _mm256_add_pd(fiz0,tz);
1140
1141             fjx0             = _mm256_add_pd(fjx0,tx);
1142             fjy0             = _mm256_add_pd(fjy0,ty);
1143             fjz0             = _mm256_add_pd(fjz0,tz);
1144
1145             }
1146
1147             /**************************
1148              * CALCULATE INTERACTIONS *
1149              **************************/
1150
1151             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1152             {
1153
1154             r11              = _mm256_mul_pd(rsq11,rinv11);
1155             r11              = _mm256_andnot_pd(dummy_mask,r11);
1156
1157             /* EWALD ELECTROSTATICS */
1158
1159             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1160             ewrt             = _mm256_mul_pd(r11,ewtabscale);
1161             ewitab           = _mm256_cvttpd_epi32(ewrt);
1162             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1163             ewitab           = _mm_slli_epi32(ewitab,2);
1164             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1165             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1166             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1167             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1168             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1169             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1170             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1171             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
1172             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
1173
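            /* Table lookup sketch (assuming the usual FDV0 layout of fr->ic->tabq_coul_FDV0,
             * i.e. {F_i, F_{i+1}-F_i, V_i, 0} per table point): ewtabF + eweps*ewtabD is the
             * linearly interpolated force entry, and ewtabV - ewtabhalfspace*eweps*(F_i + F_interp)
             * the consistent potential entry (ewtabhalfspace = half the table spacing).
             * velec/felec above then subtract this tabulated Ewald correction from the
             * plain 1/r Coulomb terms.
             */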
1174             d                = _mm256_sub_pd(r11,rswitch);
1175             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1176             d2               = _mm256_mul_pd(d,d);
1177             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1178
1179             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1180
1181             /* Evaluate switch function */
1182             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1183             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
1184             velec            = _mm256_mul_pd(velec,sw);
1185             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1186
1187             /* Update potential sum for this i atom from the interaction with this j atom. */
1188             velec            = _mm256_and_pd(velec,cutoff_mask);
1189             velec            = _mm256_andnot_pd(dummy_mask,velec);
1190             velecsum         = _mm256_add_pd(velecsum,velec);
1191
1192             fscal            = felec;
1193
1194             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1195
1196             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1197
1198             /* Calculate temporary vectorial force */
1199             tx               = _mm256_mul_pd(fscal,dx11);
1200             ty               = _mm256_mul_pd(fscal,dy11);
1201             tz               = _mm256_mul_pd(fscal,dz11);
1202
1203             /* Update vectorial force */
1204             fix1             = _mm256_add_pd(fix1,tx);
1205             fiy1             = _mm256_add_pd(fiy1,ty);
1206             fiz1             = _mm256_add_pd(fiz1,tz);
1207
1208             fjx1             = _mm256_add_pd(fjx1,tx);
1209             fjy1             = _mm256_add_pd(fjy1,ty);
1210             fjz1             = _mm256_add_pd(fjz1,tz);
1211
1212             }
1213
1214             /**************************
1215              * CALCULATE INTERACTIONS *
1216              **************************/
1217
1218             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1219             {
1220
1221             r12              = _mm256_mul_pd(rsq12,rinv12);
1222             r12              = _mm256_andnot_pd(dummy_mask,r12);
1223
1224             /* EWALD ELECTROSTATICS */
1225
1226             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1227             ewrt             = _mm256_mul_pd(r12,ewtabscale);
1228             ewitab           = _mm256_cvttpd_epi32(ewrt);
1229             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1230             ewitab           = _mm_slli_epi32(ewitab,2);
1231             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1232             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1233             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1234             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1235             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1236             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1237             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1238             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
1239             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
1240
1241             d                = _mm256_sub_pd(r12,rswitch);
1242             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1243             d2               = _mm256_mul_pd(d,d);
1244             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1245
1246             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1247
1248             /* Evaluate switch function */
1249             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1250             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
1251             velec            = _mm256_mul_pd(velec,sw);
1252             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1253
1254             /* Update potential sum for this i atom from the interaction with this j atom. */
1255             velec            = _mm256_and_pd(velec,cutoff_mask);
1256             velec            = _mm256_andnot_pd(dummy_mask,velec);
1257             velecsum         = _mm256_add_pd(velecsum,velec);
1258
1259             fscal            = felec;
1260
1261             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1262
1263             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1264
1265             /* Calculate temporary vectorial force */
1266             tx               = _mm256_mul_pd(fscal,dx12);
1267             ty               = _mm256_mul_pd(fscal,dy12);
1268             tz               = _mm256_mul_pd(fscal,dz12);
1269
1270             /* Update vectorial force */
1271             fix1             = _mm256_add_pd(fix1,tx);
1272             fiy1             = _mm256_add_pd(fiy1,ty);
1273             fiz1             = _mm256_add_pd(fiz1,tz);
1274
1275             fjx2             = _mm256_add_pd(fjx2,tx);
1276             fjy2             = _mm256_add_pd(fjy2,ty);
1277             fjz2             = _mm256_add_pd(fjz2,tz);
1278
1279             }
1280
1281             /**************************
1282              * CALCULATE INTERACTIONS *
1283              **************************/
1284
1285             if (gmx_mm256_any_lt(rsq13,rcutoff2))
1286             {
1287
1288             r13              = _mm256_mul_pd(rsq13,rinv13);
1289             r13              = _mm256_andnot_pd(dummy_mask,r13);
1290
1291             /* EWALD ELECTROSTATICS */
1292
1293             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1294             ewrt             = _mm256_mul_pd(r13,ewtabscale);
1295             ewitab           = _mm256_cvttpd_epi32(ewrt);
1296             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1297             ewitab           = _mm_slli_epi32(ewitab,2);
1298             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1299             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1300             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1301             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1302             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1303             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1304             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1305             velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
1306             felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
1307
1308             d                = _mm256_sub_pd(r13,rswitch);
1309             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1310             d2               = _mm256_mul_pd(d,d);
1311             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1312
1313             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1314
1315             /* Evaluate switch function */
1316             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1317             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
1318             velec            = _mm256_mul_pd(velec,sw);
1319             cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
1320
1321             /* Update potential sum for this i atom from the interaction with this j atom. */
1322             velec            = _mm256_and_pd(velec,cutoff_mask);
1323             velec            = _mm256_andnot_pd(dummy_mask,velec);
1324             velecsum         = _mm256_add_pd(velecsum,velec);
1325
1326             fscal            = felec;
1327
1328             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1329
1330             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1331
1332             /* Calculate temporary vectorial force */
1333             tx               = _mm256_mul_pd(fscal,dx13);
1334             ty               = _mm256_mul_pd(fscal,dy13);
1335             tz               = _mm256_mul_pd(fscal,dz13);
1336
1337             /* Update vectorial force */
1338             fix1             = _mm256_add_pd(fix1,tx);
1339             fiy1             = _mm256_add_pd(fiy1,ty);
1340             fiz1             = _mm256_add_pd(fiz1,tz);
1341
1342             fjx3             = _mm256_add_pd(fjx3,tx);
1343             fjy3             = _mm256_add_pd(fjy3,ty);
1344             fjz3             = _mm256_add_pd(fjz3,tz);
1345
1346             }
1347
1348             /**************************
1349              * CALCULATE INTERACTIONS *
1350              **************************/
1351
1352             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1353             {
1354
1355             r21              = _mm256_mul_pd(rsq21,rinv21);
1356             r21              = _mm256_andnot_pd(dummy_mask,r21);
1357
1358             /* EWALD ELECTROSTATICS */
1359
1360             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1361             ewrt             = _mm256_mul_pd(r21,ewtabscale);
1362             ewitab           = _mm256_cvttpd_epi32(ewrt);
1363             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1364             ewitab           = _mm_slli_epi32(ewitab,2);
1365             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1366             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1367             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1368             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1369             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1370             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1371             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1372             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
1373             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
1374
1375             d                = _mm256_sub_pd(r21,rswitch);
1376             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1377             d2               = _mm256_mul_pd(d,d);
1378             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1379
1380             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1381
1382             /* Evaluate switch function */
1383             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1384             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
1385             velec            = _mm256_mul_pd(velec,sw);
1386             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1387
1388             /* Update potential sum for this i atom from the interaction with this j atom. */
1389             velec            = _mm256_and_pd(velec,cutoff_mask);
1390             velec            = _mm256_andnot_pd(dummy_mask,velec);
1391             velecsum         = _mm256_add_pd(velecsum,velec);
1392
1393             fscal            = felec;
1394
1395             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1396
1397             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1398
1399             /* Calculate temporary vectorial force */
1400             tx               = _mm256_mul_pd(fscal,dx21);
1401             ty               = _mm256_mul_pd(fscal,dy21);
1402             tz               = _mm256_mul_pd(fscal,dz21);
1403
1404             /* Update vectorial force */
1405             fix2             = _mm256_add_pd(fix2,tx);
1406             fiy2             = _mm256_add_pd(fiy2,ty);
1407             fiz2             = _mm256_add_pd(fiz2,tz);
1408
1409             fjx1             = _mm256_add_pd(fjx1,tx);
1410             fjy1             = _mm256_add_pd(fjy1,ty);
1411             fjz1             = _mm256_add_pd(fjz1,tz);
1412
1413             }
1414
1415             /**************************
1416              * CALCULATE INTERACTIONS *
1417              **************************/
1418
1419             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1420             {
1421
1422             r22              = _mm256_mul_pd(rsq22,rinv22);
1423             r22              = _mm256_andnot_pd(dummy_mask,r22);
1424
1425             /* EWALD ELECTROSTATICS */
1426
1427             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1428             ewrt             = _mm256_mul_pd(r22,ewtabscale);
1429             ewitab           = _mm256_cvttpd_epi32(ewrt);
1430             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1431             ewitab           = _mm_slli_epi32(ewitab,2);
1432             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1433             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1434             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1435             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1436             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1437             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1438             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1439             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
1440             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
1441
1442             d                = _mm256_sub_pd(r22,rswitch);
1443             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1444             d2               = _mm256_mul_pd(d,d);
1445             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1446
1447             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1448
1449             /* Evaluate switch function */
1450             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1451             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
1452             velec            = _mm256_mul_pd(velec,sw);
1453             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1454
1455             /* Update potential sum for this i atom from the interaction with this j atom. */
1456             velec            = _mm256_and_pd(velec,cutoff_mask);
1457             velec            = _mm256_andnot_pd(dummy_mask,velec);
1458             velecsum         = _mm256_add_pd(velecsum,velec);
1459
1460             fscal            = felec;
1461
1462             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1463
1464             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1465
1466             /* Calculate temporary vectorial force */
1467             tx               = _mm256_mul_pd(fscal,dx22);
1468             ty               = _mm256_mul_pd(fscal,dy22);
1469             tz               = _mm256_mul_pd(fscal,dz22);
1470
1471             /* Update vectorial force */
1472             fix2             = _mm256_add_pd(fix2,tx);
1473             fiy2             = _mm256_add_pd(fiy2,ty);
1474             fiz2             = _mm256_add_pd(fiz2,tz);
1475
1476             fjx2             = _mm256_add_pd(fjx2,tx);
1477             fjy2             = _mm256_add_pd(fjy2,ty);
1478             fjz2             = _mm256_add_pd(fjz2,tz);
1479
1480             }
1481
1482             /**************************
1483              * CALCULATE INTERACTIONS *
1484              **************************/
1485
1486             if (gmx_mm256_any_lt(rsq23,rcutoff2))
1487             {
1488
1489             r23              = _mm256_mul_pd(rsq23,rinv23);
1490             r23              = _mm256_andnot_pd(dummy_mask,r23);
1491
1492             /* EWALD ELECTROSTATICS */
1493
1494             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1495             ewrt             = _mm256_mul_pd(r23,ewtabscale);
1496             ewitab           = _mm256_cvttpd_epi32(ewrt);
1497             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1498             ewitab           = _mm_slli_epi32(ewitab,2);
1499             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1500             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1501             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1502             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1503             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1504             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1505             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1506             velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
1507             felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
1508
1509             d                = _mm256_sub_pd(r23,rswitch);
1510             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1511             d2               = _mm256_mul_pd(d,d);
1512             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1513
1514             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1515
1516             /* Evaluate switch function */
1517             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1518             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
1519             velec            = _mm256_mul_pd(velec,sw);
1520             cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
1521
1522             /* Update potential sum for this i atom from the interaction with this j atom. */
1523             velec            = _mm256_and_pd(velec,cutoff_mask);
1524             velec            = _mm256_andnot_pd(dummy_mask,velec);
1525             velecsum         = _mm256_add_pd(velecsum,velec);
1526
1527             fscal            = felec;
1528
1529             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1530
1531             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1532
1533             /* Calculate temporary vectorial force */
1534             tx               = _mm256_mul_pd(fscal,dx23);
1535             ty               = _mm256_mul_pd(fscal,dy23);
1536             tz               = _mm256_mul_pd(fscal,dz23);
1537
1538             /* Update vectorial force */
1539             fix2             = _mm256_add_pd(fix2,tx);
1540             fiy2             = _mm256_add_pd(fiy2,ty);
1541             fiz2             = _mm256_add_pd(fiz2,tz);
1542
1543             fjx3             = _mm256_add_pd(fjx3,tx);
1544             fjy3             = _mm256_add_pd(fjy3,ty);
1545             fjz3             = _mm256_add_pd(fjz3,tz);
1546
1547             }
1548
1549             /**************************
1550              * CALCULATE INTERACTIONS *
1551              **************************/
1552
1553             if (gmx_mm256_any_lt(rsq31,rcutoff2))
1554             {
1555
1556             r31              = _mm256_mul_pd(rsq31,rinv31);
1557             r31              = _mm256_andnot_pd(dummy_mask,r31);
1558
1559             /* EWALD ELECTROSTATICS */
1560
1561             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1562             ewrt             = _mm256_mul_pd(r31,ewtabscale);
1563             ewitab           = _mm256_cvttpd_epi32(ewrt);
1564             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1565             ewitab           = _mm_slli_epi32(ewitab,2);
1566             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1567             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1568             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1569             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1570             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1571             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1572             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1573             velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
1574             felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
1575
1576             d                = _mm256_sub_pd(r31,rswitch);
1577             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1578             d2               = _mm256_mul_pd(d,d);
1579             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1580
1581             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1582
1583             /* Evaluate switch function */
1584             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1585             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
1586             velec            = _mm256_mul_pd(velec,sw);
1587             cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
1588
1589             /* Update potential sum for this i atom from the interaction with this j atom. */
1590             velec            = _mm256_and_pd(velec,cutoff_mask);
1591             velec            = _mm256_andnot_pd(dummy_mask,velec);
1592             velecsum         = _mm256_add_pd(velecsum,velec);
1593
1594             fscal            = felec;
1595
1596             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1597
1598             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1599
1600             /* Calculate temporary vectorial force */
1601             tx               = _mm256_mul_pd(fscal,dx31);
1602             ty               = _mm256_mul_pd(fscal,dy31);
1603             tz               = _mm256_mul_pd(fscal,dz31);
1604
1605             /* Update vectorial force */
1606             fix3             = _mm256_add_pd(fix3,tx);
1607             fiy3             = _mm256_add_pd(fiy3,ty);
1608             fiz3             = _mm256_add_pd(fiz3,tz);
1609
1610             fjx1             = _mm256_add_pd(fjx1,tx);
1611             fjy1             = _mm256_add_pd(fjy1,ty);
1612             fjz1             = _mm256_add_pd(fjz1,tz);
1613
1614             }
1615
1616             /**************************
1617              * CALCULATE INTERACTIONS *
1618              **************************/
1619
1620             if (gmx_mm256_any_lt(rsq32,rcutoff2))
1621             {
1622
1623             r32              = _mm256_mul_pd(rsq32,rinv32);
1624             r32              = _mm256_andnot_pd(dummy_mask,r32);
1625
1626             /* EWALD ELECTROSTATICS */
1627
1628             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1629             ewrt             = _mm256_mul_pd(r32,ewtabscale);
1630             ewitab           = _mm256_cvttpd_epi32(ewrt);
1631             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1632             ewitab           = _mm_slli_epi32(ewitab,2);
1633             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1634             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1635             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1636             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1637             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1638             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1639             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1640             velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
1641             felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
1642
1643             d                = _mm256_sub_pd(r32,rswitch);
1644             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1645             d2               = _mm256_mul_pd(d,d);
1646             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1647
1648             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1649
1650             /* Evaluate switch function */
1651             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1652             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
1653             velec            = _mm256_mul_pd(velec,sw);
1654             cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
1655
1656             /* Update potential sum for this i atom from the interaction with this j atom. */
1657             velec            = _mm256_and_pd(velec,cutoff_mask);
1658             velec            = _mm256_andnot_pd(dummy_mask,velec);
1659             velecsum         = _mm256_add_pd(velecsum,velec);
1660
1661             fscal            = felec;
1662
1663             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1664
1665             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1666
1667             /* Calculate temporary vectorial force */
1668             tx               = _mm256_mul_pd(fscal,dx32);
1669             ty               = _mm256_mul_pd(fscal,dy32);
1670             tz               = _mm256_mul_pd(fscal,dz32);
1671
1672             /* Update vectorial force */
1673             fix3             = _mm256_add_pd(fix3,tx);
1674             fiy3             = _mm256_add_pd(fiy3,ty);
1675             fiz3             = _mm256_add_pd(fiz3,tz);
1676
1677             fjx2             = _mm256_add_pd(fjx2,tx);
1678             fjy2             = _mm256_add_pd(fjy2,ty);
1679             fjz2             = _mm256_add_pd(fjz2,tz);
1680
1681             }
1682
1683             /**************************
1684              * CALCULATE INTERACTIONS *
1685              **************************/
1686
1687             if (gmx_mm256_any_lt(rsq33,rcutoff2))
1688             {
1689
1690             r33              = _mm256_mul_pd(rsq33,rinv33);
1691             r33              = _mm256_andnot_pd(dummy_mask,r33);
1692
1693             /* EWALD ELECTROSTATICS */
1694
1695             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1696             ewrt             = _mm256_mul_pd(r33,ewtabscale);
1697             ewitab           = _mm256_cvttpd_epi32(ewrt);
1698             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1699             ewitab           = _mm_slli_epi32(ewitab,2);
1700             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1701             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1702             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1703             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1704             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1705             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1706             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1707             velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
1708             felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
1709
1710             d                = _mm256_sub_pd(r33,rswitch);
1711             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1712             d2               = _mm256_mul_pd(d,d);
1713             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1714
1715             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1716
1717             /* Evaluate switch function */
1718             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1719             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
1720             velec            = _mm256_mul_pd(velec,sw);
1721             cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
1722
1723             /* Update potential sum for this i atom from the interaction with this j atom. */
1724             velec            = _mm256_and_pd(velec,cutoff_mask);
1725             velec            = _mm256_andnot_pd(dummy_mask,velec);
1726             velecsum         = _mm256_add_pd(velecsum,velec);
1727
1728             fscal            = felec;
1729
1730             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1731
1732             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1733
1734             /* Calculate temporary vectorial force */
1735             tx               = _mm256_mul_pd(fscal,dx33);
1736             ty               = _mm256_mul_pd(fscal,dy33);
1737             tz               = _mm256_mul_pd(fscal,dz33);
1738
1739             /* Update vectorial force */
1740             fix3             = _mm256_add_pd(fix3,tx);
1741             fiy3             = _mm256_add_pd(fiy3,ty);
1742             fiz3             = _mm256_add_pd(fiz3,tz);
1743
1744             fjx3             = _mm256_add_pd(fjx3,tx);
1745             fjy3             = _mm256_add_pd(fjy3,ty);
1746             fjz3             = _mm256_add_pd(fjz3,tz);
1747
1748             }
1749
1750             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1751             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1752             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1753             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1754
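            /* Scatter-subtract the accumulated j forces. Lanes belonging to padded neighbor
             * entries were pointed at the local scratch buffer above, so their contributions
             * are simply discarded instead of corrupting real force entries.
             */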
1755             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1756                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
1757                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
1758
1759             /* Inner loop uses 657 flops */
1760         }
1761
1762         /* End of innermost loop */
1763
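        /* Reduce the four per-site i force accumulators across the SIMD lanes and add the
         * result to the force array and to the shift-force array used for the virial.
         */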
1764         gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1765                                                  f+i_coord_offset,fshift+i_shift_offset);
1766
1767         ggid                        = gid[iidx];
1768         /* Update potential energies */
1769         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1770         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
1771
1772         /* Increment number of inner iterations */
1773         inneriter                  += j_index_end - j_index_start;
1774
1775         /* Outer loop uses 26 flops */
1776     }
1777
1778     /* Increment number of outer iterations */
1779     outeriter        += nri;
1780
1781     /* Update outer/inner flops */
1782
1783     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_VF,outeriter*26 + inneriter*657);
1784 }
1785 /*
1786  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_double
1787  * Electrostatics interaction: Ewald
1788  * VdW interaction:            LennardJones
1789  * Geometry:                   Water4-Water4
1790  * Calculate force/pot:        Force
1791  */
1792 void
1793 nb_kernel_ElecEwSw_VdwLJSw_GeomW4W4_F_avx_256_double
1794                     (t_nblist                    * gmx_restrict       nlist,
1795                      rvec                        * gmx_restrict          xx,
1796                      rvec                        * gmx_restrict          ff,
1797                      t_forcerec                  * gmx_restrict          fr,
1798                      t_mdatoms                   * gmx_restrict     mdatoms,
1799                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1800                      t_nrnb                      * gmx_restrict        nrnb)
1801 {
1802     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1803      * just 0 for non-waters.
1804      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1805      * jnr indices corresponding to data put in the four positions in the SIMD register.
1806      */
1807     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1808     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1809     int              jnrA,jnrB,jnrC,jnrD;
1810     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1811     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1812     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1813     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1814     real             rcutoff_scalar;
1815     real             *shiftvec,*fshift,*x,*f;
1816     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1817     real             scratch[4*DIM];
1818     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1819     real *           vdwioffsetptr0;
1820     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1821     real *           vdwioffsetptr1;
1822     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1823     real *           vdwioffsetptr2;
1824     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1825     real *           vdwioffsetptr3;
1826     __m256d          ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
1827     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1828     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1829     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1830     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1831     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1832     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1833     int              vdwjidx3A,vdwjidx3B,vdwjidx3C,vdwjidx3D;
1834     __m256d          jx3,jy3,jz3,fjx3,fjy3,fjz3,jq3,isaj3;
1835     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1836     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1837     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1838     __m256d          dx13,dy13,dz13,rsq13,rinv13,rinvsq13,r13,qq13,c6_13,c12_13;
1839     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1840     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1841     __m256d          dx23,dy23,dz23,rsq23,rinv23,rinvsq23,r23,qq23,c6_23,c12_23;
1842     __m256d          dx31,dy31,dz31,rsq31,rinv31,rinvsq31,r31,qq31,c6_31,c12_31;
1843     __m256d          dx32,dy32,dz32,rsq32,rinv32,rinvsq32,r32,qq32,c6_32,c12_32;
1844     __m256d          dx33,dy33,dz33,rsq33,rinv33,rinvsq33,r33,qq33,c6_33,c12_33;
1845     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
1846     real             *charge;
1847     int              nvdwtype;
1848     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1849     int              *vdwtype;
1850     real             *vdwparam;
1851     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
1852     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
1853     __m128i          ewitab;
1854     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1855     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1856     real             *ewtab;
1857     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
1858     real             rswitch_scalar,d_scalar;
1859     __m256d          dummy_mask,cutoff_mask;
1860     __m128           tmpmask0,tmpmask1;
1861     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1862     __m256d          one     = _mm256_set1_pd(1.0);
1863     __m256d          two     = _mm256_set1_pd(2.0);
1864     x                = xx[0];
1865     f                = ff[0];
1866
1867     nri              = nlist->nri;
1868     iinr             = nlist->iinr;
1869     jindex           = nlist->jindex;
1870     jjnr             = nlist->jjnr;
1871     shiftidx         = nlist->shift;
1872     gid              = nlist->gid;
1873     shiftvec         = fr->shift_vec[0];
1874     fshift           = fr->fshift[0];
1875     facel            = _mm256_set1_pd(fr->epsfac);
1876     charge           = mdatoms->chargeA;
1877     nvdwtype         = fr->ntype;
1878     vdwparam         = fr->nbfp;
1879     vdwtype          = mdatoms->typeA;
1880
1881     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
1882     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
1883     beta2            = _mm256_mul_pd(beta,beta);
1884     beta3            = _mm256_mul_pd(beta,beta2);
1885
1886     ewtab            = fr->ic->tabq_coul_FDV0;
1887     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
1888     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
1889
1890     /* Setup water-specific parameters */
1891     inr              = nlist->iinr[0];
1892     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1893     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1894     iq3              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+3]));
1895     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1896
1897     jq1              = _mm256_set1_pd(charge[inr+1]);
1898     jq2              = _mm256_set1_pd(charge[inr+2]);
1899     jq3              = _mm256_set1_pd(charge[inr+3]);
1900     vdwjidx0A        = 2*vdwtype[inr+0];
1901     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1902     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1903     qq11             = _mm256_mul_pd(iq1,jq1);
1904     qq12             = _mm256_mul_pd(iq1,jq2);
1905     qq13             = _mm256_mul_pd(iq1,jq3);
1906     qq21             = _mm256_mul_pd(iq2,jq1);
1907     qq22             = _mm256_mul_pd(iq2,jq2);
1908     qq23             = _mm256_mul_pd(iq2,jq3);
1909     qq31             = _mm256_mul_pd(iq3,jq1);
1910     qq32             = _mm256_mul_pd(iq3,jq2);
1911     qq33             = _mm256_mul_pd(iq3,jq3);
1912
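    /* For the Water4-Water4 geometry only site 0 (the LJ site, e.g. the oxygen in a
     * TIP4P-like model) has Van der Waals parameters, and only sites 1-3 (the hydrogens
     * and the virtual charge site) carry charge, so the single c6/c12 pair and the nine
     * charge products above cover every interaction evaluated in the inner loop.
     */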
1913     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
1914     rcutoff_scalar   = fr->rcoulomb;
1915     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
1916     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
1917
1918     rswitch_scalar   = fr->rcoulomb_switch;
1919     rswitch          = _mm256_set1_pd(rswitch_scalar);
1920     /* Setup switch parameters */
1921     d_scalar         = rcutoff_scalar-rswitch_scalar;
1922     d                = _mm256_set1_pd(d_scalar);
1923     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
1924     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1925     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1926     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
1927     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1928     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1929
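    /* These coefficients define the fifth-order switch sw(d) = 1 - 10x^3 + 15x^4 - 6x^5
     * with x = d/d_scalar: it is 1 at rswitch and 0 at rcutoff, with vanishing first and
     * second derivatives at both ends. The swF* set gives the corresponding dsw/dr.
     */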
1930     /* Avoid stupid compiler warnings */
1931     jnrA = jnrB = jnrC = jnrD = 0;
1932     j_coord_offsetA = 0;
1933     j_coord_offsetB = 0;
1934     j_coord_offsetC = 0;
1935     j_coord_offsetD = 0;
1936
1937     outeriter        = 0;
1938     inneriter        = 0;
1939
1940     for(iidx=0;iidx<4*DIM;iidx++)
1941     {
1942         scratch[iidx] = 0.0;
1943     }
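    /* The scratch buffer serves as a dump target for force contributions from padded
     * neighbor-list entries (see the masked epilogue loop in the VF kernel above), so
     * those lanes never touch the real force array.
     */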
1944
1945     /* Start outer loop over neighborlists */
1946     for(iidx=0; iidx<nri; iidx++)
1947     {
1948         /* Load shift vector for this list */
1949         i_shift_offset   = DIM*shiftidx[iidx];
1950
1951         /* Load limits for loop over neighbors */
1952         j_index_start    = jindex[iidx];
1953         j_index_end      = jindex[iidx+1];
1954
1955         /* Get outer coordinate index */
1956         inr              = iinr[iidx];
1957         i_coord_offset   = DIM*inr;
1958
1959         /* Load i particle coords and add shift vector */
1960         gmx_mm256_load_shift_and_4rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1961                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1962
1963         fix0             = _mm256_setzero_pd();
1964         fiy0             = _mm256_setzero_pd();
1965         fiz0             = _mm256_setzero_pd();
1966         fix1             = _mm256_setzero_pd();
1967         fiy1             = _mm256_setzero_pd();
1968         fiz1             = _mm256_setzero_pd();
1969         fix2             = _mm256_setzero_pd();
1970         fiy2             = _mm256_setzero_pd();
1971         fiz2             = _mm256_setzero_pd();
1972         fix3             = _mm256_setzero_pd();
1973         fiy3             = _mm256_setzero_pd();
1974         fiz3             = _mm256_setzero_pd();
1975
1976         /* Start inner kernel loop */
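        /* This main loop only runs over complete quadruplets of valid (non-negative) jjnr
         * entries; any trailing padded entries are handled separately (cf. the masked
         * epilogue loop in the VF kernel above).
         */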
1977         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1978         {
1979
1980             /* Get j neighbor index, and coordinate index */
1981             jnrA             = jjnr[jidx];
1982             jnrB             = jjnr[jidx+1];
1983             jnrC             = jjnr[jidx+2];
1984             jnrD             = jjnr[jidx+3];
1985             j_coord_offsetA  = DIM*jnrA;
1986             j_coord_offsetB  = DIM*jnrB;
1987             j_coord_offsetC  = DIM*jnrC;
1988             j_coord_offsetD  = DIM*jnrD;
1989
1990             /* load j atom coordinates */
1991             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1992                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1993                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
1994                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
1995
1996             /* Calculate displacement vector */
1997             dx00             = _mm256_sub_pd(ix0,jx0);
1998             dy00             = _mm256_sub_pd(iy0,jy0);
1999             dz00             = _mm256_sub_pd(iz0,jz0);
2000             dx11             = _mm256_sub_pd(ix1,jx1);
2001             dy11             = _mm256_sub_pd(iy1,jy1);
2002             dz11             = _mm256_sub_pd(iz1,jz1);
2003             dx12             = _mm256_sub_pd(ix1,jx2);
2004             dy12             = _mm256_sub_pd(iy1,jy2);
2005             dz12             = _mm256_sub_pd(iz1,jz2);
2006             dx13             = _mm256_sub_pd(ix1,jx3);
2007             dy13             = _mm256_sub_pd(iy1,jy3);
2008             dz13             = _mm256_sub_pd(iz1,jz3);
2009             dx21             = _mm256_sub_pd(ix2,jx1);
2010             dy21             = _mm256_sub_pd(iy2,jy1);
2011             dz21             = _mm256_sub_pd(iz2,jz1);
2012             dx22             = _mm256_sub_pd(ix2,jx2);
2013             dy22             = _mm256_sub_pd(iy2,jy2);
2014             dz22             = _mm256_sub_pd(iz2,jz2);
2015             dx23             = _mm256_sub_pd(ix2,jx3);
2016             dy23             = _mm256_sub_pd(iy2,jy3);
2017             dz23             = _mm256_sub_pd(iz2,jz3);
2018             dx31             = _mm256_sub_pd(ix3,jx1);
2019             dy31             = _mm256_sub_pd(iy3,jy1);
2020             dz31             = _mm256_sub_pd(iz3,jz1);
2021             dx32             = _mm256_sub_pd(ix3,jx2);
2022             dy32             = _mm256_sub_pd(iy3,jy2);
2023             dz32             = _mm256_sub_pd(iz3,jz2);
2024             dx33             = _mm256_sub_pd(ix3,jx3);
2025             dy33             = _mm256_sub_pd(iy3,jy3);
2026             dz33             = _mm256_sub_pd(iz3,jz3);
2027
2028             /* Calculate squared distance and things based on it */
2029             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
2030             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
2031             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
2032             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
2033             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
2034             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
2035             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
2036             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
2037             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
2038             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
2039
2040             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
2041             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
2042             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
2043             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
2044             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
2045             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
2046             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
2047             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
2048             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
2049             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
2050
2051             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
2052             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
2053             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
2054             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
2055             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
2056             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
2057             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
2058             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
2059             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
2060             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
2061
2062             fjx0             = _mm256_setzero_pd();
2063             fjy0             = _mm256_setzero_pd();
2064             fjz0             = _mm256_setzero_pd();
2065             fjx1             = _mm256_setzero_pd();
2066             fjy1             = _mm256_setzero_pd();
2067             fjz1             = _mm256_setzero_pd();
2068             fjx2             = _mm256_setzero_pd();
2069             fjy2             = _mm256_setzero_pd();
2070             fjz2             = _mm256_setzero_pd();
2071             fjx3             = _mm256_setzero_pd();
2072             fjy3             = _mm256_setzero_pd();
2073             fjz3             = _mm256_setzero_pd();
2074
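            /* Pair selection for the Water4-Water4 geometry: only the 0-0 pair
             * (the vdW-only site of each water, the oxygen in a TIP4P-like model)
             * gets a Lennard-Jones interaction, while the nine pairs among the
             * charged sites 1-3 get Ewald electrostatics. Each pair is handled by
             * one of the masked blocks below, which differ only in the distances,
             * charge products and force accumulators they use.
             */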
2075             /**************************
2076              * CALCULATE INTERACTIONS *
2077              **************************/
2078
2079             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2080             {
2081
2082             r00              = _mm256_mul_pd(rsq00,rinv00);
2083
2084             /* LENNARD-JONES DISPERSION/REPULSION */
2085
2086             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
2087             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
2088             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
2089             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
2090             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
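            /* vvdw6 and vvdw12 are the attractive and repulsive 12-6 Lennard-Jones
             * terms. For fvdw = (vvdw12-vvdw6)/r^2 to equal -(1/r)dV/dr, the stored
             * c6_00/c12_00 parameters must carry factors of 6 and 12 (the usual
             * GROMACS convention); the one_twelfth/one_sixth factors remove that
             * scaling again, so vvdw is the plain potential C12/r^12 - C6/r^6,
             * which the potential-switch force correction below needs.
             */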
2091
2092             d                = _mm256_sub_pd(r00,rswitch);
2093             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2094             d2               = _mm256_mul_pd(d,d);
2095             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2096
2097             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
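            /* sw is the quintic switching polynomial, evaluated by Horner's rule:
             * sw(d) = 1 + swV3*d^3 + swV4*d^4 + swV5*d^5 with d = max(r-rswitch,0),
             * so sw = 1 inside rswitch and decays smoothly to 0 at the cutoff.
             * dsw = swF2*d^2 + swF3*d^3 + swF4*d^4 is d(sw)/dr with precomputed
             * coefficients (swF2 = 3*swV3, etc.); it supplies the extra force term
             * from differentiating V(r)*sw(r), see the fscal' comment below.
             */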
2098
2099             /* Evaluate switch function */
2100             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2101             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
2102             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
2103
2104             fscal            = fvdw;
2105
2106             fscal            = _mm256_and_pd(fscal,cutoff_mask);
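            /* The any_lt() test above only guarantees that at least one SIMD lane
             * is inside the cutoff, so the force is masked here to zero the lanes
             * with rsq00 >= rcutoff2 before it is accumulated.
             */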
2107
2108             /* Calculate temporary vectorial force */
2109             tx               = _mm256_mul_pd(fscal,dx00);
2110             ty               = _mm256_mul_pd(fscal,dy00);
2111             tz               = _mm256_mul_pd(fscal,dz00);
2112
2113             /* Update vectorial force */
2114             fix0             = _mm256_add_pd(fix0,tx);
2115             fiy0             = _mm256_add_pd(fiy0,ty);
2116             fiz0             = _mm256_add_pd(fiz0,tz);
2117
2118             fjx0             = _mm256_add_pd(fjx0,tx);
2119             fjy0             = _mm256_add_pd(fjy0,ty);
2120             fjz0             = _mm256_add_pd(fjz0,tz);
2121
2122             }
2123
2124             /**************************
2125              * CALCULATE INTERACTIONS *
2126              **************************/
2127
2128             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2129             {
2130
2131             r11              = _mm256_mul_pd(rsq11,rinv11);
2132
2133             /* EWALD ELECTROSTATICS */
2134
2135             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2136             ewrt             = _mm256_mul_pd(r11,ewtabscale);
2137             ewitab           = _mm256_cvttpd_epi32(ewrt);
2138             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2139             ewitab           = _mm_slli_epi32(ewitab,2);
2140             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2141             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2142             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2143             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2144             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2145             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2146             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2147             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
2148             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
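            /* The Ewald table stores four doubles per grid point: a force value F,
             * its increment D to the next point, a potential value V, and a fourth
             * entry (loaded as ewtabFn) that this block does not use. felec first
             * holds the linearly interpolated correction force F_i + eps*D_i, and
             * velec reconstructs the correction potential by integrating that
             * interpolated force over the fractional step,
             * V(r) ~= V_i - (h/2)*eps*(F_i + F(r)), where ewtabhalfspace should be
             * half the table spacing h. Subtracting the correction from the bare
             * 1/r Coulomb term leaves the short-range (real-space) Ewald
             * contribution, scaled by the charge product qq11.
             */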
2149
2150             d                = _mm256_sub_pd(r11,rswitch);
2151             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2152             d2               = _mm256_mul_pd(d,d);
2153             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2154
2155             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2156
2157             /* Evaluate switch function */
2158             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2159             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
2160             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2161
2162             fscal            = felec;
2163
2164             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2165
2166             /* Calculate temporary vectorial force */
2167             tx               = _mm256_mul_pd(fscal,dx11);
2168             ty               = _mm256_mul_pd(fscal,dy11);
2169             tz               = _mm256_mul_pd(fscal,dz11);
2170
2171             /* Update vectorial force */
2172             fix1             = _mm256_add_pd(fix1,tx);
2173             fiy1             = _mm256_add_pd(fiy1,ty);
2174             fiz1             = _mm256_add_pd(fiz1,tz);
2175
2176             fjx1             = _mm256_add_pd(fjx1,tx);
2177             fjy1             = _mm256_add_pd(fjy1,ty);
2178             fjz1             = _mm256_add_pd(fjz1,tz);
2179
2180             }
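            /* The remaining eight charged site pairs (1-2, 1-3, 2-1, ..., 3-3)
             * repeat the Ewald-plus-switch block above, each with its own distance,
             * charge product qqNM and i/j force accumulators.
             */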
2181
2182             /**************************
2183              * CALCULATE INTERACTIONS *
2184              **************************/
2185
2186             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2187             {
2188
2189             r12              = _mm256_mul_pd(rsq12,rinv12);
2190
2191             /* EWALD ELECTROSTATICS */
2192
2193             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2194             ewrt             = _mm256_mul_pd(r12,ewtabscale);
2195             ewitab           = _mm256_cvttpd_epi32(ewrt);
2196             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2197             ewitab           = _mm_slli_epi32(ewitab,2);
2198             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2199             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2200             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2201             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2202             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2203             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2204             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2205             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
2206             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2207
2208             d                = _mm256_sub_pd(r12,rswitch);
2209             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2210             d2               = _mm256_mul_pd(d,d);
2211             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2212
2213             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2214
2215             /* Evaluate switch function */
2216             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2217             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
2218             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2219
2220             fscal            = felec;
2221
2222             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2223
2224             /* Calculate temporary vectorial force */
2225             tx               = _mm256_mul_pd(fscal,dx12);
2226             ty               = _mm256_mul_pd(fscal,dy12);
2227             tz               = _mm256_mul_pd(fscal,dz12);
2228
2229             /* Update vectorial force */
2230             fix1             = _mm256_add_pd(fix1,tx);
2231             fiy1             = _mm256_add_pd(fiy1,ty);
2232             fiz1             = _mm256_add_pd(fiz1,tz);
2233
2234             fjx2             = _mm256_add_pd(fjx2,tx);
2235             fjy2             = _mm256_add_pd(fjy2,ty);
2236             fjz2             = _mm256_add_pd(fjz2,tz);
2237
2238             }
2239
2240             /**************************
2241              * CALCULATE INTERACTIONS *
2242              **************************/
2243
2244             if (gmx_mm256_any_lt(rsq13,rcutoff2))
2245             {
2246
2247             r13              = _mm256_mul_pd(rsq13,rinv13);
2248
2249             /* EWALD ELECTROSTATICS */
2250
2251             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2252             ewrt             = _mm256_mul_pd(r13,ewtabscale);
2253             ewitab           = _mm256_cvttpd_epi32(ewrt);
2254             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2255             ewitab           = _mm_slli_epi32(ewitab,2);
2256             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2257             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2258             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2259             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2260             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2261             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2262             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2263             velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
2264             felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
2265
2266             d                = _mm256_sub_pd(r13,rswitch);
2267             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2268             d2               = _mm256_mul_pd(d,d);
2269             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2270
2271             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2272
2273             /* Evaluate switch function */
2274             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2275             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
2276             cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
2277
2278             fscal            = felec;
2279
2280             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2281
2282             /* Calculate temporary vectorial force */
2283             tx               = _mm256_mul_pd(fscal,dx13);
2284             ty               = _mm256_mul_pd(fscal,dy13);
2285             tz               = _mm256_mul_pd(fscal,dz13);
2286
2287             /* Update vectorial force */
2288             fix1             = _mm256_add_pd(fix1,tx);
2289             fiy1             = _mm256_add_pd(fiy1,ty);
2290             fiz1             = _mm256_add_pd(fiz1,tz);
2291
2292             fjx3             = _mm256_add_pd(fjx3,tx);
2293             fjy3             = _mm256_add_pd(fjy3,ty);
2294             fjz3             = _mm256_add_pd(fjz3,tz);
2295
2296             }
2297
2298             /**************************
2299              * CALCULATE INTERACTIONS *
2300              **************************/
2301
2302             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2303             {
2304
2305             r21              = _mm256_mul_pd(rsq21,rinv21);
2306
2307             /* EWALD ELECTROSTATICS */
2308
2309             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2310             ewrt             = _mm256_mul_pd(r21,ewtabscale);
2311             ewitab           = _mm256_cvttpd_epi32(ewrt);
2312             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2313             ewitab           = _mm_slli_epi32(ewitab,2);
2314             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2315             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2316             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2317             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2318             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2319             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2320             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2321             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
2322             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2323
2324             d                = _mm256_sub_pd(r21,rswitch);
2325             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2326             d2               = _mm256_mul_pd(d,d);
2327             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2328
2329             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2330
2331             /* Evaluate switch function */
2332             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2333             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
2334             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2335
2336             fscal            = felec;
2337
2338             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2339
2340             /* Calculate temporary vectorial force */
2341             tx               = _mm256_mul_pd(fscal,dx21);
2342             ty               = _mm256_mul_pd(fscal,dy21);
2343             tz               = _mm256_mul_pd(fscal,dz21);
2344
2345             /* Update vectorial force */
2346             fix2             = _mm256_add_pd(fix2,tx);
2347             fiy2             = _mm256_add_pd(fiy2,ty);
2348             fiz2             = _mm256_add_pd(fiz2,tz);
2349
2350             fjx1             = _mm256_add_pd(fjx1,tx);
2351             fjy1             = _mm256_add_pd(fjy1,ty);
2352             fjz1             = _mm256_add_pd(fjz1,tz);
2353
2354             }
2355
2356             /**************************
2357              * CALCULATE INTERACTIONS *
2358              **************************/
2359
2360             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2361             {
2362
2363             r22              = _mm256_mul_pd(rsq22,rinv22);
2364
2365             /* EWALD ELECTROSTATICS */
2366
2367             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2368             ewrt             = _mm256_mul_pd(r22,ewtabscale);
2369             ewitab           = _mm256_cvttpd_epi32(ewrt);
2370             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2371             ewitab           = _mm_slli_epi32(ewitab,2);
2372             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2373             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2374             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2375             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2376             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2377             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2378             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2379             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
2380             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
2381
2382             d                = _mm256_sub_pd(r22,rswitch);
2383             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2384             d2               = _mm256_mul_pd(d,d);
2385             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2386
2387             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2388
2389             /* Evaluate switch function */
2390             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2391             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
2392             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2393
2394             fscal            = felec;
2395
2396             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2397
2398             /* Calculate temporary vectorial force */
2399             tx               = _mm256_mul_pd(fscal,dx22);
2400             ty               = _mm256_mul_pd(fscal,dy22);
2401             tz               = _mm256_mul_pd(fscal,dz22);
2402
2403             /* Update vectorial force */
2404             fix2             = _mm256_add_pd(fix2,tx);
2405             fiy2             = _mm256_add_pd(fiy2,ty);
2406             fiz2             = _mm256_add_pd(fiz2,tz);
2407
2408             fjx2             = _mm256_add_pd(fjx2,tx);
2409             fjy2             = _mm256_add_pd(fjy2,ty);
2410             fjz2             = _mm256_add_pd(fjz2,tz);
2411
2412             }
2413
2414             /**************************
2415              * CALCULATE INTERACTIONS *
2416              **************************/
2417
2418             if (gmx_mm256_any_lt(rsq23,rcutoff2))
2419             {
2420
2421             r23              = _mm256_mul_pd(rsq23,rinv23);
2422
2423             /* EWALD ELECTROSTATICS */
2424
2425             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2426             ewrt             = _mm256_mul_pd(r23,ewtabscale);
2427             ewitab           = _mm256_cvttpd_epi32(ewrt);
2428             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2429             ewitab           = _mm_slli_epi32(ewitab,2);
2430             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2431             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2432             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2433             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2434             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2435             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2436             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2437             velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
2438             felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));
2439
2440             d                = _mm256_sub_pd(r23,rswitch);
2441             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2442             d2               = _mm256_mul_pd(d,d);
2443             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2444
2445             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2446
2447             /* Evaluate switch function */
2448             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2449             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
2450             cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);
2451
2452             fscal            = felec;
2453
2454             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2455
2456             /* Calculate temporary vectorial force */
2457             tx               = _mm256_mul_pd(fscal,dx23);
2458             ty               = _mm256_mul_pd(fscal,dy23);
2459             tz               = _mm256_mul_pd(fscal,dz23);
2460
2461             /* Update vectorial force */
2462             fix2             = _mm256_add_pd(fix2,tx);
2463             fiy2             = _mm256_add_pd(fiy2,ty);
2464             fiz2             = _mm256_add_pd(fiz2,tz);
2465
2466             fjx3             = _mm256_add_pd(fjx3,tx);
2467             fjy3             = _mm256_add_pd(fjy3,ty);
2468             fjz3             = _mm256_add_pd(fjz3,tz);
2469
2470             }
2471
2472             /**************************
2473              * CALCULATE INTERACTIONS *
2474              **************************/
2475
2476             if (gmx_mm256_any_lt(rsq31,rcutoff2))
2477             {
2478
2479             r31              = _mm256_mul_pd(rsq31,rinv31);
2480
2481             /* EWALD ELECTROSTATICS */
2482
2483             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2484             ewrt             = _mm256_mul_pd(r31,ewtabscale);
2485             ewitab           = _mm256_cvttpd_epi32(ewrt);
2486             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2487             ewitab           = _mm_slli_epi32(ewitab,2);
2488             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2489             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2490             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2491             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2492             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2493             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2494             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2495             velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
2496             felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));
2497
2498             d                = _mm256_sub_pd(r31,rswitch);
2499             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2500             d2               = _mm256_mul_pd(d,d);
2501             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2502
2503             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2504
2505             /* Evaluate switch function */
2506             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2507             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
2508             cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);
2509
2510             fscal            = felec;
2511
2512             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2513
2514             /* Calculate temporary vectorial force */
2515             tx               = _mm256_mul_pd(fscal,dx31);
2516             ty               = _mm256_mul_pd(fscal,dy31);
2517             tz               = _mm256_mul_pd(fscal,dz31);
2518
2519             /* Update vectorial force */
2520             fix3             = _mm256_add_pd(fix3,tx);
2521             fiy3             = _mm256_add_pd(fiy3,ty);
2522             fiz3             = _mm256_add_pd(fiz3,tz);
2523
2524             fjx1             = _mm256_add_pd(fjx1,tx);
2525             fjy1             = _mm256_add_pd(fjy1,ty);
2526             fjz1             = _mm256_add_pd(fjz1,tz);
2527
2528             }
2529
2530             /**************************
2531              * CALCULATE INTERACTIONS *
2532              **************************/
2533
2534             if (gmx_mm256_any_lt(rsq32,rcutoff2))
2535             {
2536
2537             r32              = _mm256_mul_pd(rsq32,rinv32);
2538
2539             /* EWALD ELECTROSTATICS */
2540
2541             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2542             ewrt             = _mm256_mul_pd(r32,ewtabscale);
2543             ewitab           = _mm256_cvttpd_epi32(ewrt);
2544             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2545             ewitab           = _mm_slli_epi32(ewitab,2);
2546             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2547             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2548             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2549             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2550             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2551             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2552             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2553             velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
2554             felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));
2555
2556             d                = _mm256_sub_pd(r32,rswitch);
2557             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2558             d2               = _mm256_mul_pd(d,d);
2559             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2560
2561             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2562
2563             /* Evaluate switch function */
2564             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2565             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
2566             cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);
2567
2568             fscal            = felec;
2569
2570             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2571
2572             /* Calculate temporary vectorial force */
2573             tx               = _mm256_mul_pd(fscal,dx32);
2574             ty               = _mm256_mul_pd(fscal,dy32);
2575             tz               = _mm256_mul_pd(fscal,dz32);
2576
2577             /* Update vectorial force */
2578             fix3             = _mm256_add_pd(fix3,tx);
2579             fiy3             = _mm256_add_pd(fiy3,ty);
2580             fiz3             = _mm256_add_pd(fiz3,tz);
2581
2582             fjx2             = _mm256_add_pd(fjx2,tx);
2583             fjy2             = _mm256_add_pd(fjy2,ty);
2584             fjz2             = _mm256_add_pd(fjz2,tz);
2585
2586             }
2587
2588             /**************************
2589              * CALCULATE INTERACTIONS *
2590              **************************/
2591
2592             if (gmx_mm256_any_lt(rsq33,rcutoff2))
2593             {
2594
2595             r33              = _mm256_mul_pd(rsq33,rinv33);
2596
2597             /* EWALD ELECTROSTATICS */
2598
2599             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2600             ewrt             = _mm256_mul_pd(r33,ewtabscale);
2601             ewitab           = _mm256_cvttpd_epi32(ewrt);
2602             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2603             ewitab           = _mm_slli_epi32(ewitab,2);
2604             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2605             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2606             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2607             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2608             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2609             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2610             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2611             velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
2612             felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));
2613
2614             d                = _mm256_sub_pd(r33,rswitch);
2615             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2616             d2               = _mm256_mul_pd(d,d);
2617             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2618
2619             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2620
2621             /* Evaluate switch function */
2622             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2623             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
2624             cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);
2625
2626             fscal            = felec;
2627
2628             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2629
2630             /* Calculate temporary vectorial force */
2631             tx               = _mm256_mul_pd(fscal,dx33);
2632             ty               = _mm256_mul_pd(fscal,dy33);
2633             tz               = _mm256_mul_pd(fscal,dz33);
2634
2635             /* Update vectorial force */
2636             fix3             = _mm256_add_pd(fix3,tx);
2637             fiy3             = _mm256_add_pd(fiy3,ty);
2638             fiz3             = _mm256_add_pd(fiz3,tz);
2639
2640             fjx3             = _mm256_add_pd(fjx3,tx);
2641             fjy3             = _mm256_add_pd(fjy3,ty);
2642             fjz3             = _mm256_add_pd(fjz3,tz);
2643
2644             }
2645
2646             fjptrA             = f+j_coord_offsetA;
2647             fjptrB             = f+j_coord_offsetB;
2648             fjptrC             = f+j_coord_offsetC;
2649             fjptrD             = f+j_coord_offsetD;
2650
2651             gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2652                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
2653                                                       fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
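            /* By Newton's third law the force on each j atom is minus the force on
             * the i atom, so the per-pair contributions accumulated in the fj*
             * registers are transposed back to per-atom layout and subtracted from
             * the force array of the four j waters in a single swizzled store.
             */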
2654
2655             /* Inner loop uses 617 flops */
2656         }
2657
2658         if(jidx<j_index_end)
2659         {
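            /* Tail iteration: when the number of j entries is not a multiple of 4,
             * the neighbor list is padded with negative indices and this block
             * processes the final, partially filled group of four with the padded
             * lanes masked out.
             */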
2660
2661             /* Get j neighbor index, and coordinate index */
2662             jnrlistA         = jjnr[jidx];
2663             jnrlistB         = jjnr[jidx+1];
2664             jnrlistC         = jjnr[jidx+2];
2665             jnrlistD         = jjnr[jidx+3];
2666             /* Padding (non-real) j entries are marked by negative indices, so the
2667              * comparison below yields a mask with all bits set for dummy entries and
2668              * zero for real ones; it is applied as val = _mm256_andnot_pd(dummy_mask,val)
2669              * to clear dummy contributions. */
2670             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
2671
2672             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
2673             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
2674             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
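            /* The 4x32-bit integer mask is widened to a 4x64-bit double mask by
             * duplicating each 32-bit element into both halves of its 64-bit lane
             * (the two permutes) and stacking the halves into one 256-bit register.
             */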
2675
2676             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2677             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2678             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2679             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2680             j_coord_offsetA  = DIM*jnrA;
2681             j_coord_offsetB  = DIM*jnrB;
2682             j_coord_offsetC  = DIM*jnrC;
2683             j_coord_offsetD  = DIM*jnrD;
2684
2685             /* load j atom coordinates */
2686             gmx_mm256_load_4rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
2687                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2688                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,
2689                                                  &jy2,&jz2,&jx3,&jy3,&jz3);
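            /* Gather the coordinates of the four (possibly duplicated) j waters and
             * transpose them so that each register holds one coordinate component
             * of one site, with one lane per j molecule.
             */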
2690
2691             /* Calculate displacement vector */
2692             dx00             = _mm256_sub_pd(ix0,jx0);
2693             dy00             = _mm256_sub_pd(iy0,jy0);
2694             dz00             = _mm256_sub_pd(iz0,jz0);
2695             dx11             = _mm256_sub_pd(ix1,jx1);
2696             dy11             = _mm256_sub_pd(iy1,jy1);
2697             dz11             = _mm256_sub_pd(iz1,jz1);
2698             dx12             = _mm256_sub_pd(ix1,jx2);
2699             dy12             = _mm256_sub_pd(iy1,jy2);
2700             dz12             = _mm256_sub_pd(iz1,jz2);
2701             dx13             = _mm256_sub_pd(ix1,jx3);
2702             dy13             = _mm256_sub_pd(iy1,jy3);
2703             dz13             = _mm256_sub_pd(iz1,jz3);
2704             dx21             = _mm256_sub_pd(ix2,jx1);
2705             dy21             = _mm256_sub_pd(iy2,jy1);
2706             dz21             = _mm256_sub_pd(iz2,jz1);
2707             dx22             = _mm256_sub_pd(ix2,jx2);
2708             dy22             = _mm256_sub_pd(iy2,jy2);
2709             dz22             = _mm256_sub_pd(iz2,jz2);
2710             dx23             = _mm256_sub_pd(ix2,jx3);
2711             dy23             = _mm256_sub_pd(iy2,jy3);
2712             dz23             = _mm256_sub_pd(iz2,jz3);
2713             dx31             = _mm256_sub_pd(ix3,jx1);
2714             dy31             = _mm256_sub_pd(iy3,jy1);
2715             dz31             = _mm256_sub_pd(iz3,jz1);
2716             dx32             = _mm256_sub_pd(ix3,jx2);
2717             dy32             = _mm256_sub_pd(iy3,jy2);
2718             dz32             = _mm256_sub_pd(iz3,jz2);
2719             dx33             = _mm256_sub_pd(ix3,jx3);
2720             dy33             = _mm256_sub_pd(iy3,jy3);
2721             dz33             = _mm256_sub_pd(iz3,jz3);
2722
2723             /* Calculate squared distance and things based on it */
2724             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
2725             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
2726             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
2727             rsq13            = gmx_mm256_calc_rsq_pd(dx13,dy13,dz13);
2728             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
2729             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
2730             rsq23            = gmx_mm256_calc_rsq_pd(dx23,dy23,dz23);
2731             rsq31            = gmx_mm256_calc_rsq_pd(dx31,dy31,dz31);
2732             rsq32            = gmx_mm256_calc_rsq_pd(dx32,dy32,dz32);
2733             rsq33            = gmx_mm256_calc_rsq_pd(dx33,dy33,dz33);
2734
2735             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
2736             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
2737             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
2738             rinv13           = gmx_mm256_invsqrt_pd(rsq13);
2739             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
2740             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
2741             rinv23           = gmx_mm256_invsqrt_pd(rsq23);
2742             rinv31           = gmx_mm256_invsqrt_pd(rsq31);
2743             rinv32           = gmx_mm256_invsqrt_pd(rsq32);
2744             rinv33           = gmx_mm256_invsqrt_pd(rsq33);
2745
2746             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
2747             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
2748             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
2749             rinvsq13         = _mm256_mul_pd(rinv13,rinv13);
2750             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
2751             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
2752             rinvsq23         = _mm256_mul_pd(rinv23,rinv23);
2753             rinvsq31         = _mm256_mul_pd(rinv31,rinv31);
2754             rinvsq32         = _mm256_mul_pd(rinv32,rinv32);
2755             rinvsq33         = _mm256_mul_pd(rinv33,rinv33);
2756
2757             fjx0             = _mm256_setzero_pd();
2758             fjy0             = _mm256_setzero_pd();
2759             fjz0             = _mm256_setzero_pd();
2760             fjx1             = _mm256_setzero_pd();
2761             fjy1             = _mm256_setzero_pd();
2762             fjz1             = _mm256_setzero_pd();
2763             fjx2             = _mm256_setzero_pd();
2764             fjy2             = _mm256_setzero_pd();
2765             fjz2             = _mm256_setzero_pd();
2766             fjx3             = _mm256_setzero_pd();
2767             fjy3             = _mm256_setzero_pd();
2768             fjz3             = _mm256_setzero_pd();
2769
2770             /**************************
2771              * CALCULATE INTERACTIONS *
2772              **************************/
2773
2774             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2775             {
2776
2777             r00              = _mm256_mul_pd(rsq00,rinv00);
2778             r00              = _mm256_andnot_pd(dummy_mask,r00);
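            /* r00 is cleared for padded lanes so that the switch argument (and, in
             * the electrostatics blocks below, the Ewald table index) stays small
             * and well defined for dummy entries; their forces are masked out
             * further down in any case.
             */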
2779
2780             /* LENNARD-JONES DISPERSION/REPULSION */
2781
2782             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
2783             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
2784             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
2785             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
2786             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
2787
2788             d                = _mm256_sub_pd(r00,rswitch);
2789             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2790             d2               = _mm256_mul_pd(d,d);
2791             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2792
2793             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2794
2795             /* Evaluate switch function */
2796             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2797             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
2798             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
2799
2800             fscal            = fvdw;
2801
2802             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2803
2804             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
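            /* Also zero the force for padded lanes, so dummy neighbor-list entries
             * contribute nothing to either the i or the j force accumulators.
             */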
2805
2806             /* Calculate temporary vectorial force */
2807             tx               = _mm256_mul_pd(fscal,dx00);
2808             ty               = _mm256_mul_pd(fscal,dy00);
2809             tz               = _mm256_mul_pd(fscal,dz00);
2810
2811             /* Update vectorial force */
2812             fix0             = _mm256_add_pd(fix0,tx);
2813             fiy0             = _mm256_add_pd(fiy0,ty);
2814             fiz0             = _mm256_add_pd(fiz0,tz);
2815
2816             fjx0             = _mm256_add_pd(fjx0,tx);
2817             fjy0             = _mm256_add_pd(fjy0,ty);
2818             fjz0             = _mm256_add_pd(fjz0,tz);
2819
2820             }
2821
2822             /**************************
2823              * CALCULATE INTERACTIONS *
2824              **************************/
2825
2826             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2827             {
2828
2829             r11              = _mm256_mul_pd(rsq11,rinv11);
2830             r11              = _mm256_andnot_pd(dummy_mask,r11);
2831
2832             /* EWALD ELECTROSTATICS */
2833
2834             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2835             ewrt             = _mm256_mul_pd(r11,ewtabscale);
2836             ewitab           = _mm256_cvttpd_epi32(ewrt);
2837             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2838             ewitab           = _mm_slli_epi32(ewitab,2);
2839             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2840             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2841             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2842             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2843             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2844             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2845             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2846             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
2847             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
2848
2849             d                = _mm256_sub_pd(r11,rswitch);
2850             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2851             d2               = _mm256_mul_pd(d,d);
2852             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2853
2854             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2855
2856             /* Evaluate switch function */
2857             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2858             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
2859             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2860
2861             fscal            = felec;
2862
2863             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2864
2865             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2866
2867             /* Calculate temporary vectorial force */
2868             tx               = _mm256_mul_pd(fscal,dx11);
2869             ty               = _mm256_mul_pd(fscal,dy11);
2870             tz               = _mm256_mul_pd(fscal,dz11);
2871
2872             /* Update vectorial force */
2873             fix1             = _mm256_add_pd(fix1,tx);
2874             fiy1             = _mm256_add_pd(fiy1,ty);
2875             fiz1             = _mm256_add_pd(fiz1,tz);
2876
2877             fjx1             = _mm256_add_pd(fjx1,tx);
2878             fjy1             = _mm256_add_pd(fjy1,ty);
2879             fjz1             = _mm256_add_pd(fjz1,tz);
2880
2881             }
2882
2883             /**************************
2884              * CALCULATE INTERACTIONS *
2885              **************************/
2886
2887             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2888             {
2889
2890             r12              = _mm256_mul_pd(rsq12,rinv12);
2891             r12              = _mm256_andnot_pd(dummy_mask,r12);
2892
2893             /* EWALD ELECTROSTATICS */
2894
2895             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2896             ewrt             = _mm256_mul_pd(r12,ewtabscale);
2897             ewitab           = _mm256_cvttpd_epi32(ewrt);
2898             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2899             ewitab           = _mm_slli_epi32(ewitab,2);
2900             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2901             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2902             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2903             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2904             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2905             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2906             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2907             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
2908             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2909
2910             d                = _mm256_sub_pd(r12,rswitch);
2911             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2912             d2               = _mm256_mul_pd(d,d);
2913             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2914
2915             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2916
2917             /* Evaluate switch function */
2918             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2919             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
2920             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2921
2922             fscal            = felec;
2923
2924             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2925
2926             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2927
2928             /* Calculate temporary vectorial force */
2929             tx               = _mm256_mul_pd(fscal,dx12);
2930             ty               = _mm256_mul_pd(fscal,dy12);
2931             tz               = _mm256_mul_pd(fscal,dz12);
2932
2933             /* Update vectorial force */
2934             fix1             = _mm256_add_pd(fix1,tx);
2935             fiy1             = _mm256_add_pd(fiy1,ty);
2936             fiz1             = _mm256_add_pd(fiz1,tz);
2937
2938             fjx2             = _mm256_add_pd(fjx2,tx);
2939             fjy2             = _mm256_add_pd(fjy2,ty);
2940             fjz2             = _mm256_add_pd(fjz2,tz);
2941
2942             }
2943
2944             /**************************
2945              * CALCULATE INTERACTIONS *
2946              **************************/
2947
2948             if (gmx_mm256_any_lt(rsq13,rcutoff2))
2949             {
2950
2951             r13              = _mm256_mul_pd(rsq13,rinv13);
2952             r13              = _mm256_andnot_pd(dummy_mask,r13);
2953
2954             /* EWALD ELECTROSTATICS */
2955
2956             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2957             ewrt             = _mm256_mul_pd(r13,ewtabscale);
2958             ewitab           = _mm256_cvttpd_epi32(ewrt);
2959             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2960             ewitab           = _mm_slli_epi32(ewitab,2);
2961             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2962             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2963             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2964             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2965             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2966             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2967             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2968             velec            = _mm256_mul_pd(qq13,_mm256_sub_pd(rinv13,velec));
2969             felec            = _mm256_mul_pd(_mm256_mul_pd(qq13,rinv13),_mm256_sub_pd(rinvsq13,felec));
2970
2971             d                = _mm256_sub_pd(r13,rswitch);
2972             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2973             d2               = _mm256_mul_pd(d,d);
2974             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2975
2976             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2977
2978             /* Evaluate switch function */
2979             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2980             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv13,_mm256_mul_pd(velec,dsw)) );
2981             cutoff_mask      = _mm256_cmp_pd(rsq13,rcutoff2,_CMP_LT_OQ);
2982
2983             fscal            = felec;
2984
2985             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2986
2987             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2988
2989             /* Calculate temporary vectorial force */
2990             tx               = _mm256_mul_pd(fscal,dx13);
2991             ty               = _mm256_mul_pd(fscal,dy13);
2992             tz               = _mm256_mul_pd(fscal,dz13);
2993
2994             /* Update vectorial force */
2995             fix1             = _mm256_add_pd(fix1,tx);
2996             fiy1             = _mm256_add_pd(fiy1,ty);
2997             fiz1             = _mm256_add_pd(fiz1,tz);
2998
2999             fjx3             = _mm256_add_pd(fjx3,tx);
3000             fjy3             = _mm256_add_pd(fjy3,ty);
3001             fjz3             = _mm256_add_pd(fjz3,tz);
3002
3003             }
3004
3005             /**************************
3006              * CALCULATE INTERACTIONS *
3007              **************************/
3008
3009             if (gmx_mm256_any_lt(rsq21,rcutoff2))
3010             {
3011
3012             r21              = _mm256_mul_pd(rsq21,rinv21);
3013             r21              = _mm256_andnot_pd(dummy_mask,r21);
3014
3015             /* EWALD ELECTROSTATICS */
3016
3017             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
3018             ewrt             = _mm256_mul_pd(r21,ewtabscale);
3019             ewitab           = _mm256_cvttpd_epi32(ewrt);
3020             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
3021             ewitab           = _mm_slli_epi32(ewitab,2);
3022             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
3023             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
3024             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
3025             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
3026             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
3027             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
3028             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
3029             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
3030             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
3031
3032             d                = _mm256_sub_pd(r21,rswitch);
3033             d                = _mm256_max_pd(d,_mm256_setzero_pd());
3034             d2               = _mm256_mul_pd(d,d);
3035             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
3036
3037             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
3038
3039             /* Evaluate switch function */
3040             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
3041             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
3042             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
3043
3044             fscal            = felec;
3045
3046             fscal            = _mm256_and_pd(fscal,cutoff_mask);
3047
3048             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
3049
3050             /* Calculate temporary vectorial force */
3051             tx               = _mm256_mul_pd(fscal,dx21);
3052             ty               = _mm256_mul_pd(fscal,dy21);
3053             tz               = _mm256_mul_pd(fscal,dz21);
3054
3055             /* Update vectorial force */
3056             fix2             = _mm256_add_pd(fix2,tx);
3057             fiy2             = _mm256_add_pd(fiy2,ty);
3058             fiz2             = _mm256_add_pd(fiz2,tz);
3059
3060             fjx1             = _mm256_add_pd(fjx1,tx);
3061             fjy1             = _mm256_add_pd(fjy1,ty);
3062             fjz1             = _mm256_add_pd(fjz1,tz);
3063
3064             }
3065
3066             /**************************
3067              * CALCULATE INTERACTIONS *
3068              **************************/
3069
3070             if (gmx_mm256_any_lt(rsq22,rcutoff2))
3071             {
3072
3073             r22              = _mm256_mul_pd(rsq22,rinv22);
3074             r22              = _mm256_andnot_pd(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r22,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));

            d                = _mm256_sub_pd(r22,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx22);
            ty               = _mm256_mul_pd(fscal,dy22);
            tz               = _mm256_mul_pd(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx2             = _mm256_add_pd(fjx2,tx);
            fjy2             = _mm256_add_pd(fjy2,ty);
            fjz2             = _mm256_add_pd(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq23,rcutoff2))
            {

            r23              = _mm256_mul_pd(rsq23,rinv23);
            r23              = _mm256_andnot_pd(dummy_mask,r23);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r23,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq23,_mm256_sub_pd(rinv23,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq23,rinv23),_mm256_sub_pd(rinvsq23,felec));

            d                = _mm256_sub_pd(r23,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv23,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq23,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx23);
            ty               = _mm256_mul_pd(fscal,dy23);
            tz               = _mm256_mul_pd(fscal,dz23);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx3             = _mm256_add_pd(fjx3,tx);
            fjy3             = _mm256_add_pd(fjy3,ty);
            fjz3             = _mm256_add_pd(fjz3,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq31,rcutoff2))
            {

            r31              = _mm256_mul_pd(rsq31,rinv31);
            r31              = _mm256_andnot_pd(dummy_mask,r31);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r31,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq31,_mm256_sub_pd(rinv31,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq31,rinv31),_mm256_sub_pd(rinvsq31,felec));

            d                = _mm256_sub_pd(r31,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv31,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq31,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx31);
            ty               = _mm256_mul_pd(fscal,dy31);
            tz               = _mm256_mul_pd(fscal,dz31);

            /* Update vectorial force */
            fix3             = _mm256_add_pd(fix3,tx);
            fiy3             = _mm256_add_pd(fiy3,ty);
            fiz3             = _mm256_add_pd(fiz3,tz);

            fjx1             = _mm256_add_pd(fjx1,tx);
            fjy1             = _mm256_add_pd(fjy1,ty);
            fjz1             = _mm256_add_pd(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq32,rcutoff2))
            {

            r32              = _mm256_mul_pd(rsq32,rinv32);
            r32              = _mm256_andnot_pd(dummy_mask,r32);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r32,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq32,_mm256_sub_pd(rinv32,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq32,rinv32),_mm256_sub_pd(rinvsq32,felec));

            d                = _mm256_sub_pd(r32,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv32,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq32,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx32);
            ty               = _mm256_mul_pd(fscal,dy32);
            tz               = _mm256_mul_pd(fscal,dz32);

            /* Update vectorial force */
            fix3             = _mm256_add_pd(fix3,tx);
            fiy3             = _mm256_add_pd(fiy3,ty);
            fiz3             = _mm256_add_pd(fiz3,tz);

            fjx2             = _mm256_add_pd(fjx2,tx);
            fjy2             = _mm256_add_pd(fjy2,ty);
            fjz2             = _mm256_add_pd(fjz2,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq33,rcutoff2))
            {

            r33              = _mm256_mul_pd(rsq33,rinv33);
            r33              = _mm256_andnot_pd(dummy_mask,r33);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r33,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq33,_mm256_sub_pd(rinv33,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq33,rinv33),_mm256_sub_pd(rinvsq33,felec));

            d                = _mm256_sub_pd(r33,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv33,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq33,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx33);
            ty               = _mm256_mul_pd(fscal,dy33);
            tz               = _mm256_mul_pd(fscal,dz33);

            /* Update vectorial force */
            fix3             = _mm256_add_pd(fix3,tx);
            fiy3             = _mm256_add_pd(fiy3,ty);
            fiz3             = _mm256_add_pd(fiz3,tz);

            fjx3             = _mm256_add_pd(fjx3,tx);
            fjy3             = _mm256_add_pd(fjy3,ty);
            fjz3             = _mm256_add_pd(fjz3,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;

            gmx_mm256_decrement_4rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,
                                                      fjx2,fjy2,fjz2,fjx3,fjy3,fjz3);
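            /* Editorial note: lanes with jnrlist<0 are padding, so their force
             * pointers are redirected to a scratch buffer and discarded.  The
             * decrement call subtracts the accumulated j forces from memory,
             * which together with the additions into the fix/fiy/fiz accumulators
             * above gives the equal-and-opposite pair forces required by
             * Newton's third law. */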

            /* Inner loop uses 627 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_4atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
                                                 f+i_coord_offset,fshift+i_shift_offset);
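        /* Editorial note: the twelve per-lane i-force accumulators are reduced
         * across SIMD lanes here and added to the force array; the same reduced
         * force is also added to the shift-force array fshift, which GROMACS
         * uses later when computing the virial. */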

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 24 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4W4_F,outeriter*24 + inneriter*627);
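    /* Editorial note: inc_nrnb records the estimated flop count (24 per outer
     * and 627 per inner iteration, matching the loop comments above) in the
     * nrnb accounting structure, which is presumably what mdrun uses for its
     * performance reporting. */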
}