[alexxy/gromacs.git] / src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_avx_256_double.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
38 #ifdef HAVE_CONFIG_H
39 #include <config.h>
40 #endif
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "types/simple.h"
46 #include "vec.h"
47 #include "nrnb.h"
48
49 #include "gromacs/simd/math_x86_avx_256_double.h"
50 #include "kernelutil_x86_avx_256_double.h"
51
52 /*
53  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_double
54  * Electrostatics interaction: Ewald
55  * VdW interaction:            LennardJones
56  * Geometry:                   Water3-Water3
57  * Calculate force/pot:        PotentialAndForce
58  */
59 void
60 nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_VF_avx_256_double
61                     (t_nblist                    * gmx_restrict       nlist,
62                      rvec                        * gmx_restrict          xx,
63                      rvec                        * gmx_restrict          ff,
64                      t_forcerec                  * gmx_restrict          fr,
65                      t_mdatoms                   * gmx_restrict     mdatoms,
66                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67                      t_nrnb                      * gmx_restrict        nrnb)
68 {
69     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
70      * just 0 for non-waters.
71      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
72      * jnr indices corresponding to data put in the four positions in the SIMD register.
73      */
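    /* For example, dx12/dy12/dz12 hold, for the four j entries A..D at once, the
     * displacement from i-water atom 1 to j-water atom 2, and qq12 the corresponding
     * scaled charge product.
     */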
74     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
75     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76     int              jnrA,jnrB,jnrC,jnrD;
77     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
79     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
84     real             scratch[4*DIM];
85     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88     real *           vdwioffsetptr1;
89     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90     real *           vdwioffsetptr2;
91     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
93     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
94     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
95     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
96     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
97     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
98     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
99     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
100     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
101     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
102     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
103     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
104     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
105     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
106     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
107     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
108     real             *charge;
109     int              nvdwtype;
110     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
111     int              *vdwtype;
112     real             *vdwparam;
113     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
114     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
115     __m128i          ewitab;
116     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
117     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
118     real             *ewtab;
119     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
120     real             rswitch_scalar,d_scalar;
121     __m256d          dummy_mask,cutoff_mask;
122     __m128           tmpmask0,tmpmask1;
123     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
124     __m256d          one     = _mm256_set1_pd(1.0);
125     __m256d          two     = _mm256_set1_pd(2.0);
126     x                = xx[0];
127     f                = ff[0];
128
129     nri              = nlist->nri;
130     iinr             = nlist->iinr;
131     jindex           = nlist->jindex;
132     jjnr             = nlist->jjnr;
133     shiftidx         = nlist->shift;
134     gid              = nlist->gid;
135     shiftvec         = fr->shift_vec[0];
136     fshift           = fr->fshift[0];
137     facel            = _mm256_set1_pd(fr->epsfac);
138     charge           = mdatoms->chargeA;
139     nvdwtype         = fr->ntype;
140     vdwparam         = fr->nbfp;
141     vdwtype          = mdatoms->typeA;
142
143     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
144     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
145     beta2            = _mm256_mul_pd(beta,beta);
146     beta3            = _mm256_mul_pd(beta,beta2);
147
148     ewtab            = fr->ic->tabq_coul_FDV0;
149     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
150     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
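    /* The Ewald table lookups in the inner loops assume four doubles per table point
     * in tabq_coul_FDV0 (F, its increment D, V, and a fourth value unused here), which
     * is why the point index is shifted left by 2 before loading; ewtabhalfspace, half
     * the table spacing, is used there to reconstruct the potential from F and D.
     */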
151
152     /* Setup water-specific parameters */
153     inr              = nlist->iinr[0];
154     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
155     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
156     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
157     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
158
159     jq0              = _mm256_set1_pd(charge[inr+0]);
160     jq1              = _mm256_set1_pd(charge[inr+1]);
161     jq2              = _mm256_set1_pd(charge[inr+2]);
162     vdwjidx0A        = 2*vdwtype[inr+0];
163     qq00             = _mm256_mul_pd(iq0,jq0);
164     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
165     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
166     qq01             = _mm256_mul_pd(iq0,jq1);
167     qq02             = _mm256_mul_pd(iq0,jq2);
168     qq10             = _mm256_mul_pd(iq1,jq0);
169     qq11             = _mm256_mul_pd(iq1,jq1);
170     qq12             = _mm256_mul_pd(iq1,jq2);
171     qq20             = _mm256_mul_pd(iq2,jq0);
172     qq21             = _mm256_mul_pd(iq2,jq1);
173     qq22             = _mm256_mul_pd(iq2,jq2);
174
175     /* When we use explicit cutoffs, the value must be identical for elec and VdW, so we use the elec value as an arbitrary choice */
176     rcutoff_scalar   = fr->rcoulomb;
177     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
178     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
179
180     rswitch_scalar   = fr->rcoulomb_switch;
181     rswitch          = _mm256_set1_pd(rswitch_scalar);
182     /* Setup switch parameters */
183     d_scalar         = rcutoff_scalar-rswitch_scalar;
184     d                = _mm256_set1_pd(d_scalar);
185     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
186     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
187     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
188     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
189     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
190     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
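    /* With d = r - rswitch and delta = rcutoff - rswitch, these coefficients give the
     * switching function sw(r) = 1 + swV3*d^3 + swV4*d^4 + swV5*d^5
     *                          = 1 - 10*(d/delta)^3 + 15*(d/delta)^4 - 6*(d/delta)^5
     * and its derivative dsw(r) = swF2*d^2 + swF3*d^3 + swF4*d^4, which are evaluated
     * for each interaction in the loops below.
     */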
191
192     /* Avoid stupid compiler warnings */
193     jnrA = jnrB = jnrC = jnrD = 0;
194     j_coord_offsetA = 0;
195     j_coord_offsetB = 0;
196     j_coord_offsetC = 0;
197     j_coord_offsetD = 0;
198
199     outeriter        = 0;
200     inneriter        = 0;
201
202     for(iidx=0;iidx<4*DIM;iidx++)
203     {
204         scratch[iidx] = 0.0;
205     }
206
207     /* Start outer loop over neighborlists */
208     for(iidx=0; iidx<nri; iidx++)
209     {
210         /* Load shift vector for this list */
211         i_shift_offset   = DIM*shiftidx[iidx];
212
213         /* Load limits for loop over neighbors */
214         j_index_start    = jindex[iidx];
215         j_index_end      = jindex[iidx+1];
216
217         /* Get outer coordinate index */
218         inr              = iinr[iidx];
219         i_coord_offset   = DIM*inr;
220
221         /* Load i particle coords and add shift vector */
222         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
223                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
224
225         fix0             = _mm256_setzero_pd();
226         fiy0             = _mm256_setzero_pd();
227         fiz0             = _mm256_setzero_pd();
228         fix1             = _mm256_setzero_pd();
229         fiy1             = _mm256_setzero_pd();
230         fiz1             = _mm256_setzero_pd();
231         fix2             = _mm256_setzero_pd();
232         fiy2             = _mm256_setzero_pd();
233         fiz2             = _mm256_setzero_pd();
234
235         /* Reset potential sums */
236         velecsum         = _mm256_setzero_pd();
237         vvdwsum          = _mm256_setzero_pd();
238
239         /* Start inner kernel loop */
240         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
241         {
242
243             /* Get j neighbor index, and coordinate index */
244             jnrA             = jjnr[jidx];
245             jnrB             = jjnr[jidx+1];
246             jnrC             = jjnr[jidx+2];
247             jnrD             = jjnr[jidx+3];
248             j_coord_offsetA  = DIM*jnrA;
249             j_coord_offsetB  = DIM*jnrB;
250             j_coord_offsetC  = DIM*jnrC;
251             j_coord_offsetD  = DIM*jnrD;
252
253             /* load j atom coordinates */
254             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
255                                                  x+j_coord_offsetC,x+j_coord_offsetD,
256                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
257
258             /* Calculate displacement vector */
259             dx00             = _mm256_sub_pd(ix0,jx0);
260             dy00             = _mm256_sub_pd(iy0,jy0);
261             dz00             = _mm256_sub_pd(iz0,jz0);
262             dx01             = _mm256_sub_pd(ix0,jx1);
263             dy01             = _mm256_sub_pd(iy0,jy1);
264             dz01             = _mm256_sub_pd(iz0,jz1);
265             dx02             = _mm256_sub_pd(ix0,jx2);
266             dy02             = _mm256_sub_pd(iy0,jy2);
267             dz02             = _mm256_sub_pd(iz0,jz2);
268             dx10             = _mm256_sub_pd(ix1,jx0);
269             dy10             = _mm256_sub_pd(iy1,jy0);
270             dz10             = _mm256_sub_pd(iz1,jz0);
271             dx11             = _mm256_sub_pd(ix1,jx1);
272             dy11             = _mm256_sub_pd(iy1,jy1);
273             dz11             = _mm256_sub_pd(iz1,jz1);
274             dx12             = _mm256_sub_pd(ix1,jx2);
275             dy12             = _mm256_sub_pd(iy1,jy2);
276             dz12             = _mm256_sub_pd(iz1,jz2);
277             dx20             = _mm256_sub_pd(ix2,jx0);
278             dy20             = _mm256_sub_pd(iy2,jy0);
279             dz20             = _mm256_sub_pd(iz2,jz0);
280             dx21             = _mm256_sub_pd(ix2,jx1);
281             dy21             = _mm256_sub_pd(iy2,jy1);
282             dz21             = _mm256_sub_pd(iz2,jz1);
283             dx22             = _mm256_sub_pd(ix2,jx2);
284             dy22             = _mm256_sub_pd(iy2,jy2);
285             dz22             = _mm256_sub_pd(iz2,jz2);
286
287             /* Calculate squared distance and things based on it */
288             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
289             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
290             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
291             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
292             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
293             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
294             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
295             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
296             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
297
298             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
299             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
300             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
301             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
302             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
303             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
304             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
305             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
306             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
307
308             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
309             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
310             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
311             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
312             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
313             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
314             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
315             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
316             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
317
318             fjx0             = _mm256_setzero_pd();
319             fjy0             = _mm256_setzero_pd();
320             fjz0             = _mm256_setzero_pd();
321             fjx1             = _mm256_setzero_pd();
322             fjy1             = _mm256_setzero_pd();
323             fjz1             = _mm256_setzero_pd();
324             fjx2             = _mm256_setzero_pd();
325             fjy2             = _mm256_setzero_pd();
326             fjz2             = _mm256_setzero_pd();
327
328             /**************************
329              * CALCULATE INTERACTIONS *
330              **************************/
331
332             if (gmx_mm256_any_lt(rsq00,rcutoff2))
333             {
334
335             r00              = _mm256_mul_pd(rsq00,rinv00);
336
337             /* EWALD ELECTROSTATICS */
338
339             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
340             ewrt             = _mm256_mul_pd(r00,ewtabscale);
341             ewitab           = _mm256_cvttpd_epi32(ewrt);
342             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
343             ewitab           = _mm_slli_epi32(ewitab,2);
344             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
345             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
346             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
347             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
348             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
349             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
350             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
351             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
352             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
353
354             /* LENNARD-JONES DISPERSION/REPULSION */
355
356             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
357             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
358             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
359             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
360             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
361
362             d                = _mm256_sub_pd(r00,rswitch);
363             d                = _mm256_max_pd(d,_mm256_setzero_pd());
364             d2               = _mm256_mul_pd(d,d);
365             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
366
367             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
368
369             /* Evaluate switch function */
370             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
371             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
372             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
373             velec            = _mm256_mul_pd(velec,sw);
374             vvdw             = _mm256_mul_pd(vvdw,sw);
375             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
376
377             /* Update potential sum for this i atom from the interaction with this j atom. */
378             velec            = _mm256_and_pd(velec,cutoff_mask);
379             velecsum         = _mm256_add_pd(velecsum,velec);
380             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
381             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
382
383             fscal            = _mm256_add_pd(felec,fvdw);
384
385             fscal            = _mm256_and_pd(fscal,cutoff_mask);
386
387             /* Calculate temporary vectorial force */
388             tx               = _mm256_mul_pd(fscal,dx00);
389             ty               = _mm256_mul_pd(fscal,dy00);
390             tz               = _mm256_mul_pd(fscal,dz00);
391
392             /* Update vectorial force */
393             fix0             = _mm256_add_pd(fix0,tx);
394             fiy0             = _mm256_add_pd(fiy0,ty);
395             fiz0             = _mm256_add_pd(fiz0,tz);
396
397             fjx0             = _mm256_add_pd(fjx0,tx);
398             fjy0             = _mm256_add_pd(fjy0,ty);
399             fjz0             = _mm256_add_pd(fjz0,tz);
400
401             }
402
403             /**************************
404              * CALCULATE INTERACTIONS *
405              **************************/
406
407             if (gmx_mm256_any_lt(rsq01,rcutoff2))
408             {
409
410             r01              = _mm256_mul_pd(rsq01,rinv01);
411
412             /* EWALD ELECTROSTATICS */
413
414             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
415             ewrt             = _mm256_mul_pd(r01,ewtabscale);
416             ewitab           = _mm256_cvttpd_epi32(ewrt);
417             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
418             ewitab           = _mm_slli_epi32(ewitab,2);
419             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
420             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
421             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
422             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
423             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
424             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
425             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
426             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
427             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
428
429             d                = _mm256_sub_pd(r01,rswitch);
430             d                = _mm256_max_pd(d,_mm256_setzero_pd());
431             d2               = _mm256_mul_pd(d,d);
432             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
433
434             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
435
436             /* Evaluate switch function */
437             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
438             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
439             velec            = _mm256_mul_pd(velec,sw);
440             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
441
442             /* Update potential sum for this i atom from the interaction with this j atom. */
443             velec            = _mm256_and_pd(velec,cutoff_mask);
444             velecsum         = _mm256_add_pd(velecsum,velec);
445
446             fscal            = felec;
447
448             fscal            = _mm256_and_pd(fscal,cutoff_mask);
449
450             /* Calculate temporary vectorial force */
451             tx               = _mm256_mul_pd(fscal,dx01);
452             ty               = _mm256_mul_pd(fscal,dy01);
453             tz               = _mm256_mul_pd(fscal,dz01);
454
455             /* Update vectorial force */
456             fix0             = _mm256_add_pd(fix0,tx);
457             fiy0             = _mm256_add_pd(fiy0,ty);
458             fiz0             = _mm256_add_pd(fiz0,tz);
459
460             fjx1             = _mm256_add_pd(fjx1,tx);
461             fjy1             = _mm256_add_pd(fjy1,ty);
462             fjz1             = _mm256_add_pd(fjz1,tz);
463
464             }
465
466             /**************************
467              * CALCULATE INTERACTIONS *
468              **************************/
469
470             if (gmx_mm256_any_lt(rsq02,rcutoff2))
471             {
472
473             r02              = _mm256_mul_pd(rsq02,rinv02);
474
475             /* EWALD ELECTROSTATICS */
476
477             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
478             ewrt             = _mm256_mul_pd(r02,ewtabscale);
479             ewitab           = _mm256_cvttpd_epi32(ewrt);
480             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
481             ewitab           = _mm_slli_epi32(ewitab,2);
482             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
483             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
484             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
485             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
486             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
487             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
488             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
489             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
490             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
491
492             d                = _mm256_sub_pd(r02,rswitch);
493             d                = _mm256_max_pd(d,_mm256_setzero_pd());
494             d2               = _mm256_mul_pd(d,d);
495             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
496
497             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
498
499             /* Evaluate switch function */
500             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
501             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
502             velec            = _mm256_mul_pd(velec,sw);
503             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
504
505             /* Update potential sum for this i atom from the interaction with this j atom. */
506             velec            = _mm256_and_pd(velec,cutoff_mask);
507             velecsum         = _mm256_add_pd(velecsum,velec);
508
509             fscal            = felec;
510
511             fscal            = _mm256_and_pd(fscal,cutoff_mask);
512
513             /* Calculate temporary vectorial force */
514             tx               = _mm256_mul_pd(fscal,dx02);
515             ty               = _mm256_mul_pd(fscal,dy02);
516             tz               = _mm256_mul_pd(fscal,dz02);
517
518             /* Update vectorial force */
519             fix0             = _mm256_add_pd(fix0,tx);
520             fiy0             = _mm256_add_pd(fiy0,ty);
521             fiz0             = _mm256_add_pd(fiz0,tz);
522
523             fjx2             = _mm256_add_pd(fjx2,tx);
524             fjy2             = _mm256_add_pd(fjy2,ty);
525             fjz2             = _mm256_add_pd(fjz2,tz);
526
527             }
528
529             /**************************
530              * CALCULATE INTERACTIONS *
531              **************************/
532
533             if (gmx_mm256_any_lt(rsq10,rcutoff2))
534             {
535
536             r10              = _mm256_mul_pd(rsq10,rinv10);
537
538             /* EWALD ELECTROSTATICS */
539
540             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
541             ewrt             = _mm256_mul_pd(r10,ewtabscale);
542             ewitab           = _mm256_cvttpd_epi32(ewrt);
543             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
544             ewitab           = _mm_slli_epi32(ewitab,2);
545             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
546             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
547             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
548             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
549             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
550             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
551             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
552             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
553             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
554
555             d                = _mm256_sub_pd(r10,rswitch);
556             d                = _mm256_max_pd(d,_mm256_setzero_pd());
557             d2               = _mm256_mul_pd(d,d);
558             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
559
560             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
561
562             /* Evaluate switch function */
563             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
564             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
565             velec            = _mm256_mul_pd(velec,sw);
566             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
567
568             /* Update potential sum for this i atom from the interaction with this j atom. */
569             velec            = _mm256_and_pd(velec,cutoff_mask);
570             velecsum         = _mm256_add_pd(velecsum,velec);
571
572             fscal            = felec;
573
574             fscal            = _mm256_and_pd(fscal,cutoff_mask);
575
576             /* Calculate temporary vectorial force */
577             tx               = _mm256_mul_pd(fscal,dx10);
578             ty               = _mm256_mul_pd(fscal,dy10);
579             tz               = _mm256_mul_pd(fscal,dz10);
580
581             /* Update vectorial force */
582             fix1             = _mm256_add_pd(fix1,tx);
583             fiy1             = _mm256_add_pd(fiy1,ty);
584             fiz1             = _mm256_add_pd(fiz1,tz);
585
586             fjx0             = _mm256_add_pd(fjx0,tx);
587             fjy0             = _mm256_add_pd(fjy0,ty);
588             fjz0             = _mm256_add_pd(fjz0,tz);
589
590             }
591
592             /**************************
593              * CALCULATE INTERACTIONS *
594              **************************/
595
596             if (gmx_mm256_any_lt(rsq11,rcutoff2))
597             {
598
599             r11              = _mm256_mul_pd(rsq11,rinv11);
600
601             /* EWALD ELECTROSTATICS */
602
603             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
604             ewrt             = _mm256_mul_pd(r11,ewtabscale);
605             ewitab           = _mm256_cvttpd_epi32(ewrt);
606             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
607             ewitab           = _mm_slli_epi32(ewitab,2);
608             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
609             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
610             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
611             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
612             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
613             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
614             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
615             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
616             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
617
618             d                = _mm256_sub_pd(r11,rswitch);
619             d                = _mm256_max_pd(d,_mm256_setzero_pd());
620             d2               = _mm256_mul_pd(d,d);
621             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
622
623             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
624
625             /* Evaluate switch function */
626             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
627             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
628             velec            = _mm256_mul_pd(velec,sw);
629             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
630
631             /* Update potential sum for this i atom from the interaction with this j atom. */
632             velec            = _mm256_and_pd(velec,cutoff_mask);
633             velecsum         = _mm256_add_pd(velecsum,velec);
634
635             fscal            = felec;
636
637             fscal            = _mm256_and_pd(fscal,cutoff_mask);
638
639             /* Calculate temporary vectorial force */
640             tx               = _mm256_mul_pd(fscal,dx11);
641             ty               = _mm256_mul_pd(fscal,dy11);
642             tz               = _mm256_mul_pd(fscal,dz11);
643
644             /* Update vectorial force */
645             fix1             = _mm256_add_pd(fix1,tx);
646             fiy1             = _mm256_add_pd(fiy1,ty);
647             fiz1             = _mm256_add_pd(fiz1,tz);
648
649             fjx1             = _mm256_add_pd(fjx1,tx);
650             fjy1             = _mm256_add_pd(fjy1,ty);
651             fjz1             = _mm256_add_pd(fjz1,tz);
652
653             }
654
655             /**************************
656              * CALCULATE INTERACTIONS *
657              **************************/
658
659             if (gmx_mm256_any_lt(rsq12,rcutoff2))
660             {
661
662             r12              = _mm256_mul_pd(rsq12,rinv12);
663
664             /* EWALD ELECTROSTATICS */
665
666             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
667             ewrt             = _mm256_mul_pd(r12,ewtabscale);
668             ewitab           = _mm256_cvttpd_epi32(ewrt);
669             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
670             ewitab           = _mm_slli_epi32(ewitab,2);
671             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
672             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
673             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
674             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
675             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
676             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
677             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
678             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
679             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
680
681             d                = _mm256_sub_pd(r12,rswitch);
682             d                = _mm256_max_pd(d,_mm256_setzero_pd());
683             d2               = _mm256_mul_pd(d,d);
684             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
685
686             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
687
688             /* Evaluate switch function */
689             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
690             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
691             velec            = _mm256_mul_pd(velec,sw);
692             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
693
694             /* Update potential sum for this i atom from the interaction with this j atom. */
695             velec            = _mm256_and_pd(velec,cutoff_mask);
696             velecsum         = _mm256_add_pd(velecsum,velec);
697
698             fscal            = felec;
699
700             fscal            = _mm256_and_pd(fscal,cutoff_mask);
701
702             /* Calculate temporary vectorial force */
703             tx               = _mm256_mul_pd(fscal,dx12);
704             ty               = _mm256_mul_pd(fscal,dy12);
705             tz               = _mm256_mul_pd(fscal,dz12);
706
707             /* Update vectorial force */
708             fix1             = _mm256_add_pd(fix1,tx);
709             fiy1             = _mm256_add_pd(fiy1,ty);
710             fiz1             = _mm256_add_pd(fiz1,tz);
711
712             fjx2             = _mm256_add_pd(fjx2,tx);
713             fjy2             = _mm256_add_pd(fjy2,ty);
714             fjz2             = _mm256_add_pd(fjz2,tz);
715
716             }
717
718             /**************************
719              * CALCULATE INTERACTIONS *
720              **************************/
721
722             if (gmx_mm256_any_lt(rsq20,rcutoff2))
723             {
724
725             r20              = _mm256_mul_pd(rsq20,rinv20);
726
727             /* EWALD ELECTROSTATICS */
728
729             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
730             ewrt             = _mm256_mul_pd(r20,ewtabscale);
731             ewitab           = _mm256_cvttpd_epi32(ewrt);
732             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
733             ewitab           = _mm_slli_epi32(ewitab,2);
734             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
735             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
736             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
737             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
738             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
739             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
740             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
741             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
742             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
743
744             d                = _mm256_sub_pd(r20,rswitch);
745             d                = _mm256_max_pd(d,_mm256_setzero_pd());
746             d2               = _mm256_mul_pd(d,d);
747             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
748
749             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
750
751             /* Evaluate switch function */
752             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
753             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
754             velec            = _mm256_mul_pd(velec,sw);
755             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
756
757             /* Update potential sum for this i atom from the interaction with this j atom. */
758             velec            = _mm256_and_pd(velec,cutoff_mask);
759             velecsum         = _mm256_add_pd(velecsum,velec);
760
761             fscal            = felec;
762
763             fscal            = _mm256_and_pd(fscal,cutoff_mask);
764
765             /* Calculate temporary vectorial force */
766             tx               = _mm256_mul_pd(fscal,dx20);
767             ty               = _mm256_mul_pd(fscal,dy20);
768             tz               = _mm256_mul_pd(fscal,dz20);
769
770             /* Update vectorial force */
771             fix2             = _mm256_add_pd(fix2,tx);
772             fiy2             = _mm256_add_pd(fiy2,ty);
773             fiz2             = _mm256_add_pd(fiz2,tz);
774
775             fjx0             = _mm256_add_pd(fjx0,tx);
776             fjy0             = _mm256_add_pd(fjy0,ty);
777             fjz0             = _mm256_add_pd(fjz0,tz);
778
779             }
780
781             /**************************
782              * CALCULATE INTERACTIONS *
783              **************************/
784
785             if (gmx_mm256_any_lt(rsq21,rcutoff2))
786             {
787
788             r21              = _mm256_mul_pd(rsq21,rinv21);
789
790             /* EWALD ELECTROSTATICS */
791
792             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
793             ewrt             = _mm256_mul_pd(r21,ewtabscale);
794             ewitab           = _mm256_cvttpd_epi32(ewrt);
795             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
796             ewitab           = _mm_slli_epi32(ewitab,2);
797             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
798             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
799             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
800             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
801             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
802             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
803             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
804             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
805             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
806
807             d                = _mm256_sub_pd(r21,rswitch);
808             d                = _mm256_max_pd(d,_mm256_setzero_pd());
809             d2               = _mm256_mul_pd(d,d);
810             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
811
812             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
813
814             /* Evaluate switch function */
815             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
816             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
817             velec            = _mm256_mul_pd(velec,sw);
818             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
819
820             /* Update potential sum for this i atom from the interaction with this j atom. */
821             velec            = _mm256_and_pd(velec,cutoff_mask);
822             velecsum         = _mm256_add_pd(velecsum,velec);
823
824             fscal            = felec;
825
826             fscal            = _mm256_and_pd(fscal,cutoff_mask);
827
828             /* Calculate temporary vectorial force */
829             tx               = _mm256_mul_pd(fscal,dx21);
830             ty               = _mm256_mul_pd(fscal,dy21);
831             tz               = _mm256_mul_pd(fscal,dz21);
832
833             /* Update vectorial force */
834             fix2             = _mm256_add_pd(fix2,tx);
835             fiy2             = _mm256_add_pd(fiy2,ty);
836             fiz2             = _mm256_add_pd(fiz2,tz);
837
838             fjx1             = _mm256_add_pd(fjx1,tx);
839             fjy1             = _mm256_add_pd(fjy1,ty);
840             fjz1             = _mm256_add_pd(fjz1,tz);
841
842             }
843
844             /**************************
845              * CALCULATE INTERACTIONS *
846              **************************/
847
848             if (gmx_mm256_any_lt(rsq22,rcutoff2))
849             {
850
851             r22              = _mm256_mul_pd(rsq22,rinv22);
852
853             /* EWALD ELECTROSTATICS */
854
855             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
856             ewrt             = _mm256_mul_pd(r22,ewtabscale);
857             ewitab           = _mm256_cvttpd_epi32(ewrt);
858             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
859             ewitab           = _mm_slli_epi32(ewitab,2);
860             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
861             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
862             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
863             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
864             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
865             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
866             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
867             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
868             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
869
870             d                = _mm256_sub_pd(r22,rswitch);
871             d                = _mm256_max_pd(d,_mm256_setzero_pd());
872             d2               = _mm256_mul_pd(d,d);
873             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
874
875             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
876
877             /* Evaluate switch function */
878             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
879             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
880             velec            = _mm256_mul_pd(velec,sw);
881             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
882
883             /* Update potential sum for this i atom from the interaction with this j atom. */
884             velec            = _mm256_and_pd(velec,cutoff_mask);
885             velecsum         = _mm256_add_pd(velecsum,velec);
886
887             fscal            = felec;
888
889             fscal            = _mm256_and_pd(fscal,cutoff_mask);
890
891             /* Calculate temporary vectorial force */
892             tx               = _mm256_mul_pd(fscal,dx22);
893             ty               = _mm256_mul_pd(fscal,dy22);
894             tz               = _mm256_mul_pd(fscal,dz22);
895
896             /* Update vectorial force */
897             fix2             = _mm256_add_pd(fix2,tx);
898             fiy2             = _mm256_add_pd(fiy2,ty);
899             fiz2             = _mm256_add_pd(fiz2,tz);
900
901             fjx2             = _mm256_add_pd(fjx2,tx);
902             fjy2             = _mm256_add_pd(fjy2,ty);
903             fjz2             = _mm256_add_pd(fjz2,tz);
904
905             }
906
907             fjptrA             = f+j_coord_offsetA;
908             fjptrB             = f+j_coord_offsetB;
909             fjptrC             = f+j_coord_offsetC;
910             fjptrD             = f+j_coord_offsetD;
911
912             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
913                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
914
915             /* Inner loop uses 603 flops */
916         }
917
918         if(jidx<j_index_end)
919         {
920
921             /* Get j neighbor index, and coordinate index */
922             jnrlistA         = jjnr[jidx];
923             jnrlistB         = jjnr[jidx+1];
924             jnrlistC         = jjnr[jidx+2];
925             jnrlistD         = jjnr[jidx+3];
926             /* Sign of each element will be negative for non-real atoms.
927              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
928              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
929              */
930             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
931
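            /* The permutes below duplicate each 32-bit comparison result into a pair of
             * lanes, so the assembled 256-bit dummy_mask carries a full 64-bit mask per
             * double, matching what the _mm256_andnot_pd() calls expect when clearing
             * dummy entries.
             */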
932             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
933             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
934             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
935
936             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
937             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
938             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
939             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
940             j_coord_offsetA  = DIM*jnrA;
941             j_coord_offsetB  = DIM*jnrB;
942             j_coord_offsetC  = DIM*jnrC;
943             j_coord_offsetD  = DIM*jnrD;
944
945             /* load j atom coordinates */
946             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
947                                                  x+j_coord_offsetC,x+j_coord_offsetD,
948                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
949
950             /* Calculate displacement vector */
951             dx00             = _mm256_sub_pd(ix0,jx0);
952             dy00             = _mm256_sub_pd(iy0,jy0);
953             dz00             = _mm256_sub_pd(iz0,jz0);
954             dx01             = _mm256_sub_pd(ix0,jx1);
955             dy01             = _mm256_sub_pd(iy0,jy1);
956             dz01             = _mm256_sub_pd(iz0,jz1);
957             dx02             = _mm256_sub_pd(ix0,jx2);
958             dy02             = _mm256_sub_pd(iy0,jy2);
959             dz02             = _mm256_sub_pd(iz0,jz2);
960             dx10             = _mm256_sub_pd(ix1,jx0);
961             dy10             = _mm256_sub_pd(iy1,jy0);
962             dz10             = _mm256_sub_pd(iz1,jz0);
963             dx11             = _mm256_sub_pd(ix1,jx1);
964             dy11             = _mm256_sub_pd(iy1,jy1);
965             dz11             = _mm256_sub_pd(iz1,jz1);
966             dx12             = _mm256_sub_pd(ix1,jx2);
967             dy12             = _mm256_sub_pd(iy1,jy2);
968             dz12             = _mm256_sub_pd(iz1,jz2);
969             dx20             = _mm256_sub_pd(ix2,jx0);
970             dy20             = _mm256_sub_pd(iy2,jy0);
971             dz20             = _mm256_sub_pd(iz2,jz0);
972             dx21             = _mm256_sub_pd(ix2,jx1);
973             dy21             = _mm256_sub_pd(iy2,jy1);
974             dz21             = _mm256_sub_pd(iz2,jz1);
975             dx22             = _mm256_sub_pd(ix2,jx2);
976             dy22             = _mm256_sub_pd(iy2,jy2);
977             dz22             = _mm256_sub_pd(iz2,jz2);
978
979             /* Calculate squared distance and things based on it */
980             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
981             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
982             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
983             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
984             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
985             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
986             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
987             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
988             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
989
990             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
991             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
992             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
993             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
994             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
995             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
996             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
997             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
998             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
999
1000             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1001             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1002             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1003             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1004             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1005             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1006             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1007             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1008             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1009
1010             fjx0             = _mm256_setzero_pd();
1011             fjy0             = _mm256_setzero_pd();
1012             fjz0             = _mm256_setzero_pd();
1013             fjx1             = _mm256_setzero_pd();
1014             fjy1             = _mm256_setzero_pd();
1015             fjz1             = _mm256_setzero_pd();
1016             fjx2             = _mm256_setzero_pd();
1017             fjy2             = _mm256_setzero_pd();
1018             fjz2             = _mm256_setzero_pd();
1019
1020             /**************************
1021              * CALCULATE INTERACTIONS *
1022              **************************/
1023
1024             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1025             {
1026
1027             r00              = _mm256_mul_pd(rsq00,rinv00);
1028             r00              = _mm256_andnot_pd(dummy_mask,r00);
1029
1030             /* EWALD ELECTROSTATICS */
1031
1032             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1033             ewrt             = _mm256_mul_pd(r00,ewtabscale);
1034             ewitab           = _mm256_cvttpd_epi32(ewrt);
1035             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1036             ewitab           = _mm_slli_epi32(ewitab,2);
1037             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1038             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1039             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1040             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1041             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1042             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1043             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1044             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
1045             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
1046
1047             /* LENNARD-JONES DISPERSION/REPULSION */
1048
1049             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1050             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
1051             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
1052             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
1053             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
1054
1055             d                = _mm256_sub_pd(r00,rswitch);
1056             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1057             d2               = _mm256_mul_pd(d,d);
1058             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1059
1060             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1061
1062             /* Evaluate switch function */
1063             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
1064             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
1065             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
1066             velec            = _mm256_mul_pd(velec,sw);
1067             vvdw             = _mm256_mul_pd(vvdw,sw);
1068             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1069
1070             /* Update potential sum for this i atom from the interaction with this j atom. */
1071             velec            = _mm256_and_pd(velec,cutoff_mask);
1072             velec            = _mm256_andnot_pd(dummy_mask,velec);
1073             velecsum         = _mm256_add_pd(velecsum,velec);
1074             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
1075             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
1076             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
1077
1078             fscal            = _mm256_add_pd(felec,fvdw);
1079
1080             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1081
1082             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1083
1084             /* Calculate temporary vectorial force */
1085             tx               = _mm256_mul_pd(fscal,dx00);
1086             ty               = _mm256_mul_pd(fscal,dy00);
1087             tz               = _mm256_mul_pd(fscal,dz00);
1088
1089             /* Update vectorial force */
1090             fix0             = _mm256_add_pd(fix0,tx);
1091             fiy0             = _mm256_add_pd(fiy0,ty);
1092             fiz0             = _mm256_add_pd(fiz0,tz);
1093
1094             fjx0             = _mm256_add_pd(fjx0,tx);
1095             fjy0             = _mm256_add_pd(fjy0,ty);
1096             fjz0             = _mm256_add_pd(fjz0,tz);
1097
1098             }
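                 /* Scalar view of the Ewald table lookup used in each of these blocks, with
                  * idx = (int)(r*ewtabscale) and eps its fractional part:
                  *   F     = tab[4*idx]   + eps*tab[4*idx+1]
                  *   V     = tab[4*idx+2] - ewtabhalfspace*eps*(tab[4*idx] + F)
                  *   velec = qq*(rinv - V),   felec = qq*rinv*(rinvsq - F)
                  * after which the switch function modifies both the force and the potential. */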
1099
1100             /**************************
1101              * CALCULATE INTERACTIONS *
1102              **************************/
1103
1104             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1105             {
1106
1107             r01              = _mm256_mul_pd(rsq01,rinv01);
1108             r01              = _mm256_andnot_pd(dummy_mask,r01);
1109
1110             /* EWALD ELECTROSTATICS */
1111
1112             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1113             ewrt             = _mm256_mul_pd(r01,ewtabscale);
1114             ewitab           = _mm256_cvttpd_epi32(ewrt);
1115             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1116             ewitab           = _mm_slli_epi32(ewitab,2);
1117             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1118             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1119             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1120             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1121             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1122             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1123             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1124             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
1125             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
1126
1127             d                = _mm256_sub_pd(r01,rswitch);
1128             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1129             d2               = _mm256_mul_pd(d,d);
1130             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1131
1132             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1133
1134             /* Evaluate switch function */
1135             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
1136             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
1137             velec            = _mm256_mul_pd(velec,sw);
1138             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1139
1140             /* Update potential sum for this i atom from the interaction with this j atom. */
1141             velec            = _mm256_and_pd(velec,cutoff_mask);
1142             velec            = _mm256_andnot_pd(dummy_mask,velec);
1143             velecsum         = _mm256_add_pd(velecsum,velec);
1144
1145             fscal            = felec;
1146
1147             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1148
1149             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1150
1151             /* Calculate temporary vectorial force */
1152             tx               = _mm256_mul_pd(fscal,dx01);
1153             ty               = _mm256_mul_pd(fscal,dy01);
1154             tz               = _mm256_mul_pd(fscal,dz01);
1155
1156             /* Update vectorial force */
1157             fix0             = _mm256_add_pd(fix0,tx);
1158             fiy0             = _mm256_add_pd(fiy0,ty);
1159             fiz0             = _mm256_add_pd(fiz0,tz);
1160
1161             fjx1             = _mm256_add_pd(fjx1,tx);
1162             fjy1             = _mm256_add_pd(fjy1,ty);
1163             fjz1             = _mm256_add_pd(fjz1,tz);
1164
1165             }
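                 /* Two masks control what gets accumulated in every block: cutoff_mask zeroes
                  * lanes with rsq >= rcutoff2, and dummy_mask additionally zeroes lanes that
                  * are only padding in this final, partially filled j quartet. */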
1166
1167             /**************************
1168              * CALCULATE INTERACTIONS *
1169              **************************/
1170
1171             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1172             {
1173
1174             r02              = _mm256_mul_pd(rsq02,rinv02);
1175             r02              = _mm256_andnot_pd(dummy_mask,r02);
1176
1177             /* EWALD ELECTROSTATICS */
1178
1179             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1180             ewrt             = _mm256_mul_pd(r02,ewtabscale);
1181             ewitab           = _mm256_cvttpd_epi32(ewrt);
1182             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1183             ewitab           = _mm_slli_epi32(ewitab,2);
1184             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1185             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1186             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1187             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1188             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1189             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1190             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1191             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
1192             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
1193
1194             d                = _mm256_sub_pd(r02,rswitch);
1195             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1196             d2               = _mm256_mul_pd(d,d);
1197             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1198
1199             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1200
1201             /* Evaluate switch function */
1202             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
1203             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
1204             velec            = _mm256_mul_pd(velec,sw);
1205             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
1206
1207             /* Update potential sum for this i atom from the interaction with this j atom. */
1208             velec            = _mm256_and_pd(velec,cutoff_mask);
1209             velec            = _mm256_andnot_pd(dummy_mask,velec);
1210             velecsum         = _mm256_add_pd(velecsum,velec);
1211
1212             fscal            = felec;
1213
1214             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1215
1216             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1217
1218             /* Calculate temporary vectorial force */
1219             tx               = _mm256_mul_pd(fscal,dx02);
1220             ty               = _mm256_mul_pd(fscal,dy02);
1221             tz               = _mm256_mul_pd(fscal,dz02);
1222
1223             /* Update vectorial force */
1224             fix0             = _mm256_add_pd(fix0,tx);
1225             fiy0             = _mm256_add_pd(fiy0,ty);
1226             fiz0             = _mm256_add_pd(fiz0,tz);
1227
1228             fjx2             = _mm256_add_pd(fjx2,tx);
1229             fjy2             = _mm256_add_pd(fjy2,ty);
1230             fjz2             = _mm256_add_pd(fjz2,tz);
1231
1232             }
1233
1234             /**************************
1235              * CALCULATE INTERACTIONS *
1236              **************************/
1237
1238             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1239             {
1240
1241             r10              = _mm256_mul_pd(rsq10,rinv10);
1242             r10              = _mm256_andnot_pd(dummy_mask,r10);
1243
1244             /* EWALD ELECTROSTATICS */
1245
1246             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1247             ewrt             = _mm256_mul_pd(r10,ewtabscale);
1248             ewitab           = _mm256_cvttpd_epi32(ewrt);
1249             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1250             ewitab           = _mm_slli_epi32(ewitab,2);
1251             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1252             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1253             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1254             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1255             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1256             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1257             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1258             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
1259             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1260
1261             d                = _mm256_sub_pd(r10,rswitch);
1262             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1263             d2               = _mm256_mul_pd(d,d);
1264             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1265
1266             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1267
1268             /* Evaluate switch function */
1269             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
1270             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
1271             velec            = _mm256_mul_pd(velec,sw);
1272             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1273
1274             /* Update potential sum for this i atom from the interaction with this j atom. */
1275             velec            = _mm256_and_pd(velec,cutoff_mask);
1276             velec            = _mm256_andnot_pd(dummy_mask,velec);
1277             velecsum         = _mm256_add_pd(velecsum,velec);
1278
1279             fscal            = felec;
1280
1281             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1282
1283             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1284
1285             /* Calculate temporary vectorial force */
1286             tx               = _mm256_mul_pd(fscal,dx10);
1287             ty               = _mm256_mul_pd(fscal,dy10);
1288             tz               = _mm256_mul_pd(fscal,dz10);
1289
1290             /* Update vectorial force */
1291             fix1             = _mm256_add_pd(fix1,tx);
1292             fiy1             = _mm256_add_pd(fiy1,ty);
1293             fiz1             = _mm256_add_pd(fiz1,tz);
1294
1295             fjx0             = _mm256_add_pd(fjx0,tx);
1296             fjy0             = _mm256_add_pd(fjy0,ty);
1297             fjz0             = _mm256_add_pd(fjz0,tz);
1298
1299             }
1300
1301             /**************************
1302              * CALCULATE INTERACTIONS *
1303              **************************/
1304
1305             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1306             {
1307
1308             r11              = _mm256_mul_pd(rsq11,rinv11);
1309             r11              = _mm256_andnot_pd(dummy_mask,r11);
1310
1311             /* EWALD ELECTROSTATICS */
1312
1313             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1314             ewrt             = _mm256_mul_pd(r11,ewtabscale);
1315             ewitab           = _mm256_cvttpd_epi32(ewrt);
1316             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1317             ewitab           = _mm_slli_epi32(ewitab,2);
1318             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1319             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1320             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1321             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1322             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1323             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1324             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1325             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
1326             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
1327
1328             d                = _mm256_sub_pd(r11,rswitch);
1329             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1330             d2               = _mm256_mul_pd(d,d);
1331             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1332
1333             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1334
1335             /* Evaluate switch function */
1336             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
1337             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
1338             velec            = _mm256_mul_pd(velec,sw);
1339             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1340
1341             /* Update potential sum for this i atom from the interaction with this j atom. */
1342             velec            = _mm256_and_pd(velec,cutoff_mask);
1343             velec            = _mm256_andnot_pd(dummy_mask,velec);
1344             velecsum         = _mm256_add_pd(velecsum,velec);
1345
1346             fscal            = felec;
1347
1348             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1349
1350             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1351
1352             /* Calculate temporary vectorial force */
1353             tx               = _mm256_mul_pd(fscal,dx11);
1354             ty               = _mm256_mul_pd(fscal,dy11);
1355             tz               = _mm256_mul_pd(fscal,dz11);
1356
1357             /* Update vectorial force */
1358             fix1             = _mm256_add_pd(fix1,tx);
1359             fiy1             = _mm256_add_pd(fiy1,ty);
1360             fiz1             = _mm256_add_pd(fiz1,tz);
1361
1362             fjx1             = _mm256_add_pd(fjx1,tx);
1363             fjy1             = _mm256_add_pd(fjy1,ty);
1364             fjz1             = _mm256_add_pd(fjz1,tz);
1365
1366             }
1367
1368             /**************************
1369              * CALCULATE INTERACTIONS *
1370              **************************/
1371
1372             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1373             {
1374
1375             r12              = _mm256_mul_pd(rsq12,rinv12);
1376             r12              = _mm256_andnot_pd(dummy_mask,r12);
1377
1378             /* EWALD ELECTROSTATICS */
1379
1380             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1381             ewrt             = _mm256_mul_pd(r12,ewtabscale);
1382             ewitab           = _mm256_cvttpd_epi32(ewrt);
1383             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1384             ewitab           = _mm_slli_epi32(ewitab,2);
1385             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1386             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1387             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1388             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1389             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1390             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1391             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1392             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
1393             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
1394
1395             d                = _mm256_sub_pd(r12,rswitch);
1396             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1397             d2               = _mm256_mul_pd(d,d);
1398             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1399
1400             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1401
1402             /* Evaluate switch function */
1403             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
1404             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
1405             velec            = _mm256_mul_pd(velec,sw);
1406             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1407
1408             /* Update potential sum for this i atom from the interaction with this j atom. */
1409             velec            = _mm256_and_pd(velec,cutoff_mask);
1410             velec            = _mm256_andnot_pd(dummy_mask,velec);
1411             velecsum         = _mm256_add_pd(velecsum,velec);
1412
1413             fscal            = felec;
1414
1415             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1416
1417             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1418
1419             /* Calculate temporary vectorial force */
1420             tx               = _mm256_mul_pd(fscal,dx12);
1421             ty               = _mm256_mul_pd(fscal,dy12);
1422             tz               = _mm256_mul_pd(fscal,dz12);
1423
1424             /* Update vectorial force */
1425             fix1             = _mm256_add_pd(fix1,tx);
1426             fiy1             = _mm256_add_pd(fiy1,ty);
1427             fiz1             = _mm256_add_pd(fiz1,tz);
1428
1429             fjx2             = _mm256_add_pd(fjx2,tx);
1430             fjy2             = _mm256_add_pd(fjy2,ty);
1431             fjz2             = _mm256_add_pd(fjz2,tz);
1432
1433             }
1434
1435             /**************************
1436              * CALCULATE INTERACTIONS *
1437              **************************/
1438
1439             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1440             {
1441
1442             r20              = _mm256_mul_pd(rsq20,rinv20);
1443             r20              = _mm256_andnot_pd(dummy_mask,r20);
1444
1445             /* EWALD ELECTROSTATICS */
1446
1447             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1448             ewrt             = _mm256_mul_pd(r20,ewtabscale);
1449             ewitab           = _mm256_cvttpd_epi32(ewrt);
1450             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1451             ewitab           = _mm_slli_epi32(ewitab,2);
1452             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1453             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1454             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1455             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1456             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1457             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1458             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1459             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
1460             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1461
1462             d                = _mm256_sub_pd(r20,rswitch);
1463             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1464             d2               = _mm256_mul_pd(d,d);
1465             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1466
1467             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1468
1469             /* Evaluate switch function */
1470             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
1471             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
1472             velec            = _mm256_mul_pd(velec,sw);
1473             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1474
1475             /* Update potential sum for this i atom from the interaction with this j atom. */
1476             velec            = _mm256_and_pd(velec,cutoff_mask);
1477             velec            = _mm256_andnot_pd(dummy_mask,velec);
1478             velecsum         = _mm256_add_pd(velecsum,velec);
1479
1480             fscal            = felec;
1481
1482             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1483
1484             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1485
1486             /* Calculate temporary vectorial force */
1487             tx               = _mm256_mul_pd(fscal,dx20);
1488             ty               = _mm256_mul_pd(fscal,dy20);
1489             tz               = _mm256_mul_pd(fscal,dz20);
1490
1491             /* Update vectorial force */
1492             fix2             = _mm256_add_pd(fix2,tx);
1493             fiy2             = _mm256_add_pd(fiy2,ty);
1494             fiz2             = _mm256_add_pd(fiz2,tz);
1495
1496             fjx0             = _mm256_add_pd(fjx0,tx);
1497             fjy0             = _mm256_add_pd(fjy0,ty);
1498             fjz0             = _mm256_add_pd(fjz0,tz);
1499
1500             }
1501
1502             /**************************
1503              * CALCULATE INTERACTIONS *
1504              **************************/
1505
1506             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1507             {
1508
1509             r21              = _mm256_mul_pd(rsq21,rinv21);
1510             r21              = _mm256_andnot_pd(dummy_mask,r21);
1511
1512             /* EWALD ELECTROSTATICS */
1513
1514             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1515             ewrt             = _mm256_mul_pd(r21,ewtabscale);
1516             ewitab           = _mm256_cvttpd_epi32(ewrt);
1517             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1518             ewitab           = _mm_slli_epi32(ewitab,2);
1519             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1520             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1521             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1522             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1523             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1524             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1525             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1526             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
1527             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
1528
1529             d                = _mm256_sub_pd(r21,rswitch);
1530             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1531             d2               = _mm256_mul_pd(d,d);
1532             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1533
1534             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1535
1536             /* Evaluate switch function */
1537             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
1538             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
1539             velec            = _mm256_mul_pd(velec,sw);
1540             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1541
1542             /* Update potential sum for this i atom from the interaction with this j atom. */
1543             velec            = _mm256_and_pd(velec,cutoff_mask);
1544             velec            = _mm256_andnot_pd(dummy_mask,velec);
1545             velecsum         = _mm256_add_pd(velecsum,velec);
1546
1547             fscal            = felec;
1548
1549             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1550
1551             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1552
1553             /* Calculate temporary vectorial force */
1554             tx               = _mm256_mul_pd(fscal,dx21);
1555             ty               = _mm256_mul_pd(fscal,dy21);
1556             tz               = _mm256_mul_pd(fscal,dz21);
1557
1558             /* Update vectorial force */
1559             fix2             = _mm256_add_pd(fix2,tx);
1560             fiy2             = _mm256_add_pd(fiy2,ty);
1561             fiz2             = _mm256_add_pd(fiz2,tz);
1562
1563             fjx1             = _mm256_add_pd(fjx1,tx);
1564             fjy1             = _mm256_add_pd(fjy1,ty);
1565             fjz1             = _mm256_add_pd(fjz1,tz);
1566
1567             }
1568
1569             /**************************
1570              * CALCULATE INTERACTIONS *
1571              **************************/
1572
1573             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1574             {
1575
1576             r22              = _mm256_mul_pd(rsq22,rinv22);
1577             r22              = _mm256_andnot_pd(dummy_mask,r22);
1578
1579             /* EWALD ELECTROSTATICS */
1580
1581             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1582             ewrt             = _mm256_mul_pd(r22,ewtabscale);
1583             ewitab           = _mm256_cvttpd_epi32(ewrt);
1584             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1585             ewitab           = _mm_slli_epi32(ewitab,2);
1586             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1587             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1588             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1589             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1590             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1591             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1592             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1593             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
1594             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
1595
1596             d                = _mm256_sub_pd(r22,rswitch);
1597             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1598             d2               = _mm256_mul_pd(d,d);
1599             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1600
1601             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1602
1603             /* Evaluate switch function */
1604             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
1605             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
1606             velec            = _mm256_mul_pd(velec,sw);
1607             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1608
1609             /* Update potential sum for this i atom from the interaction with this j atom. */
1610             velec            = _mm256_and_pd(velec,cutoff_mask);
1611             velec            = _mm256_andnot_pd(dummy_mask,velec);
1612             velecsum         = _mm256_add_pd(velecsum,velec);
1613
1614             fscal            = felec;
1615
1616             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1617
1618             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1619
1620             /* Calculate temporary vectorial force */
1621             tx               = _mm256_mul_pd(fscal,dx22);
1622             ty               = _mm256_mul_pd(fscal,dy22);
1623             tz               = _mm256_mul_pd(fscal,dz22);
1624
1625             /* Update vectorial force */
1626             fix2             = _mm256_add_pd(fix2,tx);
1627             fiy2             = _mm256_add_pd(fiy2,ty);
1628             fiz2             = _mm256_add_pd(fiz2,tz);
1629
1630             fjx2             = _mm256_add_pd(fjx2,tx);
1631             fjy2             = _mm256_add_pd(fjy2,ty);
1632             fjz2             = _mm256_add_pd(fjz2,tz);
1633
1634             }
1635
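                 /* Scatter the accumulated j forces.  Padding lanes (jnrlist < 0) are redirected
                  * to a local scratch buffer so the full 4-wide decrement can always be used. */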
1636             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1637             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1638             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1639             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1640
1641             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1642                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1643
1644             /* Inner loop uses 612 flops */
1645         }
1646
1647         /* End of innermost loop */
1648
1649         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1650                                                  f+i_coord_offset,fshift+i_shift_offset);
1651
1652         ggid                        = gid[iidx];
1653         /* Update potential energies */
1654         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1655         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
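             /* Each potential accumulator holds four per-lane partial sums; the updates above
              * reduce them and add the result to the energy-group bin selected by ggid. */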
1656
1657         /* Increment number of inner iterations */
1658         inneriter                  += j_index_end - j_index_start;
1659
1660         /* Outer loop uses 20 flops */
1661     }
1662
1663     /* Increment number of outer iterations */
1664     outeriter        += nri;
1665
1666     /* Update outer/inner flops */
1667
1668     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*612);
1669 }
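     /*
      * Editorial sketch (not generated code and never compiled; guarded by #if 0):
      * every SIMD interaction block in the kernels above and below follows the same
      * scalar recipe.  The helper spells that recipe out for a single atom pair,
      * assuming the same FDV0 Ewald table layout and switch coefficients the kernels
      * set up.  The function name and signature are illustrative only.
      */
     #if 0
     static double
     scalar_pair_ewsw_ljsw(double rsq, double qq, double c6, double c12,
                           const double *ewtab, double ewtabscale, double ewtabhalfspace,
                           double rswitch, double rcutoff,
                           double swV3, double swV4, double swV5,
                           double swF2, double swF3, double swF4,
                           double *velec_out, double *vvdw_out)
     {
         double rinv    = 1.0/sqrt(rsq);
         double rinvsq  = rinv*rinv;
         double r       = rsq*rinv;

         /* Ewald electrostatics from the tabulated F/D/V data */
         double ewrt    = r*ewtabscale;
         int    idx     = (int)ewrt;
         double eweps   = ewrt - idx;
         double tabF    = ewtab[4*idx];
         double tabD    = ewtab[4*idx+1];
         double tabV    = ewtab[4*idx+2];
         double F       = tabF + eweps*tabD;
         double V       = tabV - ewtabhalfspace*eweps*(tabF + F);
         double velec   = qq*(rinv - V);
         double felec   = qq*rinv*(rinvsq - F);

         /* Lennard-Jones 6-12 (only present for the 0-0 site pair) */
         double rinvsix = rinvsq*rinvsq*rinvsq;
         double vvdw6   = c6*rinvsix;
         double vvdw12  = c12*rinvsix*rinvsix;
         double vvdw    = vvdw12/12.0 - vvdw6/6.0;
         double fvdw    = (vvdw12 - vvdw6)*rinvsq;

         /* Potential switch: sw goes from 1 at rswitch to 0 at rcutoff, dsw = d(sw)/dr */
         double d       = (r > rswitch) ? r - rswitch : 0.0;
         double d2      = d*d;
         double sw      = 1.0 + d2*d*(swV3 + d*(swV4 + d*swV5));
         double dsw     = d2*(swF2 + d*(swF3 + d*swF4));

         felec          = felec*sw - rinv*velec*dsw;
         fvdw           = fvdw*sw  - rinv*vvdw*dsw;
         velec         *= sw;
         vvdw          *= sw;

         /* Beyond the cutoff the whole contribution is masked out */
         if (rsq >= rcutoff*rcutoff)
         {
             *velec_out = 0.0;
             *vvdw_out  = 0.0;
             return 0.0;
         }
         *velec_out = velec;
         *vvdw_out  = vvdw;
         /* Scalar force factor; the kernels multiply this by dx/dy/dz */
         return felec + fvdw;
     }
     #endif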
1670 /*
1671  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_double
1672  * Electrostatics interaction: Ewald
1673  * VdW interaction:            LennardJones
1674  * Geometry:                   Water3-Water3
1675  * Calculate force/pot:        Force
1676  */
1677 void
1678 nb_kernel_ElecEwSw_VdwLJSw_GeomW3W3_F_avx_256_double
1679                     (t_nblist                    * gmx_restrict       nlist,
1680                      rvec                        * gmx_restrict          xx,
1681                      rvec                        * gmx_restrict          ff,
1682                      t_forcerec                  * gmx_restrict          fr,
1683                      t_mdatoms                   * gmx_restrict     mdatoms,
1684                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1685                      t_nrnb                      * gmx_restrict        nrnb)
1686 {
1687     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1688      * just 0 for non-waters.
1689      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1690      * jnr indices corresponding to data put in the four positions in the SIMD register.
1691      */
1692     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1693     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1694     int              jnrA,jnrB,jnrC,jnrD;
1695     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1696     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1697     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1698     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1699     real             rcutoff_scalar;
1700     real             *shiftvec,*fshift,*x,*f;
1701     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1702     real             scratch[4*DIM];
1703     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1704     real *           vdwioffsetptr0;
1705     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1706     real *           vdwioffsetptr1;
1707     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1708     real *           vdwioffsetptr2;
1709     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1710     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1711     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1712     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1713     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1714     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1715     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1716     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1717     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1718     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1719     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1720     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1721     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1722     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1723     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1724     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1725     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
1726     real             *charge;
1727     int              nvdwtype;
1728     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1729     int              *vdwtype;
1730     real             *vdwparam;
1731     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
1732     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
1733     __m128i          ewitab;
1734     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1735     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1736     real             *ewtab;
1737     __m256d          rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
1738     real             rswitch_scalar,d_scalar;
1739     __m256d          dummy_mask,cutoff_mask;
1740     __m128           tmpmask0,tmpmask1;
1741     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1742     __m256d          one     = _mm256_set1_pd(1.0);
1743     __m256d          two     = _mm256_set1_pd(2.0);
1744     x                = xx[0];
1745     f                = ff[0];
1746
1747     nri              = nlist->nri;
1748     iinr             = nlist->iinr;
1749     jindex           = nlist->jindex;
1750     jjnr             = nlist->jjnr;
1751     shiftidx         = nlist->shift;
1752     gid              = nlist->gid;
1753     shiftvec         = fr->shift_vec[0];
1754     fshift           = fr->fshift[0];
1755     facel            = _mm256_set1_pd(fr->epsfac);
1756     charge           = mdatoms->chargeA;
1757     nvdwtype         = fr->ntype;
1758     vdwparam         = fr->nbfp;
1759     vdwtype          = mdatoms->typeA;
1760
1761     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
1762     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
1763     beta2            = _mm256_mul_pd(beta,beta);
1764     beta3            = _mm256_mul_pd(beta,beta2);
1765
1766     ewtab            = fr->ic->tabq_coul_FDV0;
1767     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
1768     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
1769
1770     /* Setup water-specific parameters */
1771     inr              = nlist->iinr[0];
1772     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1773     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1774     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1775     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1776
1777     jq0              = _mm256_set1_pd(charge[inr+0]);
1778     jq1              = _mm256_set1_pd(charge[inr+1]);
1779     jq2              = _mm256_set1_pd(charge[inr+2]);
1780     vdwjidx0A        = 2*vdwtype[inr+0];
1781     qq00             = _mm256_mul_pd(iq0,jq0);
1782     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1783     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1784     qq01             = _mm256_mul_pd(iq0,jq1);
1785     qq02             = _mm256_mul_pd(iq0,jq2);
1786     qq10             = _mm256_mul_pd(iq1,jq0);
1787     qq11             = _mm256_mul_pd(iq1,jq1);
1788     qq12             = _mm256_mul_pd(iq1,jq2);
1789     qq20             = _mm256_mul_pd(iq2,jq0);
1790     qq21             = _mm256_mul_pd(iq2,jq1);
1791     qq22             = _mm256_mul_pd(iq2,jq2);
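         /* All nine charge products and the single c6/c12 pair are set up once here:
          * every molecule handled by this water-water kernel shares the same three
          * charges and the same (site 0) LJ type, so nothing pair-specific has to be
          * loaded inside the loops. */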
1792
1793     /* With explicit cutoffs the value must be identical for electrostatics and VdW, so we arbitrarily use the electrostatics cutoff */
1794     rcutoff_scalar   = fr->rcoulomb;
1795     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
1796     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
1797
1798     rswitch_scalar   = fr->rcoulomb_switch;
1799     rswitch          = _mm256_set1_pd(rswitch_scalar);
1800     /* Setup switch parameters */
1801     d_scalar         = rcutoff_scalar-rswitch_scalar;
1802     d                = _mm256_set1_pd(d_scalar);
1803     swV3             = _mm256_set1_pd(-10.0/(d_scalar*d_scalar*d_scalar));
1804     swV4             = _mm256_set1_pd( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1805     swV5             = _mm256_set1_pd( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
1806     swF2             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar));
1807     swF3             = _mm256_set1_pd( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
1808     swF4             = _mm256_set1_pd(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
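         /* With x = d/(rcutoff-rswitch) and d = max(r-rswitch,0) these coefficients give
          *   sw(d)  = 1 - 10*x^3 + 15*x^4 - 6*x^5
          *   dsw(d) = (-30*x^2 + 60*x^3 - 30*x^4)/(rcutoff-rswitch)
          * so the switch falls smoothly from 1 at rswitch to 0 at rcutoff with vanishing
          * first and second derivatives at both ends. */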
1809
1810     /* Initialize to zero to avoid spurious compiler warnings about possibly uninitialized variables */
1811     jnrA = jnrB = jnrC = jnrD = 0;
1812     j_coord_offsetA = 0;
1813     j_coord_offsetB = 0;
1814     j_coord_offsetC = 0;
1815     j_coord_offsetD = 0;
1816
1817     outeriter        = 0;
1818     inneriter        = 0;
1819
1820     for(iidx=0;iidx<4*DIM;iidx++)
1821     {
1822         scratch[iidx] = 0.0;
1823     }
1824
1825     /* Start outer loop over neighborlists */
1826     for(iidx=0; iidx<nri; iidx++)
1827     {
1828         /* Load shift vector for this list */
1829         i_shift_offset   = DIM*shiftidx[iidx];
1830
1831         /* Load limits for loop over neighbors */
1832         j_index_start    = jindex[iidx];
1833         j_index_end      = jindex[iidx+1];
1834
1835         /* Get outer coordinate index */
1836         inr              = iinr[iidx];
1837         i_coord_offset   = DIM*inr;
1838
1839         /* Load i particle coords and add shift vector */
1840         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1841                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1842
1843         fix0             = _mm256_setzero_pd();
1844         fiy0             = _mm256_setzero_pd();
1845         fiz0             = _mm256_setzero_pd();
1846         fix1             = _mm256_setzero_pd();
1847         fiy1             = _mm256_setzero_pd();
1848         fiz1             = _mm256_setzero_pd();
1849         fix2             = _mm256_setzero_pd();
1850         fiy2             = _mm256_setzero_pd();
1851         fiz2             = _mm256_setzero_pd();
1852
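             /* This loop processes complete quartets of j entries (jjnr[jidx+3] >= 0);
              * the final, partially filled quartet is handled by a masked tail loop later
              * in this kernel, analogous to the masked loop of the VF kernel above. */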
1853         /* Start inner kernel loop */
1854         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1855         {
1856
1857             /* Get j neighbor index, and coordinate index */
1858             jnrA             = jjnr[jidx];
1859             jnrB             = jjnr[jidx+1];
1860             jnrC             = jjnr[jidx+2];
1861             jnrD             = jjnr[jidx+3];
1862             j_coord_offsetA  = DIM*jnrA;
1863             j_coord_offsetB  = DIM*jnrB;
1864             j_coord_offsetC  = DIM*jnrC;
1865             j_coord_offsetD  = DIM*jnrD;
1866
1867             /* load j atom coordinates */
1868             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1869                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1870                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1871
1872             /* Calculate displacement vector */
1873             dx00             = _mm256_sub_pd(ix0,jx0);
1874             dy00             = _mm256_sub_pd(iy0,jy0);
1875             dz00             = _mm256_sub_pd(iz0,jz0);
1876             dx01             = _mm256_sub_pd(ix0,jx1);
1877             dy01             = _mm256_sub_pd(iy0,jy1);
1878             dz01             = _mm256_sub_pd(iz0,jz1);
1879             dx02             = _mm256_sub_pd(ix0,jx2);
1880             dy02             = _mm256_sub_pd(iy0,jy2);
1881             dz02             = _mm256_sub_pd(iz0,jz2);
1882             dx10             = _mm256_sub_pd(ix1,jx0);
1883             dy10             = _mm256_sub_pd(iy1,jy0);
1884             dz10             = _mm256_sub_pd(iz1,jz0);
1885             dx11             = _mm256_sub_pd(ix1,jx1);
1886             dy11             = _mm256_sub_pd(iy1,jy1);
1887             dz11             = _mm256_sub_pd(iz1,jz1);
1888             dx12             = _mm256_sub_pd(ix1,jx2);
1889             dy12             = _mm256_sub_pd(iy1,jy2);
1890             dz12             = _mm256_sub_pd(iz1,jz2);
1891             dx20             = _mm256_sub_pd(ix2,jx0);
1892             dy20             = _mm256_sub_pd(iy2,jy0);
1893             dz20             = _mm256_sub_pd(iz2,jz0);
1894             dx21             = _mm256_sub_pd(ix2,jx1);
1895             dy21             = _mm256_sub_pd(iy2,jy1);
1896             dz21             = _mm256_sub_pd(iz2,jz1);
1897             dx22             = _mm256_sub_pd(ix2,jx2);
1898             dy22             = _mm256_sub_pd(iy2,jy2);
1899             dz22             = _mm256_sub_pd(iz2,jz2);
1900
1901             /* Calculate squared distances and the quantities derived from them */
1902             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1903             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1904             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1905             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1906             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1907             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1908             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1909             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1910             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1911
1912             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1913             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
1914             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
1915             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1916             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1917             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1918             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1919             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1920             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1921
1922             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1923             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1924             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1925             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1926             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1927             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1928             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1929             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1930             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1931
1932             fjx0             = _mm256_setzero_pd();
1933             fjy0             = _mm256_setzero_pd();
1934             fjz0             = _mm256_setzero_pd();
1935             fjx1             = _mm256_setzero_pd();
1936             fjy1             = _mm256_setzero_pd();
1937             fjz1             = _mm256_setzero_pd();
1938             fjx2             = _mm256_setzero_pd();
1939             fjy2             = _mm256_setzero_pd();
1940             fjz2             = _mm256_setzero_pd();
1941
1942             /**************************
1943              * CALCULATE INTERACTIONS *
1944              **************************/
1945
1946             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1947             {
1948
1949             r00              = _mm256_mul_pd(rsq00,rinv00);
1950
1951             /* EWALD ELECTROSTATICS */
1952
1953             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
1954             ewrt             = _mm256_mul_pd(r00,ewtabscale);
1955             ewitab           = _mm256_cvttpd_epi32(ewrt);
1956             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1957             ewitab           = _mm_slli_epi32(ewitab,2);
1958             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1959             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1960             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1961             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1962             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1963             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1964             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1965             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
1966             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
1967
1968             /* LENNARD-JONES DISPERSION/REPULSION */
1969
1970             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1971             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
1972             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
1973             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
1974             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
1975
1976             d                = _mm256_sub_pd(r00,rswitch);
1977             d                = _mm256_max_pd(d,_mm256_setzero_pd());
1978             d2               = _mm256_mul_pd(d,d);
1979             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
1980
1981             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
1982
1983             /* Evaluate switch function */
1984             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
1985             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
1986             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
1987             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1988
1989             fscal            = _mm256_add_pd(felec,fvdw);
1990
1991             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1992
1993             /* Calculate temporary vectorial force */
1994             tx               = _mm256_mul_pd(fscal,dx00);
1995             ty               = _mm256_mul_pd(fscal,dy00);
1996             tz               = _mm256_mul_pd(fscal,dz00);
1997
1998             /* Update vectorial force */
1999             fix0             = _mm256_add_pd(fix0,tx);
2000             fiy0             = _mm256_add_pd(fiy0,ty);
2001             fiz0             = _mm256_add_pd(fiz0,tz);
2002
2003             fjx0             = _mm256_add_pd(fjx0,tx);
2004             fjy0             = _mm256_add_pd(fjy0,ty);
2005             fjz0             = _mm256_add_pd(fjz0,tz);
2006
2007             }
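                 /* Note for this force-only kernel: velec (and vvdw for the 0-0 pair) is still
                  * evaluated because the switched force needs the -v*dsw/r term, but no
                  * potential sums are accumulated. */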
2008
2009             /**************************
2010              * CALCULATE INTERACTIONS *
2011              **************************/
2012
2013             if (gmx_mm256_any_lt(rsq01,rcutoff2))
2014             {
2015
2016             r01              = _mm256_mul_pd(rsq01,rinv01);
2017
2018             /* EWALD ELECTROSTATICS */
2019
2020             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2021             ewrt             = _mm256_mul_pd(r01,ewtabscale);
2022             ewitab           = _mm256_cvttpd_epi32(ewrt);
2023             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2024             ewitab           = _mm_slli_epi32(ewitab,2);
2025             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2026             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2027             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2028             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2029             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2030             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2031             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2032             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
2033             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
2034
2035             d                = _mm256_sub_pd(r01,rswitch);
2036             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2037             d2               = _mm256_mul_pd(d,d);
2038             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2039
2040             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2041
2042             /* Evaluate switch function */
2043             /* With the switch applied: fscal_sw = -(v*sw)'/r = -(v'*sw + v*dsw)/r = fscal*sw - v*dsw/r, where fscal = -v'/r is the unswitched value */
2044             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
2045             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
2046
2047             fscal            = felec;
2048
2049             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2050
2051             /* Calculate temporary vectorial force */
2052             tx               = _mm256_mul_pd(fscal,dx01);
2053             ty               = _mm256_mul_pd(fscal,dy01);
2054             tz               = _mm256_mul_pd(fscal,dz01);
2055
2056             /* Update vectorial force */
2057             fix0             = _mm256_add_pd(fix0,tx);
2058             fiy0             = _mm256_add_pd(fiy0,ty);
2059             fiz0             = _mm256_add_pd(fiz0,tz);
2060
2061             fjx1             = _mm256_add_pd(fjx1,tx);
2062             fjy1             = _mm256_add_pd(fjy1,ty);
2063             fjz1             = _mm256_add_pd(fjz1,tz);
2064
2065             }
2066
2067             /**************************
2068              * CALCULATE INTERACTIONS *
2069              **************************/
2070
2071             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2072             {
2073
2074             r02              = _mm256_mul_pd(rsq02,rinv02);
2075
2076             /* EWALD ELECTROSTATICS */
2077
2078             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2079             ewrt             = _mm256_mul_pd(r02,ewtabscale);
2080             ewitab           = _mm256_cvttpd_epi32(ewrt);
2081             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2082             ewitab           = _mm_slli_epi32(ewitab,2);
2083             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2084             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2085             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2086             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2087             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2088             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2089             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2090             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
2091             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
2092
2093             d                = _mm256_sub_pd(r02,rswitch);
2094             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2095             d2               = _mm256_mul_pd(d,d);
2096             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2097
2098             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2099
2100             /* Evaluate switch function */
2101             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2102             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
2103             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2104
2105             fscal            = felec;
2106
2107             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2108
2109             /* Calculate temporary vectorial force */
2110             tx               = _mm256_mul_pd(fscal,dx02);
2111             ty               = _mm256_mul_pd(fscal,dy02);
2112             tz               = _mm256_mul_pd(fscal,dz02);
2113
2114             /* Update vectorial force */
2115             fix0             = _mm256_add_pd(fix0,tx);
2116             fiy0             = _mm256_add_pd(fiy0,ty);
2117             fiz0             = _mm256_add_pd(fiz0,tz);
2118
2119             fjx2             = _mm256_add_pd(fjx2,tx);
2120             fjy2             = _mm256_add_pd(fjy2,ty);
2121             fjz2             = _mm256_add_pd(fjz2,tz);
2122
2123             }
2124
2125             /**************************
2126              * CALCULATE INTERACTIONS *
2127              **************************/
2128
2129             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2130             {
2131
2132             r10              = _mm256_mul_pd(rsq10,rinv10);
2133
2134             /* EWALD ELECTROSTATICS */
2135
2136             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2137             ewrt             = _mm256_mul_pd(r10,ewtabscale);
2138             ewitab           = _mm256_cvttpd_epi32(ewrt);
2139             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2140             ewitab           = _mm_slli_epi32(ewitab,2);
2141             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2142             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2143             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2144             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2145             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2146             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2147             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2148             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
2149             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
2150
2151             d                = _mm256_sub_pd(r10,rswitch);
2152             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2153             d2               = _mm256_mul_pd(d,d);
2154             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2155
2156             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2157
2158             /* Evaluate switch function */
2159             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2160             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
2161             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2162
2163             fscal            = felec;
2164
2165             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2166
2167             /* Calculate temporary vectorial force */
2168             tx               = _mm256_mul_pd(fscal,dx10);
2169             ty               = _mm256_mul_pd(fscal,dy10);
2170             tz               = _mm256_mul_pd(fscal,dz10);
2171
2172             /* Update vectorial force */
2173             fix1             = _mm256_add_pd(fix1,tx);
2174             fiy1             = _mm256_add_pd(fiy1,ty);
2175             fiz1             = _mm256_add_pd(fiz1,tz);
2176
2177             fjx0             = _mm256_add_pd(fjx0,tx);
2178             fjy0             = _mm256_add_pd(fjy0,ty);
2179             fjz0             = _mm256_add_pd(fjz0,tz);
2180
2181             }
2182
2183             /**************************
2184              * CALCULATE INTERACTIONS *
2185              **************************/
2186
2187             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2188             {
2189
2190             r11              = _mm256_mul_pd(rsq11,rinv11);
2191
2192             /* EWALD ELECTROSTATICS */
2193
2194             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2195             ewrt             = _mm256_mul_pd(r11,ewtabscale);
2196             ewitab           = _mm256_cvttpd_epi32(ewrt);
2197             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2198             ewitab           = _mm_slli_epi32(ewitab,2);
2199             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2200             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2201             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2202             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2203             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2204             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2205             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2206             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
2207             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
2208
2209             d                = _mm256_sub_pd(r11,rswitch);
2210             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2211             d2               = _mm256_mul_pd(d,d);
2212             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2213
2214             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2215
2216             /* Evaluate switch function */
2217             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2218             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
2219             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2220
2221             fscal            = felec;
2222
2223             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2224
2225             /* Calculate temporary vectorial force */
2226             tx               = _mm256_mul_pd(fscal,dx11);
2227             ty               = _mm256_mul_pd(fscal,dy11);
2228             tz               = _mm256_mul_pd(fscal,dz11);
2229
2230             /* Update vectorial force */
2231             fix1             = _mm256_add_pd(fix1,tx);
2232             fiy1             = _mm256_add_pd(fiy1,ty);
2233             fiz1             = _mm256_add_pd(fiz1,tz);
2234
2235             fjx1             = _mm256_add_pd(fjx1,tx);
2236             fjy1             = _mm256_add_pd(fjy1,ty);
2237             fjz1             = _mm256_add_pd(fjz1,tz);
2238
2239             }
2240
2241             /**************************
2242              * CALCULATE INTERACTIONS *
2243              **************************/
2244
2245             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2246             {
2247
2248             r12              = _mm256_mul_pd(rsq12,rinv12);
2249
2250             /* EWALD ELECTROSTATICS */
2251
2252             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2253             ewrt             = _mm256_mul_pd(r12,ewtabscale);
2254             ewitab           = _mm256_cvttpd_epi32(ewrt);
2255             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2256             ewitab           = _mm_slli_epi32(ewitab,2);
2257             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2258             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2259             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2260             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2261             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2262             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2263             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2264             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
2265             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2266
2267             d                = _mm256_sub_pd(r12,rswitch);
2268             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2269             d2               = _mm256_mul_pd(d,d);
2270             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2271
2272             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2273
2274             /* Evaluate switch function */
2275             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2276             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
2277             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2278
2279             fscal            = felec;
2280
2281             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2282
2283             /* Calculate temporary vectorial force */
2284             tx               = _mm256_mul_pd(fscal,dx12);
2285             ty               = _mm256_mul_pd(fscal,dy12);
2286             tz               = _mm256_mul_pd(fscal,dz12);
2287
2288             /* Update vectorial force */
2289             fix1             = _mm256_add_pd(fix1,tx);
2290             fiy1             = _mm256_add_pd(fiy1,ty);
2291             fiz1             = _mm256_add_pd(fiz1,tz);
2292
2293             fjx2             = _mm256_add_pd(fjx2,tx);
2294             fjy2             = _mm256_add_pd(fjy2,ty);
2295             fjz2             = _mm256_add_pd(fjz2,tz);
2296
2297             }
2298
2299             /**************************
2300              * CALCULATE INTERACTIONS *
2301              **************************/
2302
2303             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2304             {
2305
2306             r20              = _mm256_mul_pd(rsq20,rinv20);
2307
2308             /* EWALD ELECTROSTATICS */
2309
2310             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2311             ewrt             = _mm256_mul_pd(r20,ewtabscale);
2312             ewitab           = _mm256_cvttpd_epi32(ewrt);
2313             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2314             ewitab           = _mm_slli_epi32(ewitab,2);
2315             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2316             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2317             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2318             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2319             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2320             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2321             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2322             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
2323             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
2324
2325             d                = _mm256_sub_pd(r20,rswitch);
2326             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2327             d2               = _mm256_mul_pd(d,d);
2328             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2329
2330             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2331
2332             /* Evaluate switch function */
2333             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2334             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
2335             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2336
2337             fscal            = felec;
2338
2339             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2340
2341             /* Calculate temporary vectorial force */
2342             tx               = _mm256_mul_pd(fscal,dx20);
2343             ty               = _mm256_mul_pd(fscal,dy20);
2344             tz               = _mm256_mul_pd(fscal,dz20);
2345
2346             /* Update vectorial force */
2347             fix2             = _mm256_add_pd(fix2,tx);
2348             fiy2             = _mm256_add_pd(fiy2,ty);
2349             fiz2             = _mm256_add_pd(fiz2,tz);
2350
2351             fjx0             = _mm256_add_pd(fjx0,tx);
2352             fjy0             = _mm256_add_pd(fjy0,ty);
2353             fjz0             = _mm256_add_pd(fjz0,tz);
2354
2355             }
2356
2357             /**************************
2358              * CALCULATE INTERACTIONS *
2359              **************************/
2360
2361             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2362             {
2363
2364             r21              = _mm256_mul_pd(rsq21,rinv21);
2365
2366             /* EWALD ELECTROSTATICS */
2367
2368             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2369             ewrt             = _mm256_mul_pd(r21,ewtabscale);
2370             ewitab           = _mm256_cvttpd_epi32(ewrt);
2371             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2372             ewitab           = _mm_slli_epi32(ewitab,2);
2373             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2374             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2375             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2376             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2377             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2378             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2379             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2380             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
2381             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2382
2383             d                = _mm256_sub_pd(r21,rswitch);
2384             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2385             d2               = _mm256_mul_pd(d,d);
2386             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2387
2388             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2389
2390             /* Evaluate switch function */
2391             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2392             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
2393             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2394
2395             fscal            = felec;
2396
2397             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2398
2399             /* Calculate temporary vectorial force */
2400             tx               = _mm256_mul_pd(fscal,dx21);
2401             ty               = _mm256_mul_pd(fscal,dy21);
2402             tz               = _mm256_mul_pd(fscal,dz21);
2403
2404             /* Update vectorial force */
2405             fix2             = _mm256_add_pd(fix2,tx);
2406             fiy2             = _mm256_add_pd(fiy2,ty);
2407             fiz2             = _mm256_add_pd(fiz2,tz);
2408
2409             fjx1             = _mm256_add_pd(fjx1,tx);
2410             fjy1             = _mm256_add_pd(fjy1,ty);
2411             fjz1             = _mm256_add_pd(fjz1,tz);
2412
2413             }
2414
2415             /**************************
2416              * CALCULATE INTERACTIONS *
2417              **************************/
2418
2419             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2420             {
2421
2422             r22              = _mm256_mul_pd(rsq22,rinv22);
2423
2424             /* EWALD ELECTROSTATICS */
2425
2426             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2427             ewrt             = _mm256_mul_pd(r22,ewtabscale);
2428             ewitab           = _mm256_cvttpd_epi32(ewrt);
2429             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2430             ewitab           = _mm_slli_epi32(ewitab,2);
2431             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2432             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2433             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2434             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2435             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2436             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2437             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2438             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
2439             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
2440
2441             d                = _mm256_sub_pd(r22,rswitch);
2442             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2443             d2               = _mm256_mul_pd(d,d);
2444             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2445
2446             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2447
2448             /* Evaluate switch function */
2449             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2450             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
2451             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2452
2453             fscal            = felec;
2454
2455             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2456
2457             /* Calculate temporary vectorial force */
2458             tx               = _mm256_mul_pd(fscal,dx22);
2459             ty               = _mm256_mul_pd(fscal,dy22);
2460             tz               = _mm256_mul_pd(fscal,dz22);
2461
2462             /* Update vectorial force */
2463             fix2             = _mm256_add_pd(fix2,tx);
2464             fiy2             = _mm256_add_pd(fiy2,ty);
2465             fiz2             = _mm256_add_pd(fiz2,tz);
2466
2467             fjx2             = _mm256_add_pd(fjx2,tx);
2468             fjy2             = _mm256_add_pd(fjy2,ty);
2469             fjz2             = _mm256_add_pd(fjz2,tz);
2470
2471             }
2472
2473             fjptrA             = f+j_coord_offsetA;
2474             fjptrB             = f+j_coord_offsetB;
2475             fjptrC             = f+j_coord_offsetC;
2476             fjptrD             = f+j_coord_offsetD;
2477
2478             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2479                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
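             /* tx/ty/tz were added with the same sign to both the i and the j accumulators above; Newton's third
              * law is applied here because the j forces are subtracted (decremented) from f[], not added. */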
2480
2481             /* Inner loop uses 573 flops */
2482         }
2483
2484         if(jidx<j_index_end)
2485         {
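             /* This epilogue handles the last, possibly partial, group of four j entries; it mirrors the unrolled
              * inner loop above, but negative (padding) j indices are masked out via dummy_mask below. */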
2486
2487             /* Get j neighbor index, and coordinate index */
2488             jnrlistA         = jjnr[jidx];
2489             jnrlistB         = jjnr[jidx+1];
2490             jnrlistC         = jjnr[jidx+2];
2491             jnrlistD         = jjnr[jidx+3];
2492             /* The j index is negative for padding (non-real) atom entries.
2493              * The resulting mask is 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2494              * so it is used as val = _mm256_andnot_pd(dummy_mask,val) to clear dummy entries.
2495              */
2496             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
2497
2498             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
2499             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
2500             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
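                 /* The four 32-bit comparison results are duplicated pairwise by the two permutes and recombined,
                  * so that each 64-bit double lane carries the mask of its own j index. */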
2501
2502             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2503             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2504             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2505             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
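                 /* Padding entries are redirected to atom 0 so that the coordinate loads below stay in bounds;
                  * their contributions are removed again through dummy_mask. */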
2506             j_coord_offsetA  = DIM*jnrA;
2507             j_coord_offsetB  = DIM*jnrB;
2508             j_coord_offsetC  = DIM*jnrC;
2509             j_coord_offsetD  = DIM*jnrD;
2510
2511             /* load j atom coordinates */
2512             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
2513                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2514                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
2515
2516             /* Calculate displacement vector */
2517             dx00             = _mm256_sub_pd(ix0,jx0);
2518             dy00             = _mm256_sub_pd(iy0,jy0);
2519             dz00             = _mm256_sub_pd(iz0,jz0);
2520             dx01             = _mm256_sub_pd(ix0,jx1);
2521             dy01             = _mm256_sub_pd(iy0,jy1);
2522             dz01             = _mm256_sub_pd(iz0,jz1);
2523             dx02             = _mm256_sub_pd(ix0,jx2);
2524             dy02             = _mm256_sub_pd(iy0,jy2);
2525             dz02             = _mm256_sub_pd(iz0,jz2);
2526             dx10             = _mm256_sub_pd(ix1,jx0);
2527             dy10             = _mm256_sub_pd(iy1,jy0);
2528             dz10             = _mm256_sub_pd(iz1,jz0);
2529             dx11             = _mm256_sub_pd(ix1,jx1);
2530             dy11             = _mm256_sub_pd(iy1,jy1);
2531             dz11             = _mm256_sub_pd(iz1,jz1);
2532             dx12             = _mm256_sub_pd(ix1,jx2);
2533             dy12             = _mm256_sub_pd(iy1,jy2);
2534             dz12             = _mm256_sub_pd(iz1,jz2);
2535             dx20             = _mm256_sub_pd(ix2,jx0);
2536             dy20             = _mm256_sub_pd(iy2,jy0);
2537             dz20             = _mm256_sub_pd(iz2,jz0);
2538             dx21             = _mm256_sub_pd(ix2,jx1);
2539             dy21             = _mm256_sub_pd(iy2,jy1);
2540             dz21             = _mm256_sub_pd(iz2,jz1);
2541             dx22             = _mm256_sub_pd(ix2,jx2);
2542             dy22             = _mm256_sub_pd(iy2,jy2);
2543             dz22             = _mm256_sub_pd(iz2,jz2);
2544
2545             /* Calculate squared distance and things based on it */
2546             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
2547             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
2548             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
2549             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
2550             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
2551             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
2552             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
2553             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
2554             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
2555
2556             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
2557             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
2558             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
2559             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
2560             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
2561             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
2562             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
2563             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
2564             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
2565
2566             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
2567             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
2568             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
2569             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
2570             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
2571             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
2572             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
2573             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
2574             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
2575
2576             fjx0             = _mm256_setzero_pd();
2577             fjy0             = _mm256_setzero_pd();
2578             fjz0             = _mm256_setzero_pd();
2579             fjx1             = _mm256_setzero_pd();
2580             fjy1             = _mm256_setzero_pd();
2581             fjz1             = _mm256_setzero_pd();
2582             fjx2             = _mm256_setzero_pd();
2583             fjy2             = _mm256_setzero_pd();
2584             fjz2             = _mm256_setzero_pd();
2585
2586             /**************************
2587              * CALCULATE INTERACTIONS *
2588              **************************/
2589
2590             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2591             {
2592
2593             r00              = _mm256_mul_pd(rsq00,rinv00);
2594             r00              = _mm256_andnot_pd(dummy_mask,r00);
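                 /* Zeroing r in the dummy lanes keeps the table index computed below well defined (it becomes 0),
                  * so padded entries cannot read outside the Ewald table. */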
2595
2596             /* EWALD ELECTROSTATICS */
2597
2598             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2599             ewrt             = _mm256_mul_pd(r00,ewtabscale);
2600             ewitab           = _mm256_cvttpd_epi32(ewrt);
2601             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2602             ewitab           = _mm_slli_epi32(ewitab,2);
2603             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2604             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2605             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2606             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2607             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2608             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2609             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2610             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(rinv00,velec));
2611             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
2612
2613             /* LENNARD-JONES DISPERSION/REPULSION */
2614
2615             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
2616             vvdw6            = _mm256_mul_pd(c6_00,rinvsix);
2617             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
2618             vvdw             = _mm256_sub_pd( _mm256_mul_pd(vvdw12,one_twelfth) , _mm256_mul_pd(vvdw6,one_sixth) );
2619             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,vvdw6),rinvsq00);
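                 /* A sketch of the Lennard-Jones term above, assuming the usual kernel convention that c6_00 and
                  * c12_00 hold 6*C6 and 12*C12:
                  *   vvdw = C12/r^12 - C6/r^6   and   fvdw = (12*C12/r^12 - 6*C6/r^6)/r^2 = -(dVvdw/dr)/r
                  */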
2620
2621             d                = _mm256_sub_pd(r00,rswitch);
2622             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2623             d2               = _mm256_mul_pd(d,d);
2624             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2625
2626             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2627
2628             /* Evaluate switch function */
2629             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2630             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(velec,dsw)) );
2631             fvdw             = _mm256_sub_pd( _mm256_mul_pd(fvdw,sw) , _mm256_mul_pd(rinv00,_mm256_mul_pd(vvdw,dsw)) );
2632             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
2633
2634             fscal            = _mm256_add_pd(felec,fvdw);
2635
2636             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2637
2638             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
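                 /* In the epilogue fscal is additionally cleared in the dummy lanes, so padded j entries contribute
                  * no force. */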
2639
2640             /* Calculate temporary vectorial force */
2641             tx               = _mm256_mul_pd(fscal,dx00);
2642             ty               = _mm256_mul_pd(fscal,dy00);
2643             tz               = _mm256_mul_pd(fscal,dz00);
2644
2645             /* Update vectorial force */
2646             fix0             = _mm256_add_pd(fix0,tx);
2647             fiy0             = _mm256_add_pd(fiy0,ty);
2648             fiz0             = _mm256_add_pd(fiz0,tz);
2649
2650             fjx0             = _mm256_add_pd(fjx0,tx);
2651             fjy0             = _mm256_add_pd(fjy0,ty);
2652             fjz0             = _mm256_add_pd(fjz0,tz);
2653
2654             }
2655
2656             /**************************
2657              * CALCULATE INTERACTIONS *
2658              **************************/
2659
2660             if (gmx_mm256_any_lt(rsq01,rcutoff2))
2661             {
2662
2663             r01              = _mm256_mul_pd(rsq01,rinv01);
2664             r01              = _mm256_andnot_pd(dummy_mask,r01);
2665
2666             /* EWALD ELECTROSTATICS */
2667
2668             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2669             ewrt             = _mm256_mul_pd(r01,ewtabscale);
2670             ewitab           = _mm256_cvttpd_epi32(ewrt);
2671             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2672             ewitab           = _mm_slli_epi32(ewitab,2);
2673             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2674             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2675             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2676             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2677             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2678             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2679             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2680             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(rinv01,velec));
2681             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
2682
2683             d                = _mm256_sub_pd(r01,rswitch);
2684             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2685             d2               = _mm256_mul_pd(d,d);
2686             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2687
2688             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2689
2690             /* Evaluate switch function */
2691             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2692             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv01,_mm256_mul_pd(velec,dsw)) );
2693             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
2694
2695             fscal            = felec;
2696
2697             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2698
2699             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2700
2701             /* Calculate temporary vectorial force */
2702             tx               = _mm256_mul_pd(fscal,dx01);
2703             ty               = _mm256_mul_pd(fscal,dy01);
2704             tz               = _mm256_mul_pd(fscal,dz01);
2705
2706             /* Update vectorial force */
2707             fix0             = _mm256_add_pd(fix0,tx);
2708             fiy0             = _mm256_add_pd(fiy0,ty);
2709             fiz0             = _mm256_add_pd(fiz0,tz);
2710
2711             fjx1             = _mm256_add_pd(fjx1,tx);
2712             fjy1             = _mm256_add_pd(fjy1,ty);
2713             fjz1             = _mm256_add_pd(fjz1,tz);
2714
2715             }
2716
2717             /**************************
2718              * CALCULATE INTERACTIONS *
2719              **************************/
2720
2721             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2722             {
2723
2724             r02              = _mm256_mul_pd(rsq02,rinv02);
2725             r02              = _mm256_andnot_pd(dummy_mask,r02);
2726
2727             /* EWALD ELECTROSTATICS */
2728
2729             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2730             ewrt             = _mm256_mul_pd(r02,ewtabscale);
2731             ewitab           = _mm256_cvttpd_epi32(ewrt);
2732             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2733             ewitab           = _mm_slli_epi32(ewitab,2);
2734             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2735             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2736             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2737             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2738             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2739             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2740             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2741             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(rinv02,velec));
2742             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
2743
2744             d                = _mm256_sub_pd(r02,rswitch);
2745             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2746             d2               = _mm256_mul_pd(d,d);
2747             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2748
2749             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2750
2751             /* Evaluate switch function */
2752             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2753             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv02,_mm256_mul_pd(velec,dsw)) );
2754             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2755
2756             fscal            = felec;
2757
2758             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2759
2760             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2761
2762             /* Calculate temporary vectorial force */
2763             tx               = _mm256_mul_pd(fscal,dx02);
2764             ty               = _mm256_mul_pd(fscal,dy02);
2765             tz               = _mm256_mul_pd(fscal,dz02);
2766
2767             /* Update vectorial force */
2768             fix0             = _mm256_add_pd(fix0,tx);
2769             fiy0             = _mm256_add_pd(fiy0,ty);
2770             fiz0             = _mm256_add_pd(fiz0,tz);
2771
2772             fjx2             = _mm256_add_pd(fjx2,tx);
2773             fjy2             = _mm256_add_pd(fjy2,ty);
2774             fjz2             = _mm256_add_pd(fjz2,tz);
2775
2776             }
2777
2778             /**************************
2779              * CALCULATE INTERACTIONS *
2780              **************************/
2781
2782             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2783             {
2784
2785             r10              = _mm256_mul_pd(rsq10,rinv10);
2786             r10              = _mm256_andnot_pd(dummy_mask,r10);
2787
2788             /* EWALD ELECTROSTATICS */
2789
2790             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2791             ewrt             = _mm256_mul_pd(r10,ewtabscale);
2792             ewitab           = _mm256_cvttpd_epi32(ewrt);
2793             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2794             ewitab           = _mm_slli_epi32(ewitab,2);
2795             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2796             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2797             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2798             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2799             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2800             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2801             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2802             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(rinv10,velec));
2803             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
2804
2805             d                = _mm256_sub_pd(r10,rswitch);
2806             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2807             d2               = _mm256_mul_pd(d,d);
2808             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2809
2810             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2811
2812             /* Evaluate switch function */
2813             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2814             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv10,_mm256_mul_pd(velec,dsw)) );
2815             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2816
2817             fscal            = felec;
2818
2819             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2820
2821             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2822
2823             /* Calculate temporary vectorial force */
2824             tx               = _mm256_mul_pd(fscal,dx10);
2825             ty               = _mm256_mul_pd(fscal,dy10);
2826             tz               = _mm256_mul_pd(fscal,dz10);
2827
2828             /* Update vectorial force */
2829             fix1             = _mm256_add_pd(fix1,tx);
2830             fiy1             = _mm256_add_pd(fiy1,ty);
2831             fiz1             = _mm256_add_pd(fiz1,tz);
2832
2833             fjx0             = _mm256_add_pd(fjx0,tx);
2834             fjy0             = _mm256_add_pd(fjy0,ty);
2835             fjz0             = _mm256_add_pd(fjz0,tz);
2836
2837             }
2838
2839             /**************************
2840              * CALCULATE INTERACTIONS *
2841              **************************/
2842
2843             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2844             {
2845
2846             r11              = _mm256_mul_pd(rsq11,rinv11);
2847             r11              = _mm256_andnot_pd(dummy_mask,r11);
2848
2849             /* EWALD ELECTROSTATICS */
2850
2851             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2852             ewrt             = _mm256_mul_pd(r11,ewtabscale);
2853             ewitab           = _mm256_cvttpd_epi32(ewrt);
2854             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2855             ewitab           = _mm_slli_epi32(ewitab,2);
2856             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2857             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2858             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2859             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2860             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2861             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2862             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2863             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(rinv11,velec));
2864             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
2865
2866             d                = _mm256_sub_pd(r11,rswitch);
2867             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2868             d2               = _mm256_mul_pd(d,d);
2869             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2870
2871             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2872
2873             /* Evaluate switch function */
2874             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2875             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv11,_mm256_mul_pd(velec,dsw)) );
2876             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2877
2878             fscal            = felec;
2879
2880             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2881
2882             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2883
2884             /* Calculate temporary vectorial force */
2885             tx               = _mm256_mul_pd(fscal,dx11);
2886             ty               = _mm256_mul_pd(fscal,dy11);
2887             tz               = _mm256_mul_pd(fscal,dz11);
2888
2889             /* Update vectorial force */
2890             fix1             = _mm256_add_pd(fix1,tx);
2891             fiy1             = _mm256_add_pd(fiy1,ty);
2892             fiz1             = _mm256_add_pd(fiz1,tz);
2893
2894             fjx1             = _mm256_add_pd(fjx1,tx);
2895             fjy1             = _mm256_add_pd(fjy1,ty);
2896             fjz1             = _mm256_add_pd(fjz1,tz);
2897
2898             }
2899
2900             /**************************
2901              * CALCULATE INTERACTIONS *
2902              **************************/
2903
2904             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2905             {
2906
2907             r12              = _mm256_mul_pd(rsq12,rinv12);
2908             r12              = _mm256_andnot_pd(dummy_mask,r12);
2909
2910             /* EWALD ELECTROSTATICS */
2911
2912             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2913             ewrt             = _mm256_mul_pd(r12,ewtabscale);
2914             ewitab           = _mm256_cvttpd_epi32(ewrt);
2915             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2916             ewitab           = _mm_slli_epi32(ewitab,2);
2917             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2918             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2919             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2920             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2921             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2922             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2923             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2924             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(rinv12,velec));
2925             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2926
2927             d                = _mm256_sub_pd(r12,rswitch);
2928             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2929             d2               = _mm256_mul_pd(d,d);
2930             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2931
2932             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2933
2934             /* Evaluate switch function */
2935             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2936             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv12,_mm256_mul_pd(velec,dsw)) );
2937             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2938
2939             fscal            = felec;
2940
2941             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2942
2943             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2944
2945             /* Calculate temporary vectorial force */
2946             tx               = _mm256_mul_pd(fscal,dx12);
2947             ty               = _mm256_mul_pd(fscal,dy12);
2948             tz               = _mm256_mul_pd(fscal,dz12);
2949
2950             /* Update vectorial force */
2951             fix1             = _mm256_add_pd(fix1,tx);
2952             fiy1             = _mm256_add_pd(fiy1,ty);
2953             fiz1             = _mm256_add_pd(fiz1,tz);
2954
2955             fjx2             = _mm256_add_pd(fjx2,tx);
2956             fjy2             = _mm256_add_pd(fjy2,ty);
2957             fjz2             = _mm256_add_pd(fjz2,tz);
2958
2959             }
2960
2961             /**************************
2962              * CALCULATE INTERACTIONS *
2963              **************************/
2964
2965             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2966             {
2967
2968             r20              = _mm256_mul_pd(rsq20,rinv20);
2969             r20              = _mm256_andnot_pd(dummy_mask,r20);
2970
2971             /* EWALD ELECTROSTATICS */
2972
2973             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2974             ewrt             = _mm256_mul_pd(r20,ewtabscale);
2975             ewitab           = _mm256_cvttpd_epi32(ewrt);
2976             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2977             ewitab           = _mm_slli_epi32(ewitab,2);
2978             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
2979             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
2980             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
2981             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
2982             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
2983             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
2984             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
2985             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(rinv20,velec));
2986             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
2987
2988             d                = _mm256_sub_pd(r20,rswitch);
2989             d                = _mm256_max_pd(d,_mm256_setzero_pd());
2990             d2               = _mm256_mul_pd(d,d);
2991             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
2992
2993             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));
2994
2995             /* Evaluate switch function */
2996             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
2997             felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv20,_mm256_mul_pd(velec,dsw)) );
2998             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2999
3000             fscal            = felec;
3001
3002             fscal            = _mm256_and_pd(fscal,cutoff_mask);
3003
3004             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
3005
3006             /* Calculate temporary vectorial force */
3007             tx               = _mm256_mul_pd(fscal,dx20);
3008             ty               = _mm256_mul_pd(fscal,dy20);
3009             tz               = _mm256_mul_pd(fscal,dz20);
3010
3011             /* Update vectorial force */
3012             fix2             = _mm256_add_pd(fix2,tx);
3013             fiy2             = _mm256_add_pd(fiy2,ty);
3014             fiz2             = _mm256_add_pd(fiz2,tz);
3015
3016             fjx0             = _mm256_add_pd(fjx0,tx);
3017             fjy0             = _mm256_add_pd(fjy0,ty);
3018             fjz0             = _mm256_add_pd(fjz0,tz);
3019
3020             }
3021
3022             /**************************
3023              * CALCULATE INTERACTIONS *
3024              **************************/
3025
3026             if (gmx_mm256_any_lt(rsq21,rcutoff2))
3027             {
3028
3029             r21              = _mm256_mul_pd(rsq21,rinv21);
3030             r21              = _mm256_andnot_pd(dummy_mask,r21);
3031
3032             /* EWALD ELECTROSTATICS */
3033
3034             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
3035             ewrt             = _mm256_mul_pd(r21,ewtabscale);
3036             ewitab           = _mm256_cvttpd_epi32(ewrt);
3037             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
3038             ewitab           = _mm_slli_epi32(ewitab,2);
3039             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
3040             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
3041             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
3042             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
3043             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
3044             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
3045             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
3046             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(rinv21,velec));
3047             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
3048
3049             d                = _mm256_sub_pd(r21,rswitch);
3050             d                = _mm256_max_pd(d,_mm256_setzero_pd());
3051             d2               = _mm256_mul_pd(d,d);
3052             sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));
3053
3054             dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv21,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx21);
            ty               = _mm256_mul_pd(fscal,dy21);
            tz               = _mm256_mul_pd(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx1             = _mm256_add_pd(fjx1,tx);
            fjy1             = _mm256_add_pd(fjy1,ty);
            fjz1             = _mm256_add_pd(fjz1,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq22,rcutoff2))
            {

            r22              = _mm256_mul_pd(rsq22,rinv22);
            r22              = _mm256_andnot_pd(dummy_mask,r22);

            /* EWALD ELECTROSTATICS */

            /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
            ewrt             = _mm256_mul_pd(r22,ewtabscale);
            ewitab           = _mm256_cvttpd_epi32(ewrt);
            eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
            ewitab           = _mm_slli_epi32(ewitab,2);
            ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
            ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
            ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
            ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
            felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
            velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
            velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(rinv22,velec));
            felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));

            d                = _mm256_sub_pd(r22,rswitch);
            d                = _mm256_max_pd(d,_mm256_setzero_pd());
            d2               = _mm256_mul_pd(d,d);
            sw               = _mm256_add_pd(one,_mm256_mul_pd(d2,_mm256_mul_pd(d,_mm256_add_pd(swV3,_mm256_mul_pd(d,_mm256_add_pd(swV4,_mm256_mul_pd(d,swV5)))))));

            dsw              = _mm256_mul_pd(d2,_mm256_add_pd(swF2,_mm256_mul_pd(d,_mm256_add_pd(swF3,_mm256_mul_pd(d,swF4)))));

            /* Evaluate switch function */
            /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
            felec            = _mm256_sub_pd( _mm256_mul_pd(felec,sw) , _mm256_mul_pd(rinv22,_mm256_mul_pd(velec,dsw)) );
            cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_pd(fscal,cutoff_mask);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx22);
            ty               = _mm256_mul_pd(fscal,dy22);
            tz               = _mm256_mul_pd(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx2             = _mm256_add_pd(fjx2,tx);
            fjy2             = _mm256_add_pd(fjy2,ty);
            fjz2             = _mm256_add_pd(fjz2,tz);

            }
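            /* Scatter the accumulated j forces. Padding entries of the SIMD neighbor
             * list are flagged with jnr < 0; their stores are redirected to a scratch
             * buffer so the full-width update below never touches real atom data. */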

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;

            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
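            /* The swizzle helper transposes the nine SIMD force accumulators back into
             * per-atom x/y/z triplets and subtracts them from the j-atom forces in memory,
             * the Newton's-third-law counterpart of the additions to the i-atom accumulators. */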

            /* Inner loop uses 582 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);
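        /* Reduce the i-atom force accumulators across the four SIMD lanes and add them
         * to the force array, together with the corresponding periodic shift force. */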

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 18 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*582);
}