Introduce gmxpre.h for truly global definitions
[alexxy/gromacs.git] / src / gromacs / gmxlib / nonbonded / nb_kernel_avx_256_double / nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_avx_256_double.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
38 #include "gmxpre.h"
39
40 #include "config.h"
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "gromacs/legacyheaders/types/simple.h"
46 #include "gromacs/math/vec.h"
47 #include "gromacs/legacyheaders/nrnb.h"
48
49 #include "gromacs/simd/math_x86_avx_256_double.h"
50 #include "kernelutil_x86_avx_256_double.h"
51
52 /*
53  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_VF_avx_256_double
54  * Electrostatics interaction: Ewald
55  * VdW interaction:            LJEwald
56  * Geometry:                   Water3-Water3
57  * Calculate force/pot:        PotentialAndForce
58  */
59 void
60 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_VF_avx_256_double
61                     (t_nblist                    * gmx_restrict       nlist,
62                      rvec                        * gmx_restrict          xx,
63                      rvec                        * gmx_restrict          ff,
64                      t_forcerec                  * gmx_restrict          fr,
65                      t_mdatoms                   * gmx_restrict     mdatoms,
66                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67                      t_nrnb                      * gmx_restrict        nrnb)
68 {
69     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
70      * just 0 for non-waters.
71      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
72      * jnr indices corresponding to data put in the four positions in the SIMD register.
73      */
74     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
75     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76     int              jnrA,jnrB,jnrC,jnrD;
77     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
79     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
84     real             scratch[4*DIM];
85     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     real *           vdwgridioffsetptr0;
88     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
89     real *           vdwioffsetptr1;
90     real *           vdwgridioffsetptr1;
91     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
92     real *           vdwioffsetptr2;
93     real *           vdwgridioffsetptr2;
94     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
95     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
96     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
97     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
98     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
99     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
100     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
101     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
102     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
103     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
104     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
105     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
106     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
107     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
108     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
109     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
110     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
111     real             *charge;
112     int              nvdwtype;
113     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
114     int              *vdwtype;
115     real             *vdwparam;
116     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
117     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
118     __m256d           c6grid_00;
119     __m256d           c6grid_01;
120     __m256d           c6grid_02;
121     __m256d           c6grid_10;
122     __m256d           c6grid_11;
123     __m256d           c6grid_12;
124     __m256d           c6grid_20;
125     __m256d           c6grid_21;
126     __m256d           c6grid_22;
127     real             *vdwgridparam;
128     __m256d           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
129     __m256d           one_half  = _mm256_set1_pd(0.5);
130     __m256d           minus_one = _mm256_set1_pd(-1.0);
131     __m128i          ewitab;
132     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
133     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
134     real             *ewtab;
135     __m256d          dummy_mask,cutoff_mask;
136     __m128           tmpmask0,tmpmask1;
137     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
138     __m256d          one     = _mm256_set1_pd(1.0);
139     __m256d          two     = _mm256_set1_pd(2.0);
140     x                = xx[0];
141     f                = ff[0];
142
143     nri              = nlist->nri;
144     iinr             = nlist->iinr;
145     jindex           = nlist->jindex;
146     jjnr             = nlist->jjnr;
147     shiftidx         = nlist->shift;
148     gid              = nlist->gid;
149     shiftvec         = fr->shift_vec[0];
150     fshift           = fr->fshift[0];
151     facel            = _mm256_set1_pd(fr->epsfac);
152     charge           = mdatoms->chargeA;
153     nvdwtype         = fr->ntype;
154     vdwparam         = fr->nbfp;
155     vdwtype          = mdatoms->typeA;
156     vdwgridparam     = fr->ljpme_c6grid;
157     sh_lj_ewald      = _mm256_set1_pd(fr->ic->sh_lj_ewald);
158     ewclj            = _mm256_set1_pd(fr->ewaldcoeff_lj);
159     ewclj2           = _mm256_mul_pd(minus_one,_mm256_mul_pd(ewclj,ewclj));
160
161     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
162     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
163     beta2            = _mm256_mul_pd(beta,beta);
164     beta3            = _mm256_mul_pd(beta,beta2);
165
166     ewtab            = fr->ic->tabq_coul_FDV0;
167     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
168     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
169
170     /* Setup water-specific parameters */
171     inr              = nlist->iinr[0];
172     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
173     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
174     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
175     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
176     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
177
178     jq0              = _mm256_set1_pd(charge[inr+0]);
179     jq1              = _mm256_set1_pd(charge[inr+1]);
180     jq2              = _mm256_set1_pd(charge[inr+2]);
181     vdwjidx0A        = 2*vdwtype[inr+0];
182     qq00             = _mm256_mul_pd(iq0,jq0);
183     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
184     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
185     c6grid_00        = _mm256_set1_pd(vdwgridioffsetptr0[vdwjidx0A]);
186     qq01             = _mm256_mul_pd(iq0,jq1);
187     qq02             = _mm256_mul_pd(iq0,jq2);
188     qq10             = _mm256_mul_pd(iq1,jq0);
189     qq11             = _mm256_mul_pd(iq1,jq1);
190     qq12             = _mm256_mul_pd(iq1,jq2);
191     qq20             = _mm256_mul_pd(iq2,jq0);
192     qq21             = _mm256_mul_pd(iq2,jq1);
193     qq22             = _mm256_mul_pd(iq2,jq2);
194
195     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
196     rcutoff_scalar   = fr->rcoulomb;
197     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
198     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
199
200     sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
201     rvdw             = _mm256_set1_pd(fr->rvdw);
202
203     /* Avoid stupid compiler warnings */
204     jnrA = jnrB = jnrC = jnrD = 0;
205     j_coord_offsetA = 0;
206     j_coord_offsetB = 0;
207     j_coord_offsetC = 0;
208     j_coord_offsetD = 0;
209
210     outeriter        = 0;
211     inneriter        = 0;
212
213     for(iidx=0;iidx<4*DIM;iidx++)
214     {
215         scratch[iidx] = 0.0;
216     }
217
218     /* Start outer loop over neighborlists */
219     for(iidx=0; iidx<nri; iidx++)
220     {
221         /* Load shift vector for this list */
222         i_shift_offset   = DIM*shiftidx[iidx];
223
224         /* Load limits for loop over neighbors */
225         j_index_start    = jindex[iidx];
226         j_index_end      = jindex[iidx+1];
227
228         /* Get outer coordinate index */
229         inr              = iinr[iidx];
230         i_coord_offset   = DIM*inr;
231
232         /* Load i particle coords and add shift vector */
233         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
234                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
235
236         fix0             = _mm256_setzero_pd();
237         fiy0             = _mm256_setzero_pd();
238         fiz0             = _mm256_setzero_pd();
239         fix1             = _mm256_setzero_pd();
240         fiy1             = _mm256_setzero_pd();
241         fiz1             = _mm256_setzero_pd();
242         fix2             = _mm256_setzero_pd();
243         fiy2             = _mm256_setzero_pd();
244         fiz2             = _mm256_setzero_pd();
245
246         /* Reset potential sums */
247         velecsum         = _mm256_setzero_pd();
248         vvdwsum          = _mm256_setzero_pd();
249
250         /* Start inner kernel loop */
251         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
252         {
253
254             /* Get j neighbor index, and coordinate index */
255             jnrA             = jjnr[jidx];
256             jnrB             = jjnr[jidx+1];
257             jnrC             = jjnr[jidx+2];
258             jnrD             = jjnr[jidx+3];
259             j_coord_offsetA  = DIM*jnrA;
260             j_coord_offsetB  = DIM*jnrB;
261             j_coord_offsetC  = DIM*jnrC;
262             j_coord_offsetD  = DIM*jnrD;
263
264             /* load j atom coordinates */
265             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
266                                                  x+j_coord_offsetC,x+j_coord_offsetD,
267                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
268
269             /* Calculate displacement vector */
270             dx00             = _mm256_sub_pd(ix0,jx0);
271             dy00             = _mm256_sub_pd(iy0,jy0);
272             dz00             = _mm256_sub_pd(iz0,jz0);
273             dx01             = _mm256_sub_pd(ix0,jx1);
274             dy01             = _mm256_sub_pd(iy0,jy1);
275             dz01             = _mm256_sub_pd(iz0,jz1);
276             dx02             = _mm256_sub_pd(ix0,jx2);
277             dy02             = _mm256_sub_pd(iy0,jy2);
278             dz02             = _mm256_sub_pd(iz0,jz2);
279             dx10             = _mm256_sub_pd(ix1,jx0);
280             dy10             = _mm256_sub_pd(iy1,jy0);
281             dz10             = _mm256_sub_pd(iz1,jz0);
282             dx11             = _mm256_sub_pd(ix1,jx1);
283             dy11             = _mm256_sub_pd(iy1,jy1);
284             dz11             = _mm256_sub_pd(iz1,jz1);
285             dx12             = _mm256_sub_pd(ix1,jx2);
286             dy12             = _mm256_sub_pd(iy1,jy2);
287             dz12             = _mm256_sub_pd(iz1,jz2);
288             dx20             = _mm256_sub_pd(ix2,jx0);
289             dy20             = _mm256_sub_pd(iy2,jy0);
290             dz20             = _mm256_sub_pd(iz2,jz0);
291             dx21             = _mm256_sub_pd(ix2,jx1);
292             dy21             = _mm256_sub_pd(iy2,jy1);
293             dz21             = _mm256_sub_pd(iz2,jz1);
294             dx22             = _mm256_sub_pd(ix2,jx2);
295             dy22             = _mm256_sub_pd(iy2,jy2);
296             dz22             = _mm256_sub_pd(iz2,jz2);
297
298             /* Calculate squared distance and things based on it */
299             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
300             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
301             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
302             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
303             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
304             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
305             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
306             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
307             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
308
309             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
310             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
311             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
312             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
313             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
314             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
315             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
316             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
317             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
318
319             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
320             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
321             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
322             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
323             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
324             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
325             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
326             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
327             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
328
329             fjx0             = _mm256_setzero_pd();
330             fjy0             = _mm256_setzero_pd();
331             fjz0             = _mm256_setzero_pd();
332             fjx1             = _mm256_setzero_pd();
333             fjy1             = _mm256_setzero_pd();
334             fjz1             = _mm256_setzero_pd();
335             fjx2             = _mm256_setzero_pd();
336             fjy2             = _mm256_setzero_pd();
337             fjz2             = _mm256_setzero_pd();
338
339             /**************************
340              * CALCULATE INTERACTIONS *
341              **************************/
342
343             if (gmx_mm256_any_lt(rsq00,rcutoff2))
344             {
345
346             r00              = _mm256_mul_pd(rsq00,rinv00);
347
348             /* EWALD ELECTROSTATICS */
349
350             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
351             ewrt             = _mm256_mul_pd(r00,ewtabscale);
352             ewitab           = _mm256_cvttpd_epi32(ewrt);
353             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
354             ewitab           = _mm_slli_epi32(ewitab,2);
355             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
356             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
357             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
358             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
359             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
360             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
361             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
362             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
363             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
364
365             /* Analytical LJ-PME */
366             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
367             ewcljrsq         = _mm256_mul_pd(ewclj2,rsq00);
368             ewclj6           = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
369             exponent         = gmx_simd_exp_d(ewcljrsq);
370             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
371             poly             = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
372             /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
373             vvdw6            = _mm256_mul_pd(_mm256_sub_pd(c6_00,_mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly))),rinvsix);
374             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
375             vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
376                                           _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_add_pd(_mm256_mul_pd(c6_00,sh_vdw_invrcut6),_mm256_mul_pd(c6grid_00,sh_lj_ewald))),one_sixth));
377             /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
378             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,_mm256_sub_pd(vvdw6,_mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6)))),rinvsq00);
379
380             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
381
382             /* Update potential sum for this i atom from the interaction with this j atom. */
383             velec            = _mm256_and_pd(velec,cutoff_mask);
384             velecsum         = _mm256_add_pd(velecsum,velec);
385             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
386             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
387
388             fscal            = _mm256_add_pd(felec,fvdw);
389
390             fscal            = _mm256_and_pd(fscal,cutoff_mask);
391
392             /* Calculate temporary vectorial force */
393             tx               = _mm256_mul_pd(fscal,dx00);
394             ty               = _mm256_mul_pd(fscal,dy00);
395             tz               = _mm256_mul_pd(fscal,dz00);
396
397             /* Update vectorial force */
398             fix0             = _mm256_add_pd(fix0,tx);
399             fiy0             = _mm256_add_pd(fiy0,ty);
400             fiz0             = _mm256_add_pd(fiz0,tz);
401
402             fjx0             = _mm256_add_pd(fjx0,tx);
403             fjy0             = _mm256_add_pd(fjy0,ty);
404             fjz0             = _mm256_add_pd(fjz0,tz);
405
406             }
407
408             /**************************
409              * CALCULATE INTERACTIONS *
410              **************************/
411
412             if (gmx_mm256_any_lt(rsq01,rcutoff2))
413             {
414
415             r01              = _mm256_mul_pd(rsq01,rinv01);
416
417             /* EWALD ELECTROSTATICS */
418
419             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
420             ewrt             = _mm256_mul_pd(r01,ewtabscale);
421             ewitab           = _mm256_cvttpd_epi32(ewrt);
422             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
423             ewitab           = _mm_slli_epi32(ewitab,2);
424             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
425             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
426             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
427             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
428             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
429             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
430             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
431             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_sub_pd(rinv01,sh_ewald),velec));
432             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
433
434             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
435
436             /* Update potential sum for this i atom from the interaction with this j atom. */
437             velec            = _mm256_and_pd(velec,cutoff_mask);
438             velecsum         = _mm256_add_pd(velecsum,velec);
439
440             fscal            = felec;
441
442             fscal            = _mm256_and_pd(fscal,cutoff_mask);
443
444             /* Calculate temporary vectorial force */
445             tx               = _mm256_mul_pd(fscal,dx01);
446             ty               = _mm256_mul_pd(fscal,dy01);
447             tz               = _mm256_mul_pd(fscal,dz01);
448
449             /* Update vectorial force */
450             fix0             = _mm256_add_pd(fix0,tx);
451             fiy0             = _mm256_add_pd(fiy0,ty);
452             fiz0             = _mm256_add_pd(fiz0,tz);
453
454             fjx1             = _mm256_add_pd(fjx1,tx);
455             fjy1             = _mm256_add_pd(fjy1,ty);
456             fjz1             = _mm256_add_pd(fjz1,tz);
457
458             }
459
460             /**************************
461              * CALCULATE INTERACTIONS *
462              **************************/
463
464             if (gmx_mm256_any_lt(rsq02,rcutoff2))
465             {
466
467             r02              = _mm256_mul_pd(rsq02,rinv02);
468
469             /* EWALD ELECTROSTATICS */
470
471             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
472             ewrt             = _mm256_mul_pd(r02,ewtabscale);
473             ewitab           = _mm256_cvttpd_epi32(ewrt);
474             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
475             ewitab           = _mm_slli_epi32(ewitab,2);
476             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
477             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
478             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
479             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
480             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
481             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
482             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
483             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_sub_pd(rinv02,sh_ewald),velec));
484             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
485
486             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
487
488             /* Update potential sum for this i atom from the interaction with this j atom. */
489             velec            = _mm256_and_pd(velec,cutoff_mask);
490             velecsum         = _mm256_add_pd(velecsum,velec);
491
492             fscal            = felec;
493
494             fscal            = _mm256_and_pd(fscal,cutoff_mask);
495
496             /* Calculate temporary vectorial force */
497             tx               = _mm256_mul_pd(fscal,dx02);
498             ty               = _mm256_mul_pd(fscal,dy02);
499             tz               = _mm256_mul_pd(fscal,dz02);
500
501             /* Update vectorial force */
502             fix0             = _mm256_add_pd(fix0,tx);
503             fiy0             = _mm256_add_pd(fiy0,ty);
504             fiz0             = _mm256_add_pd(fiz0,tz);
505
506             fjx2             = _mm256_add_pd(fjx2,tx);
507             fjy2             = _mm256_add_pd(fjy2,ty);
508             fjz2             = _mm256_add_pd(fjz2,tz);
509
510             }
511
512             /**************************
513              * CALCULATE INTERACTIONS *
514              **************************/
515
516             if (gmx_mm256_any_lt(rsq10,rcutoff2))
517             {
518
519             r10              = _mm256_mul_pd(rsq10,rinv10);
520
521             /* EWALD ELECTROSTATICS */
522
523             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
524             ewrt             = _mm256_mul_pd(r10,ewtabscale);
525             ewitab           = _mm256_cvttpd_epi32(ewrt);
526             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
527             ewitab           = _mm_slli_epi32(ewitab,2);
528             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
529             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
530             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
531             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
532             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
533             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
534             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
535             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
536             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
537
538             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
539
540             /* Update potential sum for this i atom from the interaction with this j atom. */
541             velec            = _mm256_and_pd(velec,cutoff_mask);
542             velecsum         = _mm256_add_pd(velecsum,velec);
543
544             fscal            = felec;
545
546             fscal            = _mm256_and_pd(fscal,cutoff_mask);
547
548             /* Calculate temporary vectorial force */
549             tx               = _mm256_mul_pd(fscal,dx10);
550             ty               = _mm256_mul_pd(fscal,dy10);
551             tz               = _mm256_mul_pd(fscal,dz10);
552
553             /* Update vectorial force */
554             fix1             = _mm256_add_pd(fix1,tx);
555             fiy1             = _mm256_add_pd(fiy1,ty);
556             fiz1             = _mm256_add_pd(fiz1,tz);
557
558             fjx0             = _mm256_add_pd(fjx0,tx);
559             fjy0             = _mm256_add_pd(fjy0,ty);
560             fjz0             = _mm256_add_pd(fjz0,tz);
561
562             }
563
564             /**************************
565              * CALCULATE INTERACTIONS *
566              **************************/
567
568             if (gmx_mm256_any_lt(rsq11,rcutoff2))
569             {
570
571             r11              = _mm256_mul_pd(rsq11,rinv11);
572
573             /* EWALD ELECTROSTATICS */
574
575             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
576             ewrt             = _mm256_mul_pd(r11,ewtabscale);
577             ewitab           = _mm256_cvttpd_epi32(ewrt);
578             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
579             ewitab           = _mm_slli_epi32(ewitab,2);
580             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
581             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
582             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
583             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
584             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
585             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
586             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
587             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
588             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
589
590             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
591
592             /* Update potential sum for this i atom from the interaction with this j atom. */
593             velec            = _mm256_and_pd(velec,cutoff_mask);
594             velecsum         = _mm256_add_pd(velecsum,velec);
595
596             fscal            = felec;
597
598             fscal            = _mm256_and_pd(fscal,cutoff_mask);
599
600             /* Calculate temporary vectorial force */
601             tx               = _mm256_mul_pd(fscal,dx11);
602             ty               = _mm256_mul_pd(fscal,dy11);
603             tz               = _mm256_mul_pd(fscal,dz11);
604
605             /* Update vectorial force */
606             fix1             = _mm256_add_pd(fix1,tx);
607             fiy1             = _mm256_add_pd(fiy1,ty);
608             fiz1             = _mm256_add_pd(fiz1,tz);
609
610             fjx1             = _mm256_add_pd(fjx1,tx);
611             fjy1             = _mm256_add_pd(fjy1,ty);
612             fjz1             = _mm256_add_pd(fjz1,tz);
613
614             }
615
616             /**************************
617              * CALCULATE INTERACTIONS *
618              **************************/
619
620             if (gmx_mm256_any_lt(rsq12,rcutoff2))
621             {
622
623             r12              = _mm256_mul_pd(rsq12,rinv12);
624
625             /* EWALD ELECTROSTATICS */
626
627             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
628             ewrt             = _mm256_mul_pd(r12,ewtabscale);
629             ewitab           = _mm256_cvttpd_epi32(ewrt);
630             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
631             ewitab           = _mm_slli_epi32(ewitab,2);
632             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
633             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
634             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
635             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
636             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
637             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
638             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
639             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
640             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
641
642             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
643
644             /* Update potential sum for this i atom from the interaction with this j atom. */
645             velec            = _mm256_and_pd(velec,cutoff_mask);
646             velecsum         = _mm256_add_pd(velecsum,velec);
647
648             fscal            = felec;
649
650             fscal            = _mm256_and_pd(fscal,cutoff_mask);
651
652             /* Calculate temporary vectorial force */
653             tx               = _mm256_mul_pd(fscal,dx12);
654             ty               = _mm256_mul_pd(fscal,dy12);
655             tz               = _mm256_mul_pd(fscal,dz12);
656
657             /* Update vectorial force */
658             fix1             = _mm256_add_pd(fix1,tx);
659             fiy1             = _mm256_add_pd(fiy1,ty);
660             fiz1             = _mm256_add_pd(fiz1,tz);
661
662             fjx2             = _mm256_add_pd(fjx2,tx);
663             fjy2             = _mm256_add_pd(fjy2,ty);
664             fjz2             = _mm256_add_pd(fjz2,tz);
665
666             }
667
668             /**************************
669              * CALCULATE INTERACTIONS *
670              **************************/
671
672             if (gmx_mm256_any_lt(rsq20,rcutoff2))
673             {
674
675             r20              = _mm256_mul_pd(rsq20,rinv20);
676
677             /* EWALD ELECTROSTATICS */
678
679             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
680             ewrt             = _mm256_mul_pd(r20,ewtabscale);
681             ewitab           = _mm256_cvttpd_epi32(ewrt);
682             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
683             ewitab           = _mm_slli_epi32(ewitab,2);
684             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
685             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
686             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
687             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
688             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
689             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
690             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
691             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
692             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
693
694             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
695
696             /* Update potential sum for this i atom from the interaction with this j atom. */
697             velec            = _mm256_and_pd(velec,cutoff_mask);
698             velecsum         = _mm256_add_pd(velecsum,velec);
699
700             fscal            = felec;
701
702             fscal            = _mm256_and_pd(fscal,cutoff_mask);
703
704             /* Calculate temporary vectorial force */
705             tx               = _mm256_mul_pd(fscal,dx20);
706             ty               = _mm256_mul_pd(fscal,dy20);
707             tz               = _mm256_mul_pd(fscal,dz20);
708
709             /* Update vectorial force */
710             fix2             = _mm256_add_pd(fix2,tx);
711             fiy2             = _mm256_add_pd(fiy2,ty);
712             fiz2             = _mm256_add_pd(fiz2,tz);
713
714             fjx0             = _mm256_add_pd(fjx0,tx);
715             fjy0             = _mm256_add_pd(fjy0,ty);
716             fjz0             = _mm256_add_pd(fjz0,tz);
717
718             }
719
720             /**************************
721              * CALCULATE INTERACTIONS *
722              **************************/
723
724             if (gmx_mm256_any_lt(rsq21,rcutoff2))
725             {
726
727             r21              = _mm256_mul_pd(rsq21,rinv21);
728
729             /* EWALD ELECTROSTATICS */
730
731             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
732             ewrt             = _mm256_mul_pd(r21,ewtabscale);
733             ewitab           = _mm256_cvttpd_epi32(ewrt);
734             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
735             ewitab           = _mm_slli_epi32(ewitab,2);
736             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
737             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
738             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
739             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
740             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
741             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
742             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
743             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
744             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
745
746             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
747
748             /* Update potential sum for this i atom from the interaction with this j atom. */
749             velec            = _mm256_and_pd(velec,cutoff_mask);
750             velecsum         = _mm256_add_pd(velecsum,velec);
751
752             fscal            = felec;
753
754             fscal            = _mm256_and_pd(fscal,cutoff_mask);
755
756             /* Calculate temporary vectorial force */
757             tx               = _mm256_mul_pd(fscal,dx21);
758             ty               = _mm256_mul_pd(fscal,dy21);
759             tz               = _mm256_mul_pd(fscal,dz21);
760
761             /* Update vectorial force */
762             fix2             = _mm256_add_pd(fix2,tx);
763             fiy2             = _mm256_add_pd(fiy2,ty);
764             fiz2             = _mm256_add_pd(fiz2,tz);
765
766             fjx1             = _mm256_add_pd(fjx1,tx);
767             fjy1             = _mm256_add_pd(fjy1,ty);
768             fjz1             = _mm256_add_pd(fjz1,tz);
769
770             }
771
772             /**************************
773              * CALCULATE INTERACTIONS *
774              **************************/
775
776             if (gmx_mm256_any_lt(rsq22,rcutoff2))
777             {
778
779             r22              = _mm256_mul_pd(rsq22,rinv22);
780
781             /* EWALD ELECTROSTATICS */
782
783             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
784             ewrt             = _mm256_mul_pd(r22,ewtabscale);
785             ewitab           = _mm256_cvttpd_epi32(ewrt);
786             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
787             ewitab           = _mm_slli_epi32(ewitab,2);
788             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
789             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
790             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
791             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
792             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
793             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
794             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
795             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
796             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
797
798             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
799
800             /* Update potential sum for this i atom from the interaction with this j atom. */
801             velec            = _mm256_and_pd(velec,cutoff_mask);
802             velecsum         = _mm256_add_pd(velecsum,velec);
803
804             fscal            = felec;
805
806             fscal            = _mm256_and_pd(fscal,cutoff_mask);
807
808             /* Calculate temporary vectorial force */
809             tx               = _mm256_mul_pd(fscal,dx22);
810             ty               = _mm256_mul_pd(fscal,dy22);
811             tz               = _mm256_mul_pd(fscal,dz22);
812
813             /* Update vectorial force */
814             fix2             = _mm256_add_pd(fix2,tx);
815             fiy2             = _mm256_add_pd(fiy2,ty);
816             fiz2             = _mm256_add_pd(fiz2,tz);
817
818             fjx2             = _mm256_add_pd(fjx2,tx);
819             fjy2             = _mm256_add_pd(fjy2,ty);
820             fjz2             = _mm256_add_pd(fjz2,tz);
821
822             }
823
824             fjptrA             = f+j_coord_offsetA;
825             fjptrB             = f+j_coord_offsetB;
826             fjptrC             = f+j_coord_offsetC;
827             fjptrD             = f+j_coord_offsetD;
828
829             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
830                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
831
832             /* Inner loop uses 450 flops */
833         }
834
835         if(jidx<j_index_end)
836         {
837
838             /* Get j neighbor index, and coordinate index */
839             jnrlistA         = jjnr[jidx];
840             jnrlistB         = jjnr[jidx+1];
841             jnrlistC         = jjnr[jidx+2];
842             jnrlistD         = jjnr[jidx+3];
843             /* Sign of each element will be negative for non-real atoms.
844              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
845              * so use it as val = _mm_andnot_pd(mask,val) to clear dummy entries.
846              */
847             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
848
849             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
850             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
851             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
852
853             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
854             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
855             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
856             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
857             j_coord_offsetA  = DIM*jnrA;
858             j_coord_offsetB  = DIM*jnrB;
859             j_coord_offsetC  = DIM*jnrC;
860             j_coord_offsetD  = DIM*jnrD;
861
862             /* load j atom coordinates */
863             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
864                                                  x+j_coord_offsetC,x+j_coord_offsetD,
865                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
866
867             /* Calculate displacement vector */
868             dx00             = _mm256_sub_pd(ix0,jx0);
869             dy00             = _mm256_sub_pd(iy0,jy0);
870             dz00             = _mm256_sub_pd(iz0,jz0);
871             dx01             = _mm256_sub_pd(ix0,jx1);
872             dy01             = _mm256_sub_pd(iy0,jy1);
873             dz01             = _mm256_sub_pd(iz0,jz1);
874             dx02             = _mm256_sub_pd(ix0,jx2);
875             dy02             = _mm256_sub_pd(iy0,jy2);
876             dz02             = _mm256_sub_pd(iz0,jz2);
877             dx10             = _mm256_sub_pd(ix1,jx0);
878             dy10             = _mm256_sub_pd(iy1,jy0);
879             dz10             = _mm256_sub_pd(iz1,jz0);
880             dx11             = _mm256_sub_pd(ix1,jx1);
881             dy11             = _mm256_sub_pd(iy1,jy1);
882             dz11             = _mm256_sub_pd(iz1,jz1);
883             dx12             = _mm256_sub_pd(ix1,jx2);
884             dy12             = _mm256_sub_pd(iy1,jy2);
885             dz12             = _mm256_sub_pd(iz1,jz2);
886             dx20             = _mm256_sub_pd(ix2,jx0);
887             dy20             = _mm256_sub_pd(iy2,jy0);
888             dz20             = _mm256_sub_pd(iz2,jz0);
889             dx21             = _mm256_sub_pd(ix2,jx1);
890             dy21             = _mm256_sub_pd(iy2,jy1);
891             dz21             = _mm256_sub_pd(iz2,jz1);
892             dx22             = _mm256_sub_pd(ix2,jx2);
893             dy22             = _mm256_sub_pd(iy2,jy2);
894             dz22             = _mm256_sub_pd(iz2,jz2);
895
896             /* Calculate squared distance and things based on it */
897             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
898             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
899             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
900             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
901             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
902             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
903             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
904             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
905             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
906
907             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
908             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
909             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
910             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
911             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
912             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
913             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
914             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
915             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
916
917             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
918             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
919             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
920             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
921             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
922             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
923             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
924             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
925             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
926
927             fjx0             = _mm256_setzero_pd();
928             fjy0             = _mm256_setzero_pd();
929             fjz0             = _mm256_setzero_pd();
930             fjx1             = _mm256_setzero_pd();
931             fjy1             = _mm256_setzero_pd();
932             fjz1             = _mm256_setzero_pd();
933             fjx2             = _mm256_setzero_pd();
934             fjy2             = _mm256_setzero_pd();
935             fjz2             = _mm256_setzero_pd();
936
937             /**************************
938              * CALCULATE INTERACTIONS *
939              **************************/
940
941             if (gmx_mm256_any_lt(rsq00,rcutoff2))
942             {
943
944             r00              = _mm256_mul_pd(rsq00,rinv00);
945             r00              = _mm256_andnot_pd(dummy_mask,r00);
946
947             /* EWALD ELECTROSTATICS */
948
949             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
950             ewrt             = _mm256_mul_pd(r00,ewtabscale);
951             ewitab           = _mm256_cvttpd_epi32(ewrt);
952             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
953             ewitab           = _mm_slli_epi32(ewitab,2);
954             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
955             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
956             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
957             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
958             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
959             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
960             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
961             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_sub_pd(rinv00,sh_ewald),velec));
962             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
963
964             /* Analytical LJ-PME */
965             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
966             ewcljrsq         = _mm256_mul_pd(ewclj2,rsq00);
967             ewclj6           = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
968             exponent         = gmx_simd_exp_d(ewcljrsq);
969             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
970             poly             = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
971             /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
972             vvdw6            = _mm256_mul_pd(_mm256_sub_pd(c6_00,_mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly))),rinvsix);
973             vvdw12           = _mm256_mul_pd(c12_00,_mm256_mul_pd(rinvsix,rinvsix));
974             vvdw             = _mm256_sub_pd(_mm256_mul_pd( _mm256_sub_pd(vvdw12 , _mm256_mul_pd(c12_00,_mm256_mul_pd(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
975                                           _mm256_mul_pd( _mm256_sub_pd(vvdw6,_mm256_add_pd(_mm256_mul_pd(c6_00,sh_vdw_invrcut6),_mm256_mul_pd(c6grid_00,sh_lj_ewald))),one_sixth));
976             /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
977             fvdw             = _mm256_mul_pd(_mm256_sub_pd(vvdw12,_mm256_sub_pd(vvdw6,_mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6)))),rinvsq00);
978
979             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
980
981             /* Update potential sum for this i atom from the interaction with this j atom. */
982             velec            = _mm256_and_pd(velec,cutoff_mask);
983             velec            = _mm256_andnot_pd(dummy_mask,velec);
984             velecsum         = _mm256_add_pd(velecsum,velec);
985             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
986             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
987             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
988
989             fscal            = _mm256_add_pd(felec,fvdw);
990
991             fscal            = _mm256_and_pd(fscal,cutoff_mask);
992
993             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
994
995             /* Calculate temporary vectorial force */
996             tx               = _mm256_mul_pd(fscal,dx00);
997             ty               = _mm256_mul_pd(fscal,dy00);
998             tz               = _mm256_mul_pd(fscal,dz00);
999
1000             /* Update vectorial force */
1001             fix0             = _mm256_add_pd(fix0,tx);
1002             fiy0             = _mm256_add_pd(fiy0,ty);
1003             fiz0             = _mm256_add_pd(fiz0,tz);
1004
1005             fjx0             = _mm256_add_pd(fjx0,tx);
1006             fjy0             = _mm256_add_pd(fjy0,ty);
1007             fjz0             = _mm256_add_pd(fjz0,tz);
1008
1009             }
1010
1011             /**************************
1012              * CALCULATE INTERACTIONS *
1013              **************************/
1014
1015             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1016             {
1017
1018             r01              = _mm256_mul_pd(rsq01,rinv01);
1019             r01              = _mm256_andnot_pd(dummy_mask,r01);
1020
1021             /* EWALD ELECTROSTATICS */
1022
1023             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1024             ewrt             = _mm256_mul_pd(r01,ewtabscale);
1025             ewitab           = _mm256_cvttpd_epi32(ewrt);
1026             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1027             ewitab           = _mm_slli_epi32(ewitab,2);
1028             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1029             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1030             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1031             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1032             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1033             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1034             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1035             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_sub_pd(rinv01,sh_ewald),velec));
1036             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
1037
1038             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1039
1040             /* Update potential sum for this i atom from the interaction with this j atom. */
1041             velec            = _mm256_and_pd(velec,cutoff_mask);
1042             velec            = _mm256_andnot_pd(dummy_mask,velec);
1043             velecsum         = _mm256_add_pd(velecsum,velec);
1044
1045             fscal            = felec;
1046
1047             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1048
1049             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1050
1051             /* Calculate temporary vectorial force */
1052             tx               = _mm256_mul_pd(fscal,dx01);
1053             ty               = _mm256_mul_pd(fscal,dy01);
1054             tz               = _mm256_mul_pd(fscal,dz01);
1055
1056             /* Update vectorial force */
1057             fix0             = _mm256_add_pd(fix0,tx);
1058             fiy0             = _mm256_add_pd(fiy0,ty);
1059             fiz0             = _mm256_add_pd(fiz0,tz);
1060
1061             fjx1             = _mm256_add_pd(fjx1,tx);
1062             fjy1             = _mm256_add_pd(fjy1,ty);
1063             fjz1             = _mm256_add_pd(fjz1,tz);
1064
1065             }
1066
1067             /**************************
1068              * CALCULATE INTERACTIONS *
1069              **************************/
1070
1071             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1072             {
1073
1074             r02              = _mm256_mul_pd(rsq02,rinv02);
1075             r02              = _mm256_andnot_pd(dummy_mask,r02);
1076
1077             /* EWALD ELECTROSTATICS */
1078
1079             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1080             ewrt             = _mm256_mul_pd(r02,ewtabscale);
1081             ewitab           = _mm256_cvttpd_epi32(ewrt);
1082             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1083             ewitab           = _mm_slli_epi32(ewitab,2);
1084             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1085             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1086             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1087             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1088             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1089             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1090             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1091             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_sub_pd(rinv02,sh_ewald),velec));
1092             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
1093
1094             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
1095
1096             /* Update potential sum for this i atom from the interaction with this j atom. */
1097             velec            = _mm256_and_pd(velec,cutoff_mask);
1098             velec            = _mm256_andnot_pd(dummy_mask,velec);
1099             velecsum         = _mm256_add_pd(velecsum,velec);
1100
1101             fscal            = felec;
1102
1103             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1104
1105             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1106
1107             /* Calculate temporary vectorial force */
1108             tx               = _mm256_mul_pd(fscal,dx02);
1109             ty               = _mm256_mul_pd(fscal,dy02);
1110             tz               = _mm256_mul_pd(fscal,dz02);
1111
1112             /* Update vectorial force */
1113             fix0             = _mm256_add_pd(fix0,tx);
1114             fiy0             = _mm256_add_pd(fiy0,ty);
1115             fiz0             = _mm256_add_pd(fiz0,tz);
1116
1117             fjx2             = _mm256_add_pd(fjx2,tx);
1118             fjy2             = _mm256_add_pd(fjy2,ty);
1119             fjz2             = _mm256_add_pd(fjz2,tz);
1120
1121             }
1122
1123             /**************************
1124              * CALCULATE INTERACTIONS *
1125              **************************/
1126
1127             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1128             {
1129
1130             r10              = _mm256_mul_pd(rsq10,rinv10);
1131             r10              = _mm256_andnot_pd(dummy_mask,r10);
1132
1133             /* EWALD ELECTROSTATICS */
1134
1135             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1136             ewrt             = _mm256_mul_pd(r10,ewtabscale);
1137             ewitab           = _mm256_cvttpd_epi32(ewrt);
1138             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1139             ewitab           = _mm_slli_epi32(ewitab,2);
1140             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1141             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1142             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1143             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1144             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1145             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1146             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1147             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_sub_pd(rinv10,sh_ewald),velec));
1148             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1149
1150             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1151
1152             /* Update potential sum for this i atom from the interaction with this j atom. */
1153             velec            = _mm256_and_pd(velec,cutoff_mask);
1154             velec            = _mm256_andnot_pd(dummy_mask,velec);
1155             velecsum         = _mm256_add_pd(velecsum,velec);
1156
1157             fscal            = felec;
1158
1159             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1160
1161             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1162
1163             /* Calculate temporary vectorial force */
1164             tx               = _mm256_mul_pd(fscal,dx10);
1165             ty               = _mm256_mul_pd(fscal,dy10);
1166             tz               = _mm256_mul_pd(fscal,dz10);
1167
1168             /* Update vectorial force */
1169             fix1             = _mm256_add_pd(fix1,tx);
1170             fiy1             = _mm256_add_pd(fiy1,ty);
1171             fiz1             = _mm256_add_pd(fiz1,tz);
1172
1173             fjx0             = _mm256_add_pd(fjx0,tx);
1174             fjy0             = _mm256_add_pd(fjy0,ty);
1175             fjz0             = _mm256_add_pd(fjz0,tz);
1176
1177             }
1178
1179             /**************************
1180              * CALCULATE INTERACTIONS *
1181              **************************/
1182
1183             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1184             {
1185
1186             r11              = _mm256_mul_pd(rsq11,rinv11);
1187             r11              = _mm256_andnot_pd(dummy_mask,r11);
1188
1189             /* EWALD ELECTROSTATICS */
1190
1191             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1192             ewrt             = _mm256_mul_pd(r11,ewtabscale);
1193             ewitab           = _mm256_cvttpd_epi32(ewrt);
1194             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1195             ewitab           = _mm_slli_epi32(ewitab,2);
1196             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1197             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1198             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1199             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1200             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1201             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1202             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1203             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_sub_pd(rinv11,sh_ewald),velec));
1204             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
1205
1206             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1207
1208             /* Update potential sum for this i atom from the interaction with this j atom. */
1209             velec            = _mm256_and_pd(velec,cutoff_mask);
1210             velec            = _mm256_andnot_pd(dummy_mask,velec);
1211             velecsum         = _mm256_add_pd(velecsum,velec);
1212
1213             fscal            = felec;
1214
1215             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1216
1217             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1218
1219             /* Calculate temporary vectorial force */
1220             tx               = _mm256_mul_pd(fscal,dx11);
1221             ty               = _mm256_mul_pd(fscal,dy11);
1222             tz               = _mm256_mul_pd(fscal,dz11);
1223
1224             /* Update vectorial force */
1225             fix1             = _mm256_add_pd(fix1,tx);
1226             fiy1             = _mm256_add_pd(fiy1,ty);
1227             fiz1             = _mm256_add_pd(fiz1,tz);
1228
1229             fjx1             = _mm256_add_pd(fjx1,tx);
1230             fjy1             = _mm256_add_pd(fjy1,ty);
1231             fjz1             = _mm256_add_pd(fjz1,tz);
1232
1233             }
1234
1235             /**************************
1236              * CALCULATE INTERACTIONS *
1237              **************************/
1238
1239             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1240             {
1241
1242             r12              = _mm256_mul_pd(rsq12,rinv12);
1243             r12              = _mm256_andnot_pd(dummy_mask,r12);
1244
1245             /* EWALD ELECTROSTATICS */
1246
1247             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1248             ewrt             = _mm256_mul_pd(r12,ewtabscale);
1249             ewitab           = _mm256_cvttpd_epi32(ewrt);
1250             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1251             ewitab           = _mm_slli_epi32(ewitab,2);
1252             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1253             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1254             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1255             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1256             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1257             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1258             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1259             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_sub_pd(rinv12,sh_ewald),velec));
1260             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
1261
1262             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1263
1264             /* Update potential sum for this i atom from the interaction with this j atom. */
1265             velec            = _mm256_and_pd(velec,cutoff_mask);
1266             velec            = _mm256_andnot_pd(dummy_mask,velec);
1267             velecsum         = _mm256_add_pd(velecsum,velec);
1268
1269             fscal            = felec;
1270
1271             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1272
1273             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1274
1275             /* Calculate temporary vectorial force */
1276             tx               = _mm256_mul_pd(fscal,dx12);
1277             ty               = _mm256_mul_pd(fscal,dy12);
1278             tz               = _mm256_mul_pd(fscal,dz12);
1279
1280             /* Update vectorial force */
1281             fix1             = _mm256_add_pd(fix1,tx);
1282             fiy1             = _mm256_add_pd(fiy1,ty);
1283             fiz1             = _mm256_add_pd(fiz1,tz);
1284
1285             fjx2             = _mm256_add_pd(fjx2,tx);
1286             fjy2             = _mm256_add_pd(fjy2,ty);
1287             fjz2             = _mm256_add_pd(fjz2,tz);
1288
1289             }
1290
1291             /**************************
1292              * CALCULATE INTERACTIONS *
1293              **************************/
1294
1295             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1296             {
1297
1298             r20              = _mm256_mul_pd(rsq20,rinv20);
1299             r20              = _mm256_andnot_pd(dummy_mask,r20);
1300
1301             /* EWALD ELECTROSTATICS */
1302
1303             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1304             ewrt             = _mm256_mul_pd(r20,ewtabscale);
1305             ewitab           = _mm256_cvttpd_epi32(ewrt);
1306             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1307             ewitab           = _mm_slli_epi32(ewitab,2);
1308             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1309             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1310             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1311             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1312             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1313             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1314             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1315             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_sub_pd(rinv20,sh_ewald),velec));
1316             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
1317
1318             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1319
1320             /* Update potential sum for this i atom from the interaction with this j atom. */
1321             velec            = _mm256_and_pd(velec,cutoff_mask);
1322             velec            = _mm256_andnot_pd(dummy_mask,velec);
1323             velecsum         = _mm256_add_pd(velecsum,velec);
1324
1325             fscal            = felec;
1326
1327             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1328
1329             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1330
1331             /* Calculate temporary vectorial force */
1332             tx               = _mm256_mul_pd(fscal,dx20);
1333             ty               = _mm256_mul_pd(fscal,dy20);
1334             tz               = _mm256_mul_pd(fscal,dz20);
1335
1336             /* Update vectorial force */
1337             fix2             = _mm256_add_pd(fix2,tx);
1338             fiy2             = _mm256_add_pd(fiy2,ty);
1339             fiz2             = _mm256_add_pd(fiz2,tz);
1340
1341             fjx0             = _mm256_add_pd(fjx0,tx);
1342             fjy0             = _mm256_add_pd(fjy0,ty);
1343             fjz0             = _mm256_add_pd(fjz0,tz);
1344
1345             }
1346
1347             /**************************
1348              * CALCULATE INTERACTIONS *
1349              **************************/
1350
1351             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1352             {
1353
1354             r21              = _mm256_mul_pd(rsq21,rinv21);
1355             r21              = _mm256_andnot_pd(dummy_mask,r21);
1356
1357             /* EWALD ELECTROSTATICS */
1358
1359             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1360             ewrt             = _mm256_mul_pd(r21,ewtabscale);
1361             ewitab           = _mm256_cvttpd_epi32(ewrt);
1362             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1363             ewitab           = _mm_slli_epi32(ewitab,2);
1364             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1365             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1366             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1367             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1368             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1369             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1370             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1371             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_sub_pd(rinv21,sh_ewald),velec));
1372             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
1373
1374             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1375
1376             /* Update potential sum for this i atom from the interaction with this j atom. */
1377             velec            = _mm256_and_pd(velec,cutoff_mask);
1378             velec            = _mm256_andnot_pd(dummy_mask,velec);
1379             velecsum         = _mm256_add_pd(velecsum,velec);
1380
1381             fscal            = felec;
1382
1383             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1384
1385             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1386
1387             /* Calculate temporary vectorial force */
1388             tx               = _mm256_mul_pd(fscal,dx21);
1389             ty               = _mm256_mul_pd(fscal,dy21);
1390             tz               = _mm256_mul_pd(fscal,dz21);
1391
1392             /* Update vectorial force */
1393             fix2             = _mm256_add_pd(fix2,tx);
1394             fiy2             = _mm256_add_pd(fiy2,ty);
1395             fiz2             = _mm256_add_pd(fiz2,tz);
1396
1397             fjx1             = _mm256_add_pd(fjx1,tx);
1398             fjy1             = _mm256_add_pd(fjy1,ty);
1399             fjz1             = _mm256_add_pd(fjz1,tz);
1400
1401             }
1402
1403             /**************************
1404              * CALCULATE INTERACTIONS *
1405              **************************/
1406
1407             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1408             {
1409
1410             r22              = _mm256_mul_pd(rsq22,rinv22);
1411             r22              = _mm256_andnot_pd(dummy_mask,r22);
1412
1413             /* EWALD ELECTROSTATICS */
1414
1415             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1416             ewrt             = _mm256_mul_pd(r22,ewtabscale);
1417             ewitab           = _mm256_cvttpd_epi32(ewrt);
1418             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1419             ewitab           = _mm_slli_epi32(ewitab,2);
1420             ewtabF           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,0) );
1421             ewtabD           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,1) );
1422             ewtabV           = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,2) );
1423             ewtabFn          = _mm256_load_pd( ewtab + _mm_extract_epi32(ewitab,3) );
1424             GMX_MM256_FULLTRANSPOSE4_PD(ewtabF,ewtabD,ewtabV,ewtabFn);
1425             felec            = _mm256_add_pd(ewtabF,_mm256_mul_pd(eweps,ewtabD));
1426             velec            = _mm256_sub_pd(ewtabV,_mm256_mul_pd(_mm256_mul_pd(ewtabhalfspace,eweps),_mm256_add_pd(ewtabF,felec)));
1427             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_sub_pd(rinv22,sh_ewald),velec));
1428             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
1429
1430             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1431
1432             /* Update potential sum for this i atom from the interaction with this j atom. */
1433             velec            = _mm256_and_pd(velec,cutoff_mask);
1434             velec            = _mm256_andnot_pd(dummy_mask,velec);
1435             velecsum         = _mm256_add_pd(velecsum,velec);
1436
1437             fscal            = felec;
1438
1439             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1440
1441             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1442
1443             /* Calculate temporary vectorial force */
1444             tx               = _mm256_mul_pd(fscal,dx22);
1445             ty               = _mm256_mul_pd(fscal,dy22);
1446             tz               = _mm256_mul_pd(fscal,dz22);
1447
1448             /* Update vectorial force */
1449             fix2             = _mm256_add_pd(fix2,tx);
1450             fiy2             = _mm256_add_pd(fiy2,ty);
1451             fiz2             = _mm256_add_pd(fiz2,tz);
1452
1453             fjx2             = _mm256_add_pd(fjx2,tx);
1454             fjy2             = _mm256_add_pd(fjy2,ty);
1455             fjz2             = _mm256_add_pd(fjz2,tz);
1456
1457             }
1458
1459             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1460             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1461             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1462             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
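            /* For padding entries (jnrlist < 0) the j-force pointer is redirected
             * to the local scratch buffer, so the swizzled decrement below never
             * writes outside the real force array; whatever lands in scratch is
             * simply discarded.
             */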
1463
1464             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1465                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1466
1467             /* Inner loop uses 459 flops */
1468         }
1469
1470         /* End of innermost loop */
1471
1472         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1473                                                  f+i_coord_offset,fshift+i_shift_offset);
1474
1475         ggid                        = gid[iidx];
1476         /* Update potential energies */
1477         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1478         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
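        /* ggid selects the energy-group pair for this i-list; the four lanes of
         * velecsum and vvdwsum are reduced and accumulated into the matching
         * Coulomb and VdW energy-group buffers.
         */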
1479
1480         /* Increment number of inner iterations */
1481         inneriter                  += j_index_end - j_index_start;
1482
1483         /* Outer loop uses 20 flops */
1484     }
1485
1486     /* Increment number of outer iterations */
1487     outeriter        += nri;
1488
1489     /* Update outer/inner flops */
1490
1491     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*459);
1492 }
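/*
 * The kernel below is the force-only (F) variant of the kernel above: the pair
 * interactions, cut-off handling and dummy-atom masking are identical, but no
 * potential energies are accumulated and no energy-group update is performed,
 * which is why its inner loop costs fewer flops (374 vs 459).
 */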
1493 /*
1494  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_F_avx_256_double
1495  * Electrostatics interaction: Ewald
1496  * VdW interaction:            LJEwald
1497  * Geometry:                   Water3-Water3
1498  * Calculate force/pot:        Force
1499  */
1500 void
1501 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3W3_F_avx_256_double
1502                     (t_nblist                    * gmx_restrict       nlist,
1503                      rvec                        * gmx_restrict          xx,
1504                      rvec                        * gmx_restrict          ff,
1505                      t_forcerec                  * gmx_restrict          fr,
1506                      t_mdatoms                   * gmx_restrict     mdatoms,
1507                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1508                      t_nrnb                      * gmx_restrict        nrnb)
1509 {
1510     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1511      * just 0 for non-waters.
1512      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1513      * jnr indices corresponding to data put in the four positions in the SIMD register.
1514      */
1515     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1516     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1517     int              jnrA,jnrB,jnrC,jnrD;
1518     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1519     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1520     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1521     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1522     real             rcutoff_scalar;
1523     real             *shiftvec,*fshift,*x,*f;
1524     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1525     real             scratch[4*DIM];
1526     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1527     real *           vdwioffsetptr0;
1528     real *           vdwgridioffsetptr0;
1529     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1530     real *           vdwioffsetptr1;
1531     real *           vdwgridioffsetptr1;
1532     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1533     real *           vdwioffsetptr2;
1534     real *           vdwgridioffsetptr2;
1535     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1536     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1537     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1538     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1539     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1540     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1541     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1542     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1543     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1544     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1545     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1546     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1547     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1548     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1549     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1550     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1551     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
1552     real             *charge;
1553     int              nvdwtype;
1554     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1555     int              *vdwtype;
1556     real             *vdwparam;
1557     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
1558     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
1559     __m256d           c6grid_00;
1560     __m256d           c6grid_01;
1561     __m256d           c6grid_02;
1562     __m256d           c6grid_10;
1563     __m256d           c6grid_11;
1564     __m256d           c6grid_12;
1565     __m256d           c6grid_20;
1566     __m256d           c6grid_21;
1567     __m256d           c6grid_22;
1568     real             *vdwgridparam;
1569     __m256d           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
1570     __m256d           one_half  = _mm256_set1_pd(0.5);
1571     __m256d           minus_one = _mm256_set1_pd(-1.0);
1572     __m128i          ewitab;
1573     __m256d          ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1574     __m256d          beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1575     real             *ewtab;
1576     __m256d          dummy_mask,cutoff_mask;
1577     __m128           tmpmask0,tmpmask1;
1578     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1579     __m256d          one     = _mm256_set1_pd(1.0);
1580     __m256d          two     = _mm256_set1_pd(2.0);
1581     x                = xx[0];
1582     f                = ff[0];
1583
1584     nri              = nlist->nri;
1585     iinr             = nlist->iinr;
1586     jindex           = nlist->jindex;
1587     jjnr             = nlist->jjnr;
1588     shiftidx         = nlist->shift;
1589     gid              = nlist->gid;
1590     shiftvec         = fr->shift_vec[0];
1591     fshift           = fr->fshift[0];
1592     facel            = _mm256_set1_pd(fr->epsfac);
1593     charge           = mdatoms->chargeA;
1594     nvdwtype         = fr->ntype;
1595     vdwparam         = fr->nbfp;
1596     vdwtype          = mdatoms->typeA;
1597     vdwgridparam     = fr->ljpme_c6grid;
1598     sh_lj_ewald      = _mm256_set1_pd(fr->ic->sh_lj_ewald);
1599     ewclj            = _mm256_set1_pd(fr->ewaldcoeff_lj);
1600     ewclj2           = _mm256_mul_pd(minus_one,_mm256_mul_pd(ewclj,ewclj));
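    /* ewclj2 is stored with a negative sign, i.e. -beta_lj^2, so the LJ-PME
     * exponential in the inner loop can be evaluated directly as
     * exp(ewclj2*r^2) = exp(-(beta_lj*r)^2).
     */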
1601
1602     sh_ewald         = _mm256_set1_pd(fr->ic->sh_ewald);
1603     beta             = _mm256_set1_pd(fr->ic->ewaldcoeff_q);
1604     beta2            = _mm256_mul_pd(beta,beta);
1605     beta3            = _mm256_mul_pd(beta,beta2);
1606
1607     ewtab            = fr->ic->tabq_coul_F;
1608     ewtabscale       = _mm256_set1_pd(fr->ic->tabq_scale);
1609     ewtabhalfspace   = _mm256_set1_pd(0.5/fr->ic->tabq_scale);
1610
1611     /* Setup water-specific parameters */
1612     inr              = nlist->iinr[0];
1613     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1614     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1615     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1616     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1617     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
1618
1619     jq0              = _mm256_set1_pd(charge[inr+0]);
1620     jq1              = _mm256_set1_pd(charge[inr+1]);
1621     jq2              = _mm256_set1_pd(charge[inr+2]);
1622     vdwjidx0A        = 2*vdwtype[inr+0];
1623     qq00             = _mm256_mul_pd(iq0,jq0);
1624     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1625     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1626     c6grid_00        = _mm256_set1_pd(vdwgridioffsetptr0[vdwjidx0A]);
1627     qq01             = _mm256_mul_pd(iq0,jq1);
1628     qq02             = _mm256_mul_pd(iq0,jq2);
1629     qq10             = _mm256_mul_pd(iq1,jq0);
1630     qq11             = _mm256_mul_pd(iq1,jq1);
1631     qq12             = _mm256_mul_pd(iq1,jq2);
1632     qq20             = _mm256_mul_pd(iq2,jq0);
1633     qq21             = _mm256_mul_pd(iq2,jq1);
1634     qq22             = _mm256_mul_pd(iq2,jq2);
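    /* All waters in this kernel share the same topology, so the nine charge
     * products (already scaled by epsfac via iq0/iq1/iq2) and the single O-O
     * LJ and LJ-PME grid parameters can be set up once here and reused for
     * every water-water pair in the loops below.
     */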
1635
1636     /* With explicit cutoffs the value must be identical for electrostatics and VdW, so we use the electrostatics cutoff as an arbitrary choice */
1637     rcutoff_scalar   = fr->rcoulomb;
1638     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
1639     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
1640
1641     sh_vdw_invrcut6  = _mm256_set1_pd(fr->ic->sh_invrc6);
1642     rvdw             = _mm256_set1_pd(fr->rvdw);
1643
1644     /* Avoid spurious compiler warnings about possibly uninitialized variables */
1645     jnrA = jnrB = jnrC = jnrD = 0;
1646     j_coord_offsetA = 0;
1647     j_coord_offsetB = 0;
1648     j_coord_offsetC = 0;
1649     j_coord_offsetD = 0;
1650
1651     outeriter        = 0;
1652     inneriter        = 0;
1653
1654     for(iidx=0;iidx<4*DIM;iidx++)
1655     {
1656         scratch[iidx] = 0.0;
1657     }
1658
1659     /* Start outer loop over neighborlists */
1660     for(iidx=0; iidx<nri; iidx++)
1661     {
1662         /* Load shift vector for this list */
1663         i_shift_offset   = DIM*shiftidx[iidx];
1664
1665         /* Load limits for loop over neighbors */
1666         j_index_start    = jindex[iidx];
1667         j_index_end      = jindex[iidx+1];
1668
1669         /* Get outer coordinate index */
1670         inr              = iinr[iidx];
1671         i_coord_offset   = DIM*inr;
1672
1673         /* Load i particle coords and add shift vector */
1674         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1675                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1676
1677         fix0             = _mm256_setzero_pd();
1678         fiy0             = _mm256_setzero_pd();
1679         fiz0             = _mm256_setzero_pd();
1680         fix1             = _mm256_setzero_pd();
1681         fiy1             = _mm256_setzero_pd();
1682         fiz1             = _mm256_setzero_pd();
1683         fix2             = _mm256_setzero_pd();
1684         fiy2             = _mm256_setzero_pd();
1685         fiz2             = _mm256_setzero_pd();
1686
1687         /* Start inner kernel loop */
1688         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1689         {
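            /* This loop only handles complete quadruplets of j waters: the guard
             * jjnr[jidx+3]>=0 guarantees that all four SIMD lanes hold real
             * atoms, so no dummy masking is needed here.  Any remaining padded
             * entries are handled by the masked block after this loop.
             */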
1690
1691             /* Get j neighbor index, and coordinate index */
1692             jnrA             = jjnr[jidx];
1693             jnrB             = jjnr[jidx+1];
1694             jnrC             = jjnr[jidx+2];
1695             jnrD             = jjnr[jidx+3];
1696             j_coord_offsetA  = DIM*jnrA;
1697             j_coord_offsetB  = DIM*jnrB;
1698             j_coord_offsetC  = DIM*jnrC;
1699             j_coord_offsetD  = DIM*jnrD;
1700
1701             /* load j atom coordinates */
1702             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1703                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1704                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1705
1706             /* Calculate displacement vector */
1707             dx00             = _mm256_sub_pd(ix0,jx0);
1708             dy00             = _mm256_sub_pd(iy0,jy0);
1709             dz00             = _mm256_sub_pd(iz0,jz0);
1710             dx01             = _mm256_sub_pd(ix0,jx1);
1711             dy01             = _mm256_sub_pd(iy0,jy1);
1712             dz01             = _mm256_sub_pd(iz0,jz1);
1713             dx02             = _mm256_sub_pd(ix0,jx2);
1714             dy02             = _mm256_sub_pd(iy0,jy2);
1715             dz02             = _mm256_sub_pd(iz0,jz2);
1716             dx10             = _mm256_sub_pd(ix1,jx0);
1717             dy10             = _mm256_sub_pd(iy1,jy0);
1718             dz10             = _mm256_sub_pd(iz1,jz0);
1719             dx11             = _mm256_sub_pd(ix1,jx1);
1720             dy11             = _mm256_sub_pd(iy1,jy1);
1721             dz11             = _mm256_sub_pd(iz1,jz1);
1722             dx12             = _mm256_sub_pd(ix1,jx2);
1723             dy12             = _mm256_sub_pd(iy1,jy2);
1724             dz12             = _mm256_sub_pd(iz1,jz2);
1725             dx20             = _mm256_sub_pd(ix2,jx0);
1726             dy20             = _mm256_sub_pd(iy2,jy0);
1727             dz20             = _mm256_sub_pd(iz2,jz0);
1728             dx21             = _mm256_sub_pd(ix2,jx1);
1729             dy21             = _mm256_sub_pd(iy2,jy1);
1730             dz21             = _mm256_sub_pd(iz2,jz1);
1731             dx22             = _mm256_sub_pd(ix2,jx2);
1732             dy22             = _mm256_sub_pd(iy2,jy2);
1733             dz22             = _mm256_sub_pd(iz2,jz2);
1734
1735             /* Calculate squared distance and things based on it */
1736             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1737             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1738             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1739             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1740             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1741             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1742             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1743             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1744             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1745
1746             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1747             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
1748             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
1749             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1750             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1751             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1752             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1753             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1754             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1755
1756             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1757             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1758             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1759             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1760             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1761             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1762             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1763             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1764             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1765
1766             fjx0             = _mm256_setzero_pd();
1767             fjy0             = _mm256_setzero_pd();
1768             fjz0             = _mm256_setzero_pd();
1769             fjx1             = _mm256_setzero_pd();
1770             fjy1             = _mm256_setzero_pd();
1771             fjz1             = _mm256_setzero_pd();
1772             fjx2             = _mm256_setzero_pd();
1773             fjy2             = _mm256_setzero_pd();
1774             fjz2             = _mm256_setzero_pd();
1775
1776             /**************************
1777              * CALCULATE INTERACTIONS *
1778              **************************/
1779
1780             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1781             {
1782
1783             r00              = _mm256_mul_pd(rsq00,rinv00);
1784
1785             /* EWALD ELECTROSTATICS */
1786
1787             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1788             ewrt             = _mm256_mul_pd(r00,ewtabscale);
1789             ewitab           = _mm256_cvttpd_epi32(ewrt);
1790             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1791             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1792                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1793                                             &ewtabF,&ewtabFn);
1794             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1795             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
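            /* In this force-only kernel the Ewald correction uses a plain force
             * table: load_4pair gathers F_i and F_{i+1} for each lane, and the
             * correction is linearly interpolated as (1-eps)*F_i + eps*F_{i+1};
             * no D or V entries are needed since the potential is not computed.
             */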
1796
1797             /* Analytical LJ-PME */
1798             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
1799             ewcljrsq         = _mm256_mul_pd(ewclj2,rsq00);
1800             ewclj6           = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
1801             exponent         = gmx_simd_exp_d(ewcljrsq);
1802             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1803             poly             = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
1804             /* f6A = 6 * C6grid * (1 - poly) */
1805             f6A              = _mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly));
1806             /* f6B = C6grid * exponent * beta^6 */
1807             f6B              = _mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6));
1808             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1809             fvdw              = _mm256_mul_pd(_mm256_add_pd(_mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),_mm256_sub_pd(c6_00,f6A)),rinvsix),f6B),rinvsq00);
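            /* poly equals 1 at r = 0 and decays to 0 at large r, so f6A switches
             * the grid dispersion coefficient c6grid smoothly from zero at short
             * range to its full value at long range, while f6B carries the
             * remaining exponential term; together they form the real-space
             * LJ-PME correction that fvdw combines with the plain 12-6 force
             * (see the formula comments above).
             */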
1810
1811             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1812
1813             fscal            = _mm256_add_pd(felec,fvdw);
1814
1815             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1816
1817             /* Calculate temporary vectorial force */
1818             tx               = _mm256_mul_pd(fscal,dx00);
1819             ty               = _mm256_mul_pd(fscal,dy00);
1820             tz               = _mm256_mul_pd(fscal,dz00);
1821
1822             /* Update vectorial force */
1823             fix0             = _mm256_add_pd(fix0,tx);
1824             fiy0             = _mm256_add_pd(fiy0,ty);
1825             fiz0             = _mm256_add_pd(fiz0,tz);
1826
1827             fjx0             = _mm256_add_pd(fjx0,tx);
1828             fjy0             = _mm256_add_pd(fjy0,ty);
1829             fjz0             = _mm256_add_pd(fjz0,tz);
1830
1831             }
1832
1833             /**************************
1834              * CALCULATE INTERACTIONS *
1835              **************************/
1836
1837             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1838             {
1839
1840             r01              = _mm256_mul_pd(rsq01,rinv01);
1841
1842             /* EWALD ELECTROSTATICS */
1843
1844             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1845             ewrt             = _mm256_mul_pd(r01,ewtabscale);
1846             ewitab           = _mm256_cvttpd_epi32(ewrt);
1847             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1848             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1849                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1850                                             &ewtabF,&ewtabFn);
1851             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1852             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
1853
1854             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1855
1856             fscal            = felec;
1857
1858             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1859
1860             /* Calculate temporary vectorial force */
1861             tx               = _mm256_mul_pd(fscal,dx01);
1862             ty               = _mm256_mul_pd(fscal,dy01);
1863             tz               = _mm256_mul_pd(fscal,dz01);
1864
1865             /* Update vectorial force */
1866             fix0             = _mm256_add_pd(fix0,tx);
1867             fiy0             = _mm256_add_pd(fiy0,ty);
1868             fiz0             = _mm256_add_pd(fiz0,tz);
1869
1870             fjx1             = _mm256_add_pd(fjx1,tx);
1871             fjy1             = _mm256_add_pd(fjy1,ty);
1872             fjz1             = _mm256_add_pd(fjz1,tz);
1873
1874             }
1875
1876             /**************************
1877              * CALCULATE INTERACTIONS *
1878              **************************/
1879
1880             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1881             {
1882
1883             r02              = _mm256_mul_pd(rsq02,rinv02);
1884
1885             /* EWALD ELECTROSTATICS */
1886
1887             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1888             ewrt             = _mm256_mul_pd(r02,ewtabscale);
1889             ewitab           = _mm256_cvttpd_epi32(ewrt);
1890             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1891             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1892                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1893                                             &ewtabF,&ewtabFn);
1894             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1895             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
1896
1897             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
1898
1899             fscal            = felec;
1900
1901             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1902
1903             /* Calculate temporary vectorial force */
1904             tx               = _mm256_mul_pd(fscal,dx02);
1905             ty               = _mm256_mul_pd(fscal,dy02);
1906             tz               = _mm256_mul_pd(fscal,dz02);
1907
1908             /* Update vectorial force */
1909             fix0             = _mm256_add_pd(fix0,tx);
1910             fiy0             = _mm256_add_pd(fiy0,ty);
1911             fiz0             = _mm256_add_pd(fiz0,tz);
1912
1913             fjx2             = _mm256_add_pd(fjx2,tx);
1914             fjy2             = _mm256_add_pd(fjy2,ty);
1915             fjz2             = _mm256_add_pd(fjz2,tz);
1916
1917             }
1918
1919             /**************************
1920              * CALCULATE INTERACTIONS *
1921              **************************/
1922
1923             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1924             {
1925
1926             r10              = _mm256_mul_pd(rsq10,rinv10);
1927
1928             /* EWALD ELECTROSTATICS */
1929
1930             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1931             ewrt             = _mm256_mul_pd(r10,ewtabscale);
1932             ewitab           = _mm256_cvttpd_epi32(ewrt);
1933             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1934             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1935                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1936                                             &ewtabF,&ewtabFn);
1937             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1938             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
1939
1940             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1941
1942             fscal            = felec;
1943
1944             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1945
1946             /* Calculate temporary vectorial force */
1947             tx               = _mm256_mul_pd(fscal,dx10);
1948             ty               = _mm256_mul_pd(fscal,dy10);
1949             tz               = _mm256_mul_pd(fscal,dz10);
1950
1951             /* Update vectorial force */
1952             fix1             = _mm256_add_pd(fix1,tx);
1953             fiy1             = _mm256_add_pd(fiy1,ty);
1954             fiz1             = _mm256_add_pd(fiz1,tz);
1955
1956             fjx0             = _mm256_add_pd(fjx0,tx);
1957             fjy0             = _mm256_add_pd(fjy0,ty);
1958             fjz0             = _mm256_add_pd(fjz0,tz);
1959
1960             }
1961
1962             /**************************
1963              * CALCULATE INTERACTIONS *
1964              **************************/
1965
1966             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1967             {
1968
1969             r11              = _mm256_mul_pd(rsq11,rinv11);
1970
1971             /* EWALD ELECTROSTATICS */
1972
1973             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
1974             ewrt             = _mm256_mul_pd(r11,ewtabscale);
1975             ewitab           = _mm256_cvttpd_epi32(ewrt);
1976             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
1977             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
1978                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
1979                                             &ewtabF,&ewtabFn);
1980             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
1981             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
1982
1983             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1984
1985             fscal            = felec;
1986
1987             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1988
1989             /* Calculate temporary vectorial force */
1990             tx               = _mm256_mul_pd(fscal,dx11);
1991             ty               = _mm256_mul_pd(fscal,dy11);
1992             tz               = _mm256_mul_pd(fscal,dz11);
1993
1994             /* Update vectorial force */
1995             fix1             = _mm256_add_pd(fix1,tx);
1996             fiy1             = _mm256_add_pd(fiy1,ty);
1997             fiz1             = _mm256_add_pd(fiz1,tz);
1998
1999             fjx1             = _mm256_add_pd(fjx1,tx);
2000             fjy1             = _mm256_add_pd(fjy1,ty);
2001             fjz1             = _mm256_add_pd(fjz1,tz);
2002
2003             }
2004
2005             /**************************
2006              * CALCULATE INTERACTIONS *
2007              **************************/
2008
2009             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2010             {
2011
2012             r12              = _mm256_mul_pd(rsq12,rinv12);
2013
2014             /* EWALD ELECTROSTATICS */
2015
2016             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2017             ewrt             = _mm256_mul_pd(r12,ewtabscale);
2018             ewitab           = _mm256_cvttpd_epi32(ewrt);
2019             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2020             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2021                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2022                                             &ewtabF,&ewtabFn);
2023             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2024             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2025
2026             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2027
2028             fscal            = felec;
2029
2030             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2031
2032             /* Calculate temporary vectorial force */
2033             tx               = _mm256_mul_pd(fscal,dx12);
2034             ty               = _mm256_mul_pd(fscal,dy12);
2035             tz               = _mm256_mul_pd(fscal,dz12);
2036
2037             /* Update vectorial force */
2038             fix1             = _mm256_add_pd(fix1,tx);
2039             fiy1             = _mm256_add_pd(fiy1,ty);
2040             fiz1             = _mm256_add_pd(fiz1,tz);
2041
2042             fjx2             = _mm256_add_pd(fjx2,tx);
2043             fjy2             = _mm256_add_pd(fjy2,ty);
2044             fjz2             = _mm256_add_pd(fjz2,tz);
2045
2046             }
2047
2048             /**************************
2049              * CALCULATE INTERACTIONS *
2050              **************************/
2051
2052             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2053             {
2054
2055             r20              = _mm256_mul_pd(rsq20,rinv20);
2056
2057             /* EWALD ELECTROSTATICS */
2058
2059             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2060             ewrt             = _mm256_mul_pd(r20,ewtabscale);
2061             ewitab           = _mm256_cvttpd_epi32(ewrt);
2062             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2063             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2064                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2065                                             &ewtabF,&ewtabFn);
2066             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2067             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
2068
2069             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2070
2071             fscal            = felec;
2072
2073             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2074
2075             /* Calculate temporary vectorial force */
2076             tx               = _mm256_mul_pd(fscal,dx20);
2077             ty               = _mm256_mul_pd(fscal,dy20);
2078             tz               = _mm256_mul_pd(fscal,dz20);
2079
2080             /* Update vectorial force */
2081             fix2             = _mm256_add_pd(fix2,tx);
2082             fiy2             = _mm256_add_pd(fiy2,ty);
2083             fiz2             = _mm256_add_pd(fiz2,tz);
2084
2085             fjx0             = _mm256_add_pd(fjx0,tx);
2086             fjy0             = _mm256_add_pd(fjy0,ty);
2087             fjz0             = _mm256_add_pd(fjz0,tz);
2088
2089             }
2090
2091             /**************************
2092              * CALCULATE INTERACTIONS *
2093              **************************/
2094
2095             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2096             {
2097
2098             r21              = _mm256_mul_pd(rsq21,rinv21);
2099
2100             /* EWALD ELECTROSTATICS */
2101
2102             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2103             ewrt             = _mm256_mul_pd(r21,ewtabscale);
2104             ewitab           = _mm256_cvttpd_epi32(ewrt);
2105             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2106             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2107                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2108                                             &ewtabF,&ewtabFn);
2109             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2110             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2111
2112             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2113
2114             fscal            = felec;
2115
2116             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2117
2118             /* Calculate temporary vectorial force */
2119             tx               = _mm256_mul_pd(fscal,dx21);
2120             ty               = _mm256_mul_pd(fscal,dy21);
2121             tz               = _mm256_mul_pd(fscal,dz21);
2122
2123             /* Update vectorial force */
2124             fix2             = _mm256_add_pd(fix2,tx);
2125             fiy2             = _mm256_add_pd(fiy2,ty);
2126             fiz2             = _mm256_add_pd(fiz2,tz);
2127
2128             fjx1             = _mm256_add_pd(fjx1,tx);
2129             fjy1             = _mm256_add_pd(fjy1,ty);
2130             fjz1             = _mm256_add_pd(fjz1,tz);
2131
2132             }
2133
2134             /**************************
2135              * CALCULATE INTERACTIONS *
2136              **************************/
2137
2138             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2139             {
2140
2141             r22              = _mm256_mul_pd(rsq22,rinv22);
2142
2143             /* EWALD ELECTROSTATICS */
2144
2145             /* Calculate Ewald table index by multiplying r with scale and truncate to integer */
2146             ewrt             = _mm256_mul_pd(r22,ewtabscale);
2147             ewitab           = _mm256_cvttpd_epi32(ewrt);
2148             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2149             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2150                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2151                                             &ewtabF,&ewtabFn);
2152             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2153             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
2154
2155             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2156
2157             fscal            = felec;
2158
2159             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2160
2161             /* Calculate temporary vectorial force */
2162             tx               = _mm256_mul_pd(fscal,dx22);
2163             ty               = _mm256_mul_pd(fscal,dy22);
2164             tz               = _mm256_mul_pd(fscal,dz22);
2165
2166             /* Update vectorial force */
2167             fix2             = _mm256_add_pd(fix2,tx);
2168             fiy2             = _mm256_add_pd(fiy2,ty);
2169             fiz2             = _mm256_add_pd(fiz2,tz);
2170
2171             fjx2             = _mm256_add_pd(fjx2,tx);
2172             fjy2             = _mm256_add_pd(fjy2,ty);
2173             fjz2             = _mm256_add_pd(fjz2,tz);
2174
2175             }
2176
2177             fjptrA             = f+j_coord_offsetA;
2178             fjptrB             = f+j_coord_offsetB;
2179             fjptrC             = f+j_coord_offsetC;
2180             fjptrD             = f+j_coord_offsetD;
2181
2182             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2183                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2184
2185             /* Inner loop uses 374 flops */
2186         }
2187
2188         if(jidx<j_index_end)
2189         {
2190
2191             /* Get j neighbor index, and coordinate index */
2192             jnrlistA         = jjnr[jidx];
2193             jnrlistB         = jjnr[jidx+1];
2194             jnrlistC         = jjnr[jidx+2];
2195             jnrlistD         = jjnr[jidx+3];
2196             /* The sign of each element will be negative for non-real atoms.
2197              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
2198              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
2199              */
2200             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
2201
2202             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
2203             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
2204             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
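            /* The epi32 comparison above produces one 32-bit all-ones mask per negative
             * (padding) index; the two permutes duplicate each 32-bit result into both
             * halves of a 64-bit lane, and the final cast assembles a 256-bit mask with
             * the same lane layout as the double-precision registers used below.
             */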
2205
2206             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
2207             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
2208             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
2209             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
2210             j_coord_offsetA  = DIM*jnrA;
2211             j_coord_offsetB  = DIM*jnrB;
2212             j_coord_offsetC  = DIM*jnrC;
2213             j_coord_offsetD  = DIM*jnrD;
2214
2215             /* load j atom coordinates */
2216             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
2217                                                  x+j_coord_offsetC,x+j_coord_offsetD,
2218                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
2219
2220             /* Calculate displacement vector */
2221             dx00             = _mm256_sub_pd(ix0,jx0);
2222             dy00             = _mm256_sub_pd(iy0,jy0);
2223             dz00             = _mm256_sub_pd(iz0,jz0);
2224             dx01             = _mm256_sub_pd(ix0,jx1);
2225             dy01             = _mm256_sub_pd(iy0,jy1);
2226             dz01             = _mm256_sub_pd(iz0,jz1);
2227             dx02             = _mm256_sub_pd(ix0,jx2);
2228             dy02             = _mm256_sub_pd(iy0,jy2);
2229             dz02             = _mm256_sub_pd(iz0,jz2);
2230             dx10             = _mm256_sub_pd(ix1,jx0);
2231             dy10             = _mm256_sub_pd(iy1,jy0);
2232             dz10             = _mm256_sub_pd(iz1,jz0);
2233             dx11             = _mm256_sub_pd(ix1,jx1);
2234             dy11             = _mm256_sub_pd(iy1,jy1);
2235             dz11             = _mm256_sub_pd(iz1,jz1);
2236             dx12             = _mm256_sub_pd(ix1,jx2);
2237             dy12             = _mm256_sub_pd(iy1,jy2);
2238             dz12             = _mm256_sub_pd(iz1,jz2);
2239             dx20             = _mm256_sub_pd(ix2,jx0);
2240             dy20             = _mm256_sub_pd(iy2,jy0);
2241             dz20             = _mm256_sub_pd(iz2,jz0);
2242             dx21             = _mm256_sub_pd(ix2,jx1);
2243             dy21             = _mm256_sub_pd(iy2,jy1);
2244             dz21             = _mm256_sub_pd(iz2,jz1);
2245             dx22             = _mm256_sub_pd(ix2,jx2);
2246             dy22             = _mm256_sub_pd(iy2,jy2);
2247             dz22             = _mm256_sub_pd(iz2,jz2);
2248
2249             /* Calculate the squared distances and quantities derived from them */
2250             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
2251             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
2252             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
2253             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
2254             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
2255             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
2256             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
2257             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
2258             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
2259
2260             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
2261             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
2262             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
2263             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
2264             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
2265             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
2266             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
2267             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
2268             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
2269
2270             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
2271             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
2272             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
2273             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
2274             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
2275             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
2276             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
2277             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
2278             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
2279
2280             fjx0             = _mm256_setzero_pd();
2281             fjy0             = _mm256_setzero_pd();
2282             fjz0             = _mm256_setzero_pd();
2283             fjx1             = _mm256_setzero_pd();
2284             fjy1             = _mm256_setzero_pd();
2285             fjz1             = _mm256_setzero_pd();
2286             fjx2             = _mm256_setzero_pd();
2287             fjy2             = _mm256_setzero_pd();
2288             fjz2             = _mm256_setzero_pd();
2289
2290             /**************************
2291              * CALCULATE INTERACTIONS *
2292              **************************/
2293
2294             if (gmx_mm256_any_lt(rsq00,rcutoff2))
2295             {
2296
2297             r00              = _mm256_mul_pd(rsq00,rinv00);
2298             r00              = _mm256_andnot_pd(dummy_mask,r00);
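            /* Clearing r in the masked-out lanes keeps the Ewald table index computed
             * below at zero for padding entries, so an ill-defined distance in such a
             * lane cannot produce an out-of-range table load.
             */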
2299
2300             /* EWALD ELECTROSTATICS */
2301
2302             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2303             ewrt             = _mm256_mul_pd(r00,ewtabscale);
2304             ewitab           = _mm256_cvttpd_epi32(ewrt);
2305             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2306             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2307                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2308                                             &ewtabF,&ewtabFn);
2309             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2310             felec            = _mm256_mul_pd(_mm256_mul_pd(qq00,rinv00),_mm256_sub_pd(rinvsq00,felec));
2311
2312             /* Analytical LJ-PME */
2313             rinvsix          = _mm256_mul_pd(_mm256_mul_pd(rinvsq00,rinvsq00),rinvsq00);
2314             ewcljrsq         = _mm256_mul_pd(ewclj2,rsq00);
2315             ewclj6           = _mm256_mul_pd(ewclj2,_mm256_mul_pd(ewclj2,ewclj2));
2316             exponent         = gmx_simd_exp_d(ewcljrsq);
2317             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
2318             poly             = _mm256_mul_pd(exponent,_mm256_add_pd(_mm256_sub_pd(one,ewcljrsq),_mm256_mul_pd(_mm256_mul_pd(ewcljrsq,ewcljrsq),one_half)));
2319             /* f6A = 6 * C6grid * (1 - poly) */
2320             f6A              = _mm256_mul_pd(c6grid_00,_mm256_sub_pd(one,poly));
2321             /* f6B = C6grid * exponent * beta^6 */
2322             f6B              = _mm256_mul_pd(_mm256_mul_pd(c6grid_00,one_sixth),_mm256_mul_pd(exponent,ewclj6));
2323             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
2324             fvdw              = _mm256_mul_pd(_mm256_add_pd(_mm256_mul_pd(_mm256_sub_pd(_mm256_mul_pd(c12_00,rinvsix),_mm256_sub_pd(c6_00,f6A)),rinvsix),f6B),rinvsq00);
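            /* In scalar form the statement above computes
             *   fvdw = ( (c12*r^-6 - (c6 - f6A)) * r^-6 + f6B ) * r^-2,
             * i.e. the plain 12-6 force divided by r, with the dispersion term corrected
             * by f6A and f6B for the part of the interaction that is handled on the
             * LJ-PME reciprocal-space grid (the comments above imply that c6, c12 and
             * c6grid are stored premultiplied by 6 and 12).
             */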
2325
2326             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
2327
2328             fscal            = _mm256_add_pd(felec,fvdw);
2329
2330             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2331
2332             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2333
2334             /* Calculate temporary vectorial force */
2335             tx               = _mm256_mul_pd(fscal,dx00);
2336             ty               = _mm256_mul_pd(fscal,dy00);
2337             tz               = _mm256_mul_pd(fscal,dz00);
2338
2339             /* Update vectorial force */
2340             fix0             = _mm256_add_pd(fix0,tx);
2341             fiy0             = _mm256_add_pd(fiy0,ty);
2342             fiz0             = _mm256_add_pd(fiz0,tz);
2343
2344             fjx0             = _mm256_add_pd(fjx0,tx);
2345             fjy0             = _mm256_add_pd(fjy0,ty);
2346             fjz0             = _mm256_add_pd(fjz0,tz);
2347
2348             }
2349
2350             /**************************
2351              * CALCULATE INTERACTIONS *
2352              **************************/
2353
2354             if (gmx_mm256_any_lt(rsq01,rcutoff2))
2355             {
2356
2357             r01              = _mm256_mul_pd(rsq01,rinv01);
2358             r01              = _mm256_andnot_pd(dummy_mask,r01);
2359
2360             /* EWALD ELECTROSTATICS */
2361
2362             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2363             ewrt             = _mm256_mul_pd(r01,ewtabscale);
2364             ewitab           = _mm256_cvttpd_epi32(ewrt);
2365             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2366             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2367                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2368                                             &ewtabF,&ewtabFn);
2369             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2370             felec            = _mm256_mul_pd(_mm256_mul_pd(qq01,rinv01),_mm256_sub_pd(rinvsq01,felec));
2371
2372             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
2373
2374             fscal            = felec;
2375
2376             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2377
2378             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2379
2380             /* Calculate temporary vectorial force */
2381             tx               = _mm256_mul_pd(fscal,dx01);
2382             ty               = _mm256_mul_pd(fscal,dy01);
2383             tz               = _mm256_mul_pd(fscal,dz01);
2384
2385             /* Update vectorial force */
2386             fix0             = _mm256_add_pd(fix0,tx);
2387             fiy0             = _mm256_add_pd(fiy0,ty);
2388             fiz0             = _mm256_add_pd(fiz0,tz);
2389
2390             fjx1             = _mm256_add_pd(fjx1,tx);
2391             fjy1             = _mm256_add_pd(fjy1,ty);
2392             fjz1             = _mm256_add_pd(fjz1,tz);
2393
2394             }
2395
2396             /**************************
2397              * CALCULATE INTERACTIONS *
2398              **************************/
2399
2400             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2401             {
2402
2403             r02              = _mm256_mul_pd(rsq02,rinv02);
2404             r02              = _mm256_andnot_pd(dummy_mask,r02);
2405
2406             /* EWALD ELECTROSTATICS */
2407
2408             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2409             ewrt             = _mm256_mul_pd(r02,ewtabscale);
2410             ewitab           = _mm256_cvttpd_epi32(ewrt);
2411             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2412             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2413                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2414                                             &ewtabF,&ewtabFn);
2415             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2416             felec            = _mm256_mul_pd(_mm256_mul_pd(qq02,rinv02),_mm256_sub_pd(rinvsq02,felec));
2417
2418             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2419
2420             fscal            = felec;
2421
2422             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2423
2424             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2425
2426             /* Calculate temporary vectorial force */
2427             tx               = _mm256_mul_pd(fscal,dx02);
2428             ty               = _mm256_mul_pd(fscal,dy02);
2429             tz               = _mm256_mul_pd(fscal,dz02);
2430
2431             /* Update vectorial force */
2432             fix0             = _mm256_add_pd(fix0,tx);
2433             fiy0             = _mm256_add_pd(fiy0,ty);
2434             fiz0             = _mm256_add_pd(fiz0,tz);
2435
2436             fjx2             = _mm256_add_pd(fjx2,tx);
2437             fjy2             = _mm256_add_pd(fjy2,ty);
2438             fjz2             = _mm256_add_pd(fjz2,tz);
2439
2440             }
2441
2442             /**************************
2443              * CALCULATE INTERACTIONS *
2444              **************************/
2445
2446             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2447             {
2448
2449             r10              = _mm256_mul_pd(rsq10,rinv10);
2450             r10              = _mm256_andnot_pd(dummy_mask,r10);
2451
2452             /* EWALD ELECTROSTATICS */
2453
2454             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2455             ewrt             = _mm256_mul_pd(r10,ewtabscale);
2456             ewitab           = _mm256_cvttpd_epi32(ewrt);
2457             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2458             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2459                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2460                                             &ewtabF,&ewtabFn);
2461             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2462             felec            = _mm256_mul_pd(_mm256_mul_pd(qq10,rinv10),_mm256_sub_pd(rinvsq10,felec));
2463
2464             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2465
2466             fscal            = felec;
2467
2468             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2469
2470             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2471
2472             /* Calculate temporary vectorial force */
2473             tx               = _mm256_mul_pd(fscal,dx10);
2474             ty               = _mm256_mul_pd(fscal,dy10);
2475             tz               = _mm256_mul_pd(fscal,dz10);
2476
2477             /* Update vectorial force */
2478             fix1             = _mm256_add_pd(fix1,tx);
2479             fiy1             = _mm256_add_pd(fiy1,ty);
2480             fiz1             = _mm256_add_pd(fiz1,tz);
2481
2482             fjx0             = _mm256_add_pd(fjx0,tx);
2483             fjy0             = _mm256_add_pd(fjy0,ty);
2484             fjz0             = _mm256_add_pd(fjz0,tz);
2485
2486             }
2487
2488             /**************************
2489              * CALCULATE INTERACTIONS *
2490              **************************/
2491
2492             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2493             {
2494
2495             r11              = _mm256_mul_pd(rsq11,rinv11);
2496             r11              = _mm256_andnot_pd(dummy_mask,r11);
2497
2498             /* EWALD ELECTROSTATICS */
2499
2500             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2501             ewrt             = _mm256_mul_pd(r11,ewtabscale);
2502             ewitab           = _mm256_cvttpd_epi32(ewrt);
2503             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2504             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2505                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2506                                             &ewtabF,&ewtabFn);
2507             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2508             felec            = _mm256_mul_pd(_mm256_mul_pd(qq11,rinv11),_mm256_sub_pd(rinvsq11,felec));
2509
2510             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2511
2512             fscal            = felec;
2513
2514             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2515
2516             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2517
2518             /* Calculate temporary vectorial force */
2519             tx               = _mm256_mul_pd(fscal,dx11);
2520             ty               = _mm256_mul_pd(fscal,dy11);
2521             tz               = _mm256_mul_pd(fscal,dz11);
2522
2523             /* Update vectorial force */
2524             fix1             = _mm256_add_pd(fix1,tx);
2525             fiy1             = _mm256_add_pd(fiy1,ty);
2526             fiz1             = _mm256_add_pd(fiz1,tz);
2527
2528             fjx1             = _mm256_add_pd(fjx1,tx);
2529             fjy1             = _mm256_add_pd(fjy1,ty);
2530             fjz1             = _mm256_add_pd(fjz1,tz);
2531
2532             }
2533
2534             /**************************
2535              * CALCULATE INTERACTIONS *
2536              **************************/
2537
2538             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2539             {
2540
2541             r12              = _mm256_mul_pd(rsq12,rinv12);
2542             r12              = _mm256_andnot_pd(dummy_mask,r12);
2543
2544             /* EWALD ELECTROSTATICS */
2545
2546             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2547             ewrt             = _mm256_mul_pd(r12,ewtabscale);
2548             ewitab           = _mm256_cvttpd_epi32(ewrt);
2549             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2550             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2551                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2552                                             &ewtabF,&ewtabFn);
2553             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2554             felec            = _mm256_mul_pd(_mm256_mul_pd(qq12,rinv12),_mm256_sub_pd(rinvsq12,felec));
2555
2556             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2557
2558             fscal            = felec;
2559
2560             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2561
2562             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2563
2564             /* Calculate temporary vectorial force */
2565             tx               = _mm256_mul_pd(fscal,dx12);
2566             ty               = _mm256_mul_pd(fscal,dy12);
2567             tz               = _mm256_mul_pd(fscal,dz12);
2568
2569             /* Update vectorial force */
2570             fix1             = _mm256_add_pd(fix1,tx);
2571             fiy1             = _mm256_add_pd(fiy1,ty);
2572             fiz1             = _mm256_add_pd(fiz1,tz);
2573
2574             fjx2             = _mm256_add_pd(fjx2,tx);
2575             fjy2             = _mm256_add_pd(fjy2,ty);
2576             fjz2             = _mm256_add_pd(fjz2,tz);
2577
2578             }
2579
2580             /**************************
2581              * CALCULATE INTERACTIONS *
2582              **************************/
2583
2584             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2585             {
2586
2587             r20              = _mm256_mul_pd(rsq20,rinv20);
2588             r20              = _mm256_andnot_pd(dummy_mask,r20);
2589
2590             /* EWALD ELECTROSTATICS */
2591
2592             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2593             ewrt             = _mm256_mul_pd(r20,ewtabscale);
2594             ewitab           = _mm256_cvttpd_epi32(ewrt);
2595             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2596             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2597                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2598                                             &ewtabF,&ewtabFn);
2599             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2600             felec            = _mm256_mul_pd(_mm256_mul_pd(qq20,rinv20),_mm256_sub_pd(rinvsq20,felec));
2601
2602             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2603
2604             fscal            = felec;
2605
2606             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2607
2608             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2609
2610             /* Calculate temporary vectorial force */
2611             tx               = _mm256_mul_pd(fscal,dx20);
2612             ty               = _mm256_mul_pd(fscal,dy20);
2613             tz               = _mm256_mul_pd(fscal,dz20);
2614
2615             /* Update vectorial force */
2616             fix2             = _mm256_add_pd(fix2,tx);
2617             fiy2             = _mm256_add_pd(fiy2,ty);
2618             fiz2             = _mm256_add_pd(fiz2,tz);
2619
2620             fjx0             = _mm256_add_pd(fjx0,tx);
2621             fjy0             = _mm256_add_pd(fjy0,ty);
2622             fjz0             = _mm256_add_pd(fjz0,tz);
2623
2624             }
2625
2626             /**************************
2627              * CALCULATE INTERACTIONS *
2628              **************************/
2629
2630             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2631             {
2632
2633             r21              = _mm256_mul_pd(rsq21,rinv21);
2634             r21              = _mm256_andnot_pd(dummy_mask,r21);
2635
2636             /* EWALD ELECTROSTATICS */
2637
2638             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2639             ewrt             = _mm256_mul_pd(r21,ewtabscale);
2640             ewitab           = _mm256_cvttpd_epi32(ewrt);
2641             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2642             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2643                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2644                                             &ewtabF,&ewtabFn);
2645             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2646             felec            = _mm256_mul_pd(_mm256_mul_pd(qq21,rinv21),_mm256_sub_pd(rinvsq21,felec));
2647
2648             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2649
2650             fscal            = felec;
2651
2652             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2653
2654             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2655
2656             /* Calculate temporary vectorial force */
2657             tx               = _mm256_mul_pd(fscal,dx21);
2658             ty               = _mm256_mul_pd(fscal,dy21);
2659             tz               = _mm256_mul_pd(fscal,dz21);
2660
2661             /* Update vectorial force */
2662             fix2             = _mm256_add_pd(fix2,tx);
2663             fiy2             = _mm256_add_pd(fiy2,ty);
2664             fiz2             = _mm256_add_pd(fiz2,tz);
2665
2666             fjx1             = _mm256_add_pd(fjx1,tx);
2667             fjy1             = _mm256_add_pd(fjy1,ty);
2668             fjz1             = _mm256_add_pd(fjz1,tz);
2669
2670             }
2671
2672             /**************************
2673              * CALCULATE INTERACTIONS *
2674              **************************/
2675
2676             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2677             {
2678
2679             r22              = _mm256_mul_pd(rsq22,rinv22);
2680             r22              = _mm256_andnot_pd(dummy_mask,r22);
2681
2682             /* EWALD ELECTROSTATICS */
2683
2684             /* Calculate the Ewald table index by multiplying r by the table scale and truncating to an integer */
2685             ewrt             = _mm256_mul_pd(r22,ewtabscale);
2686             ewitab           = _mm256_cvttpd_epi32(ewrt);
2687             eweps            = _mm256_sub_pd(ewrt,_mm256_round_pd(ewrt, _MM_FROUND_FLOOR));
2688             gmx_mm256_load_4pair_swizzle_pd(ewtab + _mm_extract_epi32(ewitab,0),ewtab + _mm_extract_epi32(ewitab,1),
2689                                             ewtab + _mm_extract_epi32(ewitab,2),ewtab + _mm_extract_epi32(ewitab,3),
2690                                             &ewtabF,&ewtabFn);
2691             felec            = _mm256_add_pd(_mm256_mul_pd( _mm256_sub_pd(one,eweps),ewtabF),_mm256_mul_pd(eweps,ewtabFn));
2692             felec            = _mm256_mul_pd(_mm256_mul_pd(qq22,rinv22),_mm256_sub_pd(rinvsq22,felec));
2693
2694             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2695
2696             fscal            = felec;
2697
2698             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2699
2700             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2701
2702             /* Calculate temporary vectorial force */
2703             tx               = _mm256_mul_pd(fscal,dx22);
2704             ty               = _mm256_mul_pd(fscal,dy22);
2705             tz               = _mm256_mul_pd(fscal,dz22);
2706
2707             /* Update vectorial force */
2708             fix2             = _mm256_add_pd(fix2,tx);
2709             fiy2             = _mm256_add_pd(fiy2,ty);
2710             fiz2             = _mm256_add_pd(fiz2,tz);
2711
2712             fjx2             = _mm256_add_pd(fjx2,tx);
2713             fjy2             = _mm256_add_pd(fjy2,ty);
2714             fjz2             = _mm256_add_pd(fjz2,tz);
2715
2716             }
2717
2718             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2719             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2720             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2721             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
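            /* Lanes whose neighbour index was negative (padding entries) are pointed at
             * the scratch buffer rather than the real force array, so the zeroed
             * contributions accumulated for those lanes are discarded harmlessly.
             */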
2722
2723             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2724                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2725
2726             /* Inner loop uses 383 flops */
2727         }
2728
2729         /* End of innermost loop */
2730
2731         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2732                                                  f+i_coord_offset,fshift+i_shift_offset);
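        /* gmx_mm256_update_iforce_3atom_swizzle_pd() reduces the four SIMD lanes of the
         * accumulated i-particle forces, adds them to f[] for the three water atoms of
         * this i entry, and adds the same totals to fshift[] for the shift-force
         * (virial) bookkeeping.
         */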
2733
2734         /* Increment number of inner iterations */
2735         inneriter                  += j_index_end - j_index_start;
2736
2737         /* Outer loop uses 18 flops */
2738     }
2739
2740     /* Increment number of outer iterations */
2741     outeriter        += nri;
2742
2743     /* Update outer/inner flops */
2744
2745     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*383);
2746 }