1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014,2015,2017, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
38 #include "gmxpre.h"
39
40 #include "config.h"
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "gromacs/gmxlib/nrnb.h"
46
47 #include "kernelutil_x86_avx_256_double.h"
48
49 /*
50  * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_double
51  * Electrostatics interaction: ReactionField
52  * VdW interaction:            CubicSplineTable
53  * Geometry:                   Water3-Water3
54  * Calculate force/pot:        PotentialAndForce
55  */
56 void
57 nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_double
58                     (t_nblist                    * gmx_restrict       nlist,
59                      rvec                        * gmx_restrict          xx,
60                      rvec                        * gmx_restrict          ff,
61                      struct t_forcerec           * gmx_restrict          fr,
62                      t_mdatoms                   * gmx_restrict     mdatoms,
63                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
64                      t_nrnb                      * gmx_restrict        nrnb)
65 {
66     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
67      * just 0 for non-waters.
68      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
69      * jnr indices corresponding to data put in the four positions in the SIMD register.
70      */
71     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
72     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
73     int              jnrA,jnrB,jnrC,jnrD;
74     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
75     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
76     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
77     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
78     real             rcutoff_scalar;
79     real             *shiftvec,*fshift,*x,*f;
80     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
81     real             scratch[4*DIM];
82     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
83     real *           vdwioffsetptr0;
84     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
85     real *           vdwioffsetptr1;
86     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
87     real *           vdwioffsetptr2;
88     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
89     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
90     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
91     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
92     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
93     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
94     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
95     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
96     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
97     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
98     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
99     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
100     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
101     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
102     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
103     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
104     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
105     real             *charge;
106     int              nvdwtype;
107     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
108     int              *vdwtype;
109     real             *vdwparam;
110     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
111     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
112     __m128i          vfitab;
113     __m128i          ifour       = _mm_set1_epi32(4);
114     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
115     real             *vftab;
116     __m256d          dummy_mask,cutoff_mask;
117     __m128           tmpmask0,tmpmask1;
118     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
119     __m256d          one     = _mm256_set1_pd(1.0);
120     __m256d          two     = _mm256_set1_pd(2.0);
121     x                = xx[0];
122     f                = ff[0];
123
124     nri              = nlist->nri;
125     iinr             = nlist->iinr;
126     jindex           = nlist->jindex;
127     jjnr             = nlist->jjnr;
128     shiftidx         = nlist->shift;
129     gid              = nlist->gid;
130     shiftvec         = fr->shift_vec[0];
131     fshift           = fr->fshift[0];
132     facel            = _mm256_set1_pd(fr->ic->epsfac);
133     charge           = mdatoms->chargeA;
134     krf              = _mm256_set1_pd(fr->ic->k_rf);
135     krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
136     crf              = _mm256_set1_pd(fr->ic->c_rf);
137     nvdwtype         = fr->ntype;
138     vdwparam         = fr->nbfp;
139     vdwtype          = mdatoms->typeA;
140
141     vftab            = kernel_data->table_vdw->data;
142     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
143
144     /* Setup water-specific parameters */
145     inr              = nlist->iinr[0];
146     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
147     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
148     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
149     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
150
151     jq0              = _mm256_set1_pd(charge[inr+0]);
152     jq1              = _mm256_set1_pd(charge[inr+1]);
153     jq2              = _mm256_set1_pd(charge[inr+2]);
154     vdwjidx0A        = 2*vdwtype[inr+0];
155     qq00             = _mm256_mul_pd(iq0,jq0);
156     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
157     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
158     qq01             = _mm256_mul_pd(iq0,jq1);
159     qq02             = _mm256_mul_pd(iq0,jq2);
160     qq10             = _mm256_mul_pd(iq1,jq0);
161     qq11             = _mm256_mul_pd(iq1,jq1);
162     qq12             = _mm256_mul_pd(iq1,jq2);
163     qq20             = _mm256_mul_pd(iq2,jq0);
164     qq21             = _mm256_mul_pd(iq2,jq1);
165     qq22             = _mm256_mul_pd(iq2,jq2);
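    /* Both the i and j molecules are three-site waters sharing the charges and
     * O-O table parameters of the first list entry, so all nine charge products
     * can be precomputed once outside the loops.
     */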
166
167     /* When we use explicit cutoffs, the value must be identical for elec and VdW, so we use the elec cutoff as an arbitrary choice */
168     rcutoff_scalar   = fr->ic->rcoulomb;
169     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
170     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
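    /* The cutoff tests below compare rsq against rcutoff2 directly, which avoids
     * taking a square root just to apply the cutoff.
     */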
171
172     /* Avoid stupid compiler warnings */
173     jnrA = jnrB = jnrC = jnrD = 0;
174     j_coord_offsetA = 0;
175     j_coord_offsetB = 0;
176     j_coord_offsetC = 0;
177     j_coord_offsetD = 0;
178
179     outeriter        = 0;
180     inneriter        = 0;
181
182     for(iidx=0;iidx<4*DIM;iidx++)
183     {
184         scratch[iidx] = 0.0;
185     }
186
187     /* Start outer loop over neighborlists */
188     for(iidx=0; iidx<nri; iidx++)
189     {
190         /* Load shift vector for this list */
191         i_shift_offset   = DIM*shiftidx[iidx];
192
193         /* Load limits for loop over neighbors */
194         j_index_start    = jindex[iidx];
195         j_index_end      = jindex[iidx+1];
196
197         /* Get outer coordinate index */
198         inr              = iinr[iidx];
199         i_coord_offset   = DIM*inr;
200
201         /* Load i particle coords and add shift vector */
202         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
203                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
204
205         fix0             = _mm256_setzero_pd();
206         fiy0             = _mm256_setzero_pd();
207         fiz0             = _mm256_setzero_pd();
208         fix1             = _mm256_setzero_pd();
209         fiy1             = _mm256_setzero_pd();
210         fiz1             = _mm256_setzero_pd();
211         fix2             = _mm256_setzero_pd();
212         fiy2             = _mm256_setzero_pd();
213         fiz2             = _mm256_setzero_pd();
214
215         /* Reset potential sums */
216         velecsum         = _mm256_setzero_pd();
217         vvdwsum          = _mm256_setzero_pd();
218
219         /* Start inner kernel loop */
220         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
221         {
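            /* Each iteration handles four j waters, one per SIMD lane. The loop
             * stops before any padded entries (negative jjnr); those are treated
             * by the masked epilogue following this loop.
             */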
222
223             /* Get j neighbor index, and coordinate index */
224             jnrA             = jjnr[jidx];
225             jnrB             = jjnr[jidx+1];
226             jnrC             = jjnr[jidx+2];
227             jnrD             = jjnr[jidx+3];
228             j_coord_offsetA  = DIM*jnrA;
229             j_coord_offsetB  = DIM*jnrB;
230             j_coord_offsetC  = DIM*jnrC;
231             j_coord_offsetD  = DIM*jnrD;
232
233             /* load j atom coordinates */
234             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
235                                                  x+j_coord_offsetC,x+j_coord_offsetD,
236                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
237
238             /* Calculate displacement vector */
239             dx00             = _mm256_sub_pd(ix0,jx0);
240             dy00             = _mm256_sub_pd(iy0,jy0);
241             dz00             = _mm256_sub_pd(iz0,jz0);
242             dx01             = _mm256_sub_pd(ix0,jx1);
243             dy01             = _mm256_sub_pd(iy0,jy1);
244             dz01             = _mm256_sub_pd(iz0,jz1);
245             dx02             = _mm256_sub_pd(ix0,jx2);
246             dy02             = _mm256_sub_pd(iy0,jy2);
247             dz02             = _mm256_sub_pd(iz0,jz2);
248             dx10             = _mm256_sub_pd(ix1,jx0);
249             dy10             = _mm256_sub_pd(iy1,jy0);
250             dz10             = _mm256_sub_pd(iz1,jz0);
251             dx11             = _mm256_sub_pd(ix1,jx1);
252             dy11             = _mm256_sub_pd(iy1,jy1);
253             dz11             = _mm256_sub_pd(iz1,jz1);
254             dx12             = _mm256_sub_pd(ix1,jx2);
255             dy12             = _mm256_sub_pd(iy1,jy2);
256             dz12             = _mm256_sub_pd(iz1,jz2);
257             dx20             = _mm256_sub_pd(ix2,jx0);
258             dy20             = _mm256_sub_pd(iy2,jy0);
259             dz20             = _mm256_sub_pd(iz2,jz0);
260             dx21             = _mm256_sub_pd(ix2,jx1);
261             dy21             = _mm256_sub_pd(iy2,jy1);
262             dz21             = _mm256_sub_pd(iz2,jz1);
263             dx22             = _mm256_sub_pd(ix2,jx2);
264             dy22             = _mm256_sub_pd(iy2,jy2);
265             dz22             = _mm256_sub_pd(iz2,jz2);
266
267             /* Calculate squared distance and things based on it */
268             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
269             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
270             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
271             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
272             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
273             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
274             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
275             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
276             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
277
278             rinv00           = avx256_invsqrt_d(rsq00);
279             rinv01           = avx256_invsqrt_d(rsq01);
280             rinv02           = avx256_invsqrt_d(rsq02);
281             rinv10           = avx256_invsqrt_d(rsq10);
282             rinv11           = avx256_invsqrt_d(rsq11);
283             rinv12           = avx256_invsqrt_d(rsq12);
284             rinv20           = avx256_invsqrt_d(rsq20);
285             rinv21           = avx256_invsqrt_d(rsq21);
286             rinv22           = avx256_invsqrt_d(rsq22);
287
288             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
289             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
290             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
291             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
292             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
293             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
294             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
295             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
296             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
297
298             fjx0             = _mm256_setzero_pd();
299             fjy0             = _mm256_setzero_pd();
300             fjz0             = _mm256_setzero_pd();
301             fjx1             = _mm256_setzero_pd();
302             fjy1             = _mm256_setzero_pd();
303             fjz1             = _mm256_setzero_pd();
304             fjx2             = _mm256_setzero_pd();
305             fjy2             = _mm256_setzero_pd();
306             fjz2             = _mm256_setzero_pd();
307
308             /**************************
309              * CALCULATE INTERACTIONS *
310              **************************/
311
312             if (gmx_mm256_any_lt(rsq00,rcutoff2))
313             {
314
315             r00              = _mm256_mul_pd(rsq00,rinv00);
316
317             /* Calculate table index by multiplying r with table scale and truncate to integer */
318             rt               = _mm256_mul_pd(r00,vftabscale);
319             vfitab           = _mm256_cvttpd_epi32(rt);
320             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
321             vfitab           = _mm_slli_epi32(vfitab,3);
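            /* Each table point holds 8 doubles: Y,F,G,H for dispersion followed by
             * Y,F,G,H for repulsion, hence the shift by 3 here and the +4 offset
             * used for the repulsion lookup below. vfeps is the fractional distance
             * into the current table interval.
             */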
322
323             /* REACTION-FIELD ELECTROSTATICS */
324             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
325             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
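            /* Scalar form of the reaction-field terms computed above:
             *   velec = qq*(1/r + k_rf*r^2 - c_rf)
             *   felec = qq*(1/r^3 - 2*k_rf)        (scalar force divided by r)
             */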
326
327             /* CUBIC SPLINE TABLE DISPERSION */
328             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
329             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
330             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
331             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
332             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
333             Heps             = _mm256_mul_pd(vfeps,H);
334             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
335             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
336             vvdw6            = _mm256_mul_pd(c6_00,VV);
337             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
338             fvdw6            = _mm256_mul_pd(c6_00,FF);
339
340             /* CUBIC SPLINE TABLE REPULSION */
341             vfitab           = _mm_add_epi32(vfitab,ifour);
342             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
343             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
344             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
345             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
346             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
347             Heps             = _mm256_mul_pd(vfeps,H);
348             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
349             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
350             vvdw12           = _mm256_mul_pd(c12_00,VV);
351             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
352             fvdw12           = _mm256_mul_pd(c12_00,FF);
353             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
354             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
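            /* Cubic spline evaluation: with vfeps the fractional table offset,
             *   V(eps)  = Y + eps*(F + eps*(G + eps*H))
             *   dV/deps = F + eps*(2*G + 3*eps*H)     (the FF terms above)
             * fvdw combines the two FF terms, flips the sign and scales by
             * tabscale/r, giving the VdW force divided by r so it can be
             * multiplied directly by the displacement components.
             */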
355
356             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
357
358             /* Update potential sum for this i atom from the interaction with this j atom. */
359             velec            = _mm256_and_pd(velec,cutoff_mask);
360             velecsum         = _mm256_add_pd(velecsum,velec);
361             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
362             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
363
364             fscal            = _mm256_add_pd(felec,fvdw);
365
366             fscal            = _mm256_and_pd(fscal,cutoff_mask);
367
368             /* Calculate temporary vectorial force */
369             tx               = _mm256_mul_pd(fscal,dx00);
370             ty               = _mm256_mul_pd(fscal,dy00);
371             tz               = _mm256_mul_pd(fscal,dz00);
372
373             /* Update vectorial force */
374             fix0             = _mm256_add_pd(fix0,tx);
375             fiy0             = _mm256_add_pd(fiy0,ty);
376             fiz0             = _mm256_add_pd(fiz0,tz);
377
378             fjx0             = _mm256_add_pd(fjx0,tx);
379             fjy0             = _mm256_add_pd(fjy0,ty);
380             fjz0             = _mm256_add_pd(fjz0,tz);
381
382             }
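            /* Only the O-O (site 0 - site 0) pair carries the tabulated VdW terms;
             * the eight remaining site pairs below use reaction-field electrostatics
             * only, matching the single c6_00/c12_00 pair set up above.
             */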
383
384             /**************************
385              * CALCULATE INTERACTIONS *
386              **************************/
387
388             if (gmx_mm256_any_lt(rsq01,rcutoff2))
389             {
390
391             /* REACTION-FIELD ELECTROSTATICS */
392             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
393             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
394
395             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
396
397             /* Update potential sum for this i atom from the interaction with this j atom. */
398             velec            = _mm256_and_pd(velec,cutoff_mask);
399             velecsum         = _mm256_add_pd(velecsum,velec);
400
401             fscal            = felec;
402
403             fscal            = _mm256_and_pd(fscal,cutoff_mask);
404
405             /* Calculate temporary vectorial force */
406             tx               = _mm256_mul_pd(fscal,dx01);
407             ty               = _mm256_mul_pd(fscal,dy01);
408             tz               = _mm256_mul_pd(fscal,dz01);
409
410             /* Update vectorial force */
411             fix0             = _mm256_add_pd(fix0,tx);
412             fiy0             = _mm256_add_pd(fiy0,ty);
413             fiz0             = _mm256_add_pd(fiz0,tz);
414
415             fjx1             = _mm256_add_pd(fjx1,tx);
416             fjy1             = _mm256_add_pd(fjy1,ty);
417             fjz1             = _mm256_add_pd(fjz1,tz);
418
419             }
420
421             /**************************
422              * CALCULATE INTERACTIONS *
423              **************************/
424
425             if (gmx_mm256_any_lt(rsq02,rcutoff2))
426             {
427
428             /* REACTION-FIELD ELECTROSTATICS */
429             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
430             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
431
432             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
433
434             /* Update potential sum for this i atom from the interaction with this j atom. */
435             velec            = _mm256_and_pd(velec,cutoff_mask);
436             velecsum         = _mm256_add_pd(velecsum,velec);
437
438             fscal            = felec;
439
440             fscal            = _mm256_and_pd(fscal,cutoff_mask);
441
442             /* Calculate temporary vectorial force */
443             tx               = _mm256_mul_pd(fscal,dx02);
444             ty               = _mm256_mul_pd(fscal,dy02);
445             tz               = _mm256_mul_pd(fscal,dz02);
446
447             /* Update vectorial force */
448             fix0             = _mm256_add_pd(fix0,tx);
449             fiy0             = _mm256_add_pd(fiy0,ty);
450             fiz0             = _mm256_add_pd(fiz0,tz);
451
452             fjx2             = _mm256_add_pd(fjx2,tx);
453             fjy2             = _mm256_add_pd(fjy2,ty);
454             fjz2             = _mm256_add_pd(fjz2,tz);
455
456             }
457
458             /**************************
459              * CALCULATE INTERACTIONS *
460              **************************/
461
462             if (gmx_mm256_any_lt(rsq10,rcutoff2))
463             {
464
465             /* REACTION-FIELD ELECTROSTATICS */
466             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
467             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
468
469             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
470
471             /* Update potential sum for this i atom from the interaction with this j atom. */
472             velec            = _mm256_and_pd(velec,cutoff_mask);
473             velecsum         = _mm256_add_pd(velecsum,velec);
474
475             fscal            = felec;
476
477             fscal            = _mm256_and_pd(fscal,cutoff_mask);
478
479             /* Calculate temporary vectorial force */
480             tx               = _mm256_mul_pd(fscal,dx10);
481             ty               = _mm256_mul_pd(fscal,dy10);
482             tz               = _mm256_mul_pd(fscal,dz10);
483
484             /* Update vectorial force */
485             fix1             = _mm256_add_pd(fix1,tx);
486             fiy1             = _mm256_add_pd(fiy1,ty);
487             fiz1             = _mm256_add_pd(fiz1,tz);
488
489             fjx0             = _mm256_add_pd(fjx0,tx);
490             fjy0             = _mm256_add_pd(fjy0,ty);
491             fjz0             = _mm256_add_pd(fjz0,tz);
492
493             }
494
495             /**************************
496              * CALCULATE INTERACTIONS *
497              **************************/
498
499             if (gmx_mm256_any_lt(rsq11,rcutoff2))
500             {
501
502             /* REACTION-FIELD ELECTROSTATICS */
503             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
504             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
505
506             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
507
508             /* Update potential sum for this i atom from the interaction with this j atom. */
509             velec            = _mm256_and_pd(velec,cutoff_mask);
510             velecsum         = _mm256_add_pd(velecsum,velec);
511
512             fscal            = felec;
513
514             fscal            = _mm256_and_pd(fscal,cutoff_mask);
515
516             /* Calculate temporary vectorial force */
517             tx               = _mm256_mul_pd(fscal,dx11);
518             ty               = _mm256_mul_pd(fscal,dy11);
519             tz               = _mm256_mul_pd(fscal,dz11);
520
521             /* Update vectorial force */
522             fix1             = _mm256_add_pd(fix1,tx);
523             fiy1             = _mm256_add_pd(fiy1,ty);
524             fiz1             = _mm256_add_pd(fiz1,tz);
525
526             fjx1             = _mm256_add_pd(fjx1,tx);
527             fjy1             = _mm256_add_pd(fjy1,ty);
528             fjz1             = _mm256_add_pd(fjz1,tz);
529
530             }
531
532             /**************************
533              * CALCULATE INTERACTIONS *
534              **************************/
535
536             if (gmx_mm256_any_lt(rsq12,rcutoff2))
537             {
538
539             /* REACTION-FIELD ELECTROSTATICS */
540             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
541             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
542
543             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
544
545             /* Update potential sum for this i atom from the interaction with this j atom. */
546             velec            = _mm256_and_pd(velec,cutoff_mask);
547             velecsum         = _mm256_add_pd(velecsum,velec);
548
549             fscal            = felec;
550
551             fscal            = _mm256_and_pd(fscal,cutoff_mask);
552
553             /* Calculate temporary vectorial force */
554             tx               = _mm256_mul_pd(fscal,dx12);
555             ty               = _mm256_mul_pd(fscal,dy12);
556             tz               = _mm256_mul_pd(fscal,dz12);
557
558             /* Update vectorial force */
559             fix1             = _mm256_add_pd(fix1,tx);
560             fiy1             = _mm256_add_pd(fiy1,ty);
561             fiz1             = _mm256_add_pd(fiz1,tz);
562
563             fjx2             = _mm256_add_pd(fjx2,tx);
564             fjy2             = _mm256_add_pd(fjy2,ty);
565             fjz2             = _mm256_add_pd(fjz2,tz);
566
567             }
568
569             /**************************
570              * CALCULATE INTERACTIONS *
571              **************************/
572
573             if (gmx_mm256_any_lt(rsq20,rcutoff2))
574             {
575
576             /* REACTION-FIELD ELECTROSTATICS */
577             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
578             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
579
580             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
581
582             /* Update potential sum for this i atom from the interaction with this j atom. */
583             velec            = _mm256_and_pd(velec,cutoff_mask);
584             velecsum         = _mm256_add_pd(velecsum,velec);
585
586             fscal            = felec;
587
588             fscal            = _mm256_and_pd(fscal,cutoff_mask);
589
590             /* Calculate temporary vectorial force */
591             tx               = _mm256_mul_pd(fscal,dx20);
592             ty               = _mm256_mul_pd(fscal,dy20);
593             tz               = _mm256_mul_pd(fscal,dz20);
594
595             /* Update vectorial force */
596             fix2             = _mm256_add_pd(fix2,tx);
597             fiy2             = _mm256_add_pd(fiy2,ty);
598             fiz2             = _mm256_add_pd(fiz2,tz);
599
600             fjx0             = _mm256_add_pd(fjx0,tx);
601             fjy0             = _mm256_add_pd(fjy0,ty);
602             fjz0             = _mm256_add_pd(fjz0,tz);
603
604             }
605
606             /**************************
607              * CALCULATE INTERACTIONS *
608              **************************/
609
610             if (gmx_mm256_any_lt(rsq21,rcutoff2))
611             {
612
613             /* REACTION-FIELD ELECTROSTATICS */
614             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
615             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
616
617             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
618
619             /* Update potential sum for this i atom from the interaction with this j atom. */
620             velec            = _mm256_and_pd(velec,cutoff_mask);
621             velecsum         = _mm256_add_pd(velecsum,velec);
622
623             fscal            = felec;
624
625             fscal            = _mm256_and_pd(fscal,cutoff_mask);
626
627             /* Calculate temporary vectorial force */
628             tx               = _mm256_mul_pd(fscal,dx21);
629             ty               = _mm256_mul_pd(fscal,dy21);
630             tz               = _mm256_mul_pd(fscal,dz21);
631
632             /* Update vectorial force */
633             fix2             = _mm256_add_pd(fix2,tx);
634             fiy2             = _mm256_add_pd(fiy2,ty);
635             fiz2             = _mm256_add_pd(fiz2,tz);
636
637             fjx1             = _mm256_add_pd(fjx1,tx);
638             fjy1             = _mm256_add_pd(fjy1,ty);
639             fjz1             = _mm256_add_pd(fjz1,tz);
640
641             }
642
643             /**************************
644              * CALCULATE INTERACTIONS *
645              **************************/
646
647             if (gmx_mm256_any_lt(rsq22,rcutoff2))
648             {
649
650             /* REACTION-FIELD ELECTROSTATICS */
651             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
652             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
653
654             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
655
656             /* Update potential sum for this i atom from the interaction with this j atom. */
657             velec            = _mm256_and_pd(velec,cutoff_mask);
658             velecsum         = _mm256_add_pd(velecsum,velec);
659
660             fscal            = felec;
661
662             fscal            = _mm256_and_pd(fscal,cutoff_mask);
663
664             /* Calculate temporary vectorial force */
665             tx               = _mm256_mul_pd(fscal,dx22);
666             ty               = _mm256_mul_pd(fscal,dy22);
667             tz               = _mm256_mul_pd(fscal,dz22);
668
669             /* Update vectorial force */
670             fix2             = _mm256_add_pd(fix2,tx);
671             fiy2             = _mm256_add_pd(fiy2,ty);
672             fiz2             = _mm256_add_pd(fiz2,tz);
673
674             fjx2             = _mm256_add_pd(fjx2,tx);
675             fjy2             = _mm256_add_pd(fjy2,ty);
676             fjz2             = _mm256_add_pd(fjz2,tz);
677
678             }
679
680             fjptrA             = f+j_coord_offsetA;
681             fjptrB             = f+j_coord_offsetB;
682             fjptrC             = f+j_coord_offsetC;
683             fjptrD             = f+j_coord_offsetD;
684
685             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
686                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
687
688             /* Inner loop uses 360 flops */
689         }
690
691         if(jidx<j_index_end)
692         {
693
694             /* Get j neighbor index, and coordinate index */
695             jnrlistA         = jjnr[jidx];
696             jnrlistB         = jjnr[jidx+1];
697             jnrlistC         = jjnr[jidx+2];
698             jnrlistD         = jjnr[jidx+3];
699             /* The sign of each jjnr element is negative for padded (non-real) atoms.
700              * After widening, every 64-bit lane of dummy_mask is all-ones for those dummy
701              * entries and zero for real ones, so val = _mm256_andnot_pd(dummy_mask,val) clears them.
702              */
703             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
704
705             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
706             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
707             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
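            /* The four 32-bit comparison results are duplicated pairwise and stacked
             * into a 256-bit register, so each 64-bit double lane of dummy_mask is
             * all-ones for a padded entry and zero for a real one.
             */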
708
709             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
710             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
711             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
712             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
713             j_coord_offsetA  = DIM*jnrA;
714             j_coord_offsetB  = DIM*jnrB;
715             j_coord_offsetC  = DIM*jnrC;
716             j_coord_offsetD  = DIM*jnrD;
717
718             /* load j atom coordinates */
719             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
720                                                  x+j_coord_offsetC,x+j_coord_offsetD,
721                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
722
723             /* Calculate displacement vector */
724             dx00             = _mm256_sub_pd(ix0,jx0);
725             dy00             = _mm256_sub_pd(iy0,jy0);
726             dz00             = _mm256_sub_pd(iz0,jz0);
727             dx01             = _mm256_sub_pd(ix0,jx1);
728             dy01             = _mm256_sub_pd(iy0,jy1);
729             dz01             = _mm256_sub_pd(iz0,jz1);
730             dx02             = _mm256_sub_pd(ix0,jx2);
731             dy02             = _mm256_sub_pd(iy0,jy2);
732             dz02             = _mm256_sub_pd(iz0,jz2);
733             dx10             = _mm256_sub_pd(ix1,jx0);
734             dy10             = _mm256_sub_pd(iy1,jy0);
735             dz10             = _mm256_sub_pd(iz1,jz0);
736             dx11             = _mm256_sub_pd(ix1,jx1);
737             dy11             = _mm256_sub_pd(iy1,jy1);
738             dz11             = _mm256_sub_pd(iz1,jz1);
739             dx12             = _mm256_sub_pd(ix1,jx2);
740             dy12             = _mm256_sub_pd(iy1,jy2);
741             dz12             = _mm256_sub_pd(iz1,jz2);
742             dx20             = _mm256_sub_pd(ix2,jx0);
743             dy20             = _mm256_sub_pd(iy2,jy0);
744             dz20             = _mm256_sub_pd(iz2,jz0);
745             dx21             = _mm256_sub_pd(ix2,jx1);
746             dy21             = _mm256_sub_pd(iy2,jy1);
747             dz21             = _mm256_sub_pd(iz2,jz1);
748             dx22             = _mm256_sub_pd(ix2,jx2);
749             dy22             = _mm256_sub_pd(iy2,jy2);
750             dz22             = _mm256_sub_pd(iz2,jz2);
751
752             /* Calculate squared distance and things based on it */
753             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
754             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
755             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
756             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
757             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
758             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
759             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
760             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
761             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
762
763             rinv00           = avx256_invsqrt_d(rsq00);
764             rinv01           = avx256_invsqrt_d(rsq01);
765             rinv02           = avx256_invsqrt_d(rsq02);
766             rinv10           = avx256_invsqrt_d(rsq10);
767             rinv11           = avx256_invsqrt_d(rsq11);
768             rinv12           = avx256_invsqrt_d(rsq12);
769             rinv20           = avx256_invsqrt_d(rsq20);
770             rinv21           = avx256_invsqrt_d(rsq21);
771             rinv22           = avx256_invsqrt_d(rsq22);
772
773             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
774             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
775             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
776             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
777             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
778             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
779             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
780             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
781             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
782
783             fjx0             = _mm256_setzero_pd();
784             fjy0             = _mm256_setzero_pd();
785             fjz0             = _mm256_setzero_pd();
786             fjx1             = _mm256_setzero_pd();
787             fjy1             = _mm256_setzero_pd();
788             fjz1             = _mm256_setzero_pd();
789             fjx2             = _mm256_setzero_pd();
790             fjy2             = _mm256_setzero_pd();
791             fjz2             = _mm256_setzero_pd();
792
793             /**************************
794              * CALCULATE INTERACTIONS *
795              **************************/
796
797             if (gmx_mm256_any_lt(rsq00,rcutoff2))
798             {
799
800             r00              = _mm256_mul_pd(rsq00,rinv00);
801             r00              = _mm256_andnot_pd(dummy_mask,r00);
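            /* r is zeroed for the padded lanes so the table index derived from it
             * stays at zero rather than indexing past the end of vftab; the padded
             * contributions are cleared again before the accumulations below.
             */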
802
803             /* Calculate table index by multiplying r with table scale and truncate to integer */
804             rt               = _mm256_mul_pd(r00,vftabscale);
805             vfitab           = _mm256_cvttpd_epi32(rt);
806             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
807             vfitab           = _mm_slli_epi32(vfitab,3);
808
809             /* REACTION-FIELD ELECTROSTATICS */
810             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
811             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
812
813             /* CUBIC SPLINE TABLE DISPERSION */
814             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
815             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
816             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
817             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
818             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
819             Heps             = _mm256_mul_pd(vfeps,H);
820             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
821             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
822             vvdw6            = _mm256_mul_pd(c6_00,VV);
823             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
824             fvdw6            = _mm256_mul_pd(c6_00,FF);
825
826             /* CUBIC SPLINE TABLE REPULSION */
827             vfitab           = _mm_add_epi32(vfitab,ifour);
828             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
829             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
830             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
831             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
832             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
833             Heps             = _mm256_mul_pd(vfeps,H);
834             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
835             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
836             vvdw12           = _mm256_mul_pd(c12_00,VV);
837             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
838             fvdw12           = _mm256_mul_pd(c12_00,FF);
839             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
840             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
841
842             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
843
844             /* Update potential sum for this i atom from the interaction with this j atom. */
845             velec            = _mm256_and_pd(velec,cutoff_mask);
846             velec            = _mm256_andnot_pd(dummy_mask,velec);
847             velecsum         = _mm256_add_pd(velecsum,velec);
848             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
849             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
850             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
851
852             fscal            = _mm256_add_pd(felec,fvdw);
853
854             fscal            = _mm256_and_pd(fscal,cutoff_mask);
855
856             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
857
858             /* Calculate temporary vectorial force */
859             tx               = _mm256_mul_pd(fscal,dx00);
860             ty               = _mm256_mul_pd(fscal,dy00);
861             tz               = _mm256_mul_pd(fscal,dz00);
862
863             /* Update vectorial force */
864             fix0             = _mm256_add_pd(fix0,tx);
865             fiy0             = _mm256_add_pd(fiy0,ty);
866             fiz0             = _mm256_add_pd(fiz0,tz);
867
868             fjx0             = _mm256_add_pd(fjx0,tx);
869             fjy0             = _mm256_add_pd(fjy0,ty);
870             fjz0             = _mm256_add_pd(fjz0,tz);
871
872             }
873
874             /**************************
875              * CALCULATE INTERACTIONS *
876              **************************/
877
878             if (gmx_mm256_any_lt(rsq01,rcutoff2))
879             {
880
881             /* REACTION-FIELD ELECTROSTATICS */
882             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
883             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
884
885             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
886
887             /* Update potential sum for this i atom from the interaction with this j atom. */
888             velec            = _mm256_and_pd(velec,cutoff_mask);
889             velec            = _mm256_andnot_pd(dummy_mask,velec);
890             velecsum         = _mm256_add_pd(velecsum,velec);
891
892             fscal            = felec;
893
894             fscal            = _mm256_and_pd(fscal,cutoff_mask);
895
896             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
897
898             /* Calculate temporary vectorial force */
899             tx               = _mm256_mul_pd(fscal,dx01);
900             ty               = _mm256_mul_pd(fscal,dy01);
901             tz               = _mm256_mul_pd(fscal,dz01);
902
903             /* Update vectorial force */
904             fix0             = _mm256_add_pd(fix0,tx);
905             fiy0             = _mm256_add_pd(fiy0,ty);
906             fiz0             = _mm256_add_pd(fiz0,tz);
907
908             fjx1             = _mm256_add_pd(fjx1,tx);
909             fjy1             = _mm256_add_pd(fjy1,ty);
910             fjz1             = _mm256_add_pd(fjz1,tz);
911
912             }
913
914             /**************************
915              * CALCULATE INTERACTIONS *
916              **************************/
917
918             if (gmx_mm256_any_lt(rsq02,rcutoff2))
919             {
920
921             /* REACTION-FIELD ELECTROSTATICS */
922             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
923             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
924
925             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
926
927             /* Update potential sum for this i atom from the interaction with this j atom. */
928             velec            = _mm256_and_pd(velec,cutoff_mask);
929             velec            = _mm256_andnot_pd(dummy_mask,velec);
930             velecsum         = _mm256_add_pd(velecsum,velec);
931
932             fscal            = felec;
933
934             fscal            = _mm256_and_pd(fscal,cutoff_mask);
935
936             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
937
938             /* Calculate temporary vectorial force */
939             tx               = _mm256_mul_pd(fscal,dx02);
940             ty               = _mm256_mul_pd(fscal,dy02);
941             tz               = _mm256_mul_pd(fscal,dz02);
942
943             /* Update vectorial force */
944             fix0             = _mm256_add_pd(fix0,tx);
945             fiy0             = _mm256_add_pd(fiy0,ty);
946             fiz0             = _mm256_add_pd(fiz0,tz);
947
948             fjx2             = _mm256_add_pd(fjx2,tx);
949             fjy2             = _mm256_add_pd(fjy2,ty);
950             fjz2             = _mm256_add_pd(fjz2,tz);
951
952             }
953
954             /**************************
955              * CALCULATE INTERACTIONS *
956              **************************/
957
958             if (gmx_mm256_any_lt(rsq10,rcutoff2))
959             {
960
961             /* REACTION-FIELD ELECTROSTATICS */
962             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
963             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
964
965             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
966
967             /* Update potential sum for this i atom from the interaction with this j atom. */
968             velec            = _mm256_and_pd(velec,cutoff_mask);
969             velec            = _mm256_andnot_pd(dummy_mask,velec);
970             velecsum         = _mm256_add_pd(velecsum,velec);
971
972             fscal            = felec;
973
974             fscal            = _mm256_and_pd(fscal,cutoff_mask);
975
976             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
977
978             /* Calculate temporary vectorial force */
979             tx               = _mm256_mul_pd(fscal,dx10);
980             ty               = _mm256_mul_pd(fscal,dy10);
981             tz               = _mm256_mul_pd(fscal,dz10);
982
983             /* Update vectorial force */
984             fix1             = _mm256_add_pd(fix1,tx);
985             fiy1             = _mm256_add_pd(fiy1,ty);
986             fiz1             = _mm256_add_pd(fiz1,tz);
987
988             fjx0             = _mm256_add_pd(fjx0,tx);
989             fjy0             = _mm256_add_pd(fjy0,ty);
990             fjz0             = _mm256_add_pd(fjz0,tz);
991
992             }
993
994             /**************************
995              * CALCULATE INTERACTIONS *
996              **************************/
997
998             if (gmx_mm256_any_lt(rsq11,rcutoff2))
999             {
1000
1001             /* REACTION-FIELD ELECTROSTATICS */
1002             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
1003             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
1004
1005             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1006
1007             /* Update potential sum for this i atom from the interaction with this j atom. */
1008             velec            = _mm256_and_pd(velec,cutoff_mask);
1009             velec            = _mm256_andnot_pd(dummy_mask,velec);
1010             velecsum         = _mm256_add_pd(velecsum,velec);
1011
1012             fscal            = felec;
1013
1014             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1015
1016             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1017
1018             /* Calculate temporary vectorial force */
1019             tx               = _mm256_mul_pd(fscal,dx11);
1020             ty               = _mm256_mul_pd(fscal,dy11);
1021             tz               = _mm256_mul_pd(fscal,dz11);
1022
1023             /* Update vectorial force */
1024             fix1             = _mm256_add_pd(fix1,tx);
1025             fiy1             = _mm256_add_pd(fiy1,ty);
1026             fiz1             = _mm256_add_pd(fiz1,tz);
1027
1028             fjx1             = _mm256_add_pd(fjx1,tx);
1029             fjy1             = _mm256_add_pd(fjy1,ty);
1030             fjz1             = _mm256_add_pd(fjz1,tz);
1031
1032             }
1033
1034             /**************************
1035              * CALCULATE INTERACTIONS *
1036              **************************/
1037
1038             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1039             {
1040
1041             /* REACTION-FIELD ELECTROSTATICS */
1042             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
1043             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
1044
1045             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1046
1047             /* Update potential sum for this i atom from the interaction with this j atom. */
1048             velec            = _mm256_and_pd(velec,cutoff_mask);
1049             velec            = _mm256_andnot_pd(dummy_mask,velec);
1050             velecsum         = _mm256_add_pd(velecsum,velec);
1051
1052             fscal            = felec;
1053
1054             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1055
1056             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1057
1058             /* Calculate temporary vectorial force */
1059             tx               = _mm256_mul_pd(fscal,dx12);
1060             ty               = _mm256_mul_pd(fscal,dy12);
1061             tz               = _mm256_mul_pd(fscal,dz12);
1062
1063             /* Update vectorial force */
1064             fix1             = _mm256_add_pd(fix1,tx);
1065             fiy1             = _mm256_add_pd(fiy1,ty);
1066             fiz1             = _mm256_add_pd(fiz1,tz);
1067
1068             fjx2             = _mm256_add_pd(fjx2,tx);
1069             fjy2             = _mm256_add_pd(fjy2,ty);
1070             fjz2             = _mm256_add_pd(fjz2,tz);
1071
1072             }
1073
1074             /**************************
1075              * CALCULATE INTERACTIONS *
1076              **************************/
1077
1078             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1079             {
1080
1081             /* REACTION-FIELD ELECTROSTATICS */
1082             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
1083             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
1084
1085             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1086
1087             /* Update potential sum for this i atom from the interaction with this j atom. */
1088             velec            = _mm256_and_pd(velec,cutoff_mask);
1089             velec            = _mm256_andnot_pd(dummy_mask,velec);
1090             velecsum         = _mm256_add_pd(velecsum,velec);
1091
1092             fscal            = felec;
1093
1094             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1095
1096             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1097
1098             /* Calculate temporary vectorial force */
1099             tx               = _mm256_mul_pd(fscal,dx20);
1100             ty               = _mm256_mul_pd(fscal,dy20);
1101             tz               = _mm256_mul_pd(fscal,dz20);
1102
1103             /* Update vectorial force */
1104             fix2             = _mm256_add_pd(fix2,tx);
1105             fiy2             = _mm256_add_pd(fiy2,ty);
1106             fiz2             = _mm256_add_pd(fiz2,tz);
1107
1108             fjx0             = _mm256_add_pd(fjx0,tx);
1109             fjy0             = _mm256_add_pd(fjy0,ty);
1110             fjz0             = _mm256_add_pd(fjz0,tz);
1111
1112             }
1113
1114             /**************************
1115              * CALCULATE INTERACTIONS *
1116              **************************/
1117
1118             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1119             {
1120
1121             /* REACTION-FIELD ELECTROSTATICS */
1122             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
1123             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
1124
1125             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1126
1127             /* Update potential sum for this i atom from the interaction with this j atom. */
1128             velec            = _mm256_and_pd(velec,cutoff_mask);
1129             velec            = _mm256_andnot_pd(dummy_mask,velec);
1130             velecsum         = _mm256_add_pd(velecsum,velec);
1131
1132             fscal            = felec;
1133
1134             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1135
1136             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1137
1138             /* Calculate temporary vectorial force */
1139             tx               = _mm256_mul_pd(fscal,dx21);
1140             ty               = _mm256_mul_pd(fscal,dy21);
1141             tz               = _mm256_mul_pd(fscal,dz21);
1142
1143             /* Update vectorial force */
1144             fix2             = _mm256_add_pd(fix2,tx);
1145             fiy2             = _mm256_add_pd(fiy2,ty);
1146             fiz2             = _mm256_add_pd(fiz2,tz);
1147
1148             fjx1             = _mm256_add_pd(fjx1,tx);
1149             fjy1             = _mm256_add_pd(fjy1,ty);
1150             fjz1             = _mm256_add_pd(fjz1,tz);
1151
1152             }
1153
1154             /**************************
1155              * CALCULATE INTERACTIONS *
1156              **************************/
1157
1158             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1159             {
1160
1161             /* REACTION-FIELD ELECTROSTATICS */
1162             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
1163             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
1164
1165             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1166
1167             /* Update potential sum for this i atom from the interaction with this j atom. */
1168             velec            = _mm256_and_pd(velec,cutoff_mask);
1169             velec            = _mm256_andnot_pd(dummy_mask,velec);
1170             velecsum         = _mm256_add_pd(velecsum,velec);
1171
1172             fscal            = felec;
1173
1174             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1175
1176             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1177
1178             /* Calculate temporary vectorial force */
1179             tx               = _mm256_mul_pd(fscal,dx22);
1180             ty               = _mm256_mul_pd(fscal,dy22);
1181             tz               = _mm256_mul_pd(fscal,dz22);
1182
1183             /* Update vectorial force */
1184             fix2             = _mm256_add_pd(fix2,tx);
1185             fiy2             = _mm256_add_pd(fiy2,ty);
1186             fiz2             = _mm256_add_pd(fiz2,tz);
1187
1188             fjx2             = _mm256_add_pd(fjx2,tx);
1189             fjy2             = _mm256_add_pd(fjy2,ty);
1190             fjz2             = _mm256_add_pd(fjz2,tz);
1191
1192             }
1193
1194             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1195             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1196             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1197             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
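                 /* Dummy j entries are redirected to the local scratch buffer, so the
                  * swizzled store below never touches the force of the atom-0 slot that
                  * their clamped indices would otherwise point to.
                  */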
1198
1199             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1200                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1201
1202             /* Inner loop uses 361 flops */
1203         }
1204
1205         /* End of innermost loop */
1206
1207         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1208                                                  f+i_coord_offset,fshift+i_shift_offset);
1209
1210         ggid                        = gid[iidx];
1211         /* Update potential energies */
1212         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1213         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
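             /* gmx_mm256_update_1pot_pd() horizontally sums the four lanes of the
              * accumulator and adds the result to the energy-group entry selected by ggid.
              */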
1214
1215         /* Increment number of inner iterations */
1216         inneriter                  += j_index_end - j_index_start;
1217
1218         /* Outer loop uses 20 flops */
1219     }
1220
1221     /* Increment number of outer iterations */
1222     outeriter        += nri;
1223
1224     /* Update outer/inner flops */
1225
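         /* inc_nrnb() adds the iteration counts, weighted by the per-loop flop estimates
          * noted above, to the t_nrnb counters used for the flop accounting reported in
          * the log file.
          */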
1226     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*361);
1227 }
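
/*
 * Illustrative scalar reference for the arithmetic vectorized by the kernels in
 * this file: a minimal sketch only, kept under #if 0 so it is never compiled or
 * called, and the helper names below are not part of GROMACS.
 */
#if 0
static double
reference_reaction_field(double qq, double r, double k_rf, double c_rf,
                         double *fscal)
{
    double rinv = 1.0/r;

    /* fscal is the force divided by r: f_vec = fscal*(r_i - r_j) */
    *fscal = qq*(rinv*rinv*rinv - 2.0*k_rf);
    return qq*(rinv + k_rf*r*r - c_rf);
}

static double
reference_cubic_spline(double Y, double F, double G, double H, double eps,
                       double *dv_deps)
{
    /* V(eps) = Y + F*eps + G*eps^2 + H*eps^3 on one table interval */
    double Fp = F + eps*(G + eps*H);

    *dv_deps = Fp + eps*(G + 2.0*eps*H);   /* dV/deps, matching FF in the kernels */
    return Y + eps*Fp;
}
#endif
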
1228 /*
1229  * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_double
1230  * Electrostatics interaction: ReactionField
1231  * VdW interaction:            CubicSplineTable
1232  * Geometry:                   Water3-Water3
1233  * Calculate force/pot:        Force
1234  */
1235 void
1236 nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_double
1237                     (t_nblist                    * gmx_restrict       nlist,
1238                      rvec                        * gmx_restrict          xx,
1239                      rvec                        * gmx_restrict          ff,
1240                      struct t_forcerec           * gmx_restrict          fr,
1241                      t_mdatoms                   * gmx_restrict     mdatoms,
1242                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1243                      t_nrnb                      * gmx_restrict        nrnb)
1244 {
1245     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1246      * just 0 for non-waters.
1247      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1248      * jnr indices corresponding to data put in the four positions in the SIMD register.
1249      */
1250     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1251     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1252     int              jnrA,jnrB,jnrC,jnrD;
1253     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1254     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1255     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1256     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1257     real             rcutoff_scalar;
1258     real             *shiftvec,*fshift,*x,*f;
1259     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1260     real             scratch[4*DIM];
1261     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1262     real *           vdwioffsetptr0;
1263     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1264     real *           vdwioffsetptr1;
1265     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1266     real *           vdwioffsetptr2;
1267     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1268     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1269     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1270     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1271     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1272     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1273     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1274     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1275     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1276     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1277     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1278     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1279     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1280     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1281     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1282     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1283     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
1284     real             *charge;
1285     int              nvdwtype;
1286     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1287     int              *vdwtype;
1288     real             *vdwparam;
1289     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
1290     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
1291     __m128i          vfitab;
1292     __m128i          ifour       = _mm_set1_epi32(4);
1293     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1294     real             *vftab;
1295     __m256d          dummy_mask,cutoff_mask;
1296     __m128           tmpmask0,tmpmask1;
1297     __m256d          signbit = _mm256_set1_pd(-0.0);   /* mask with only the sign bit of each double lane set */
1298     __m256d          one     = _mm256_set1_pd(1.0);
1299     __m256d          two     = _mm256_set1_pd(2.0);
1300     x                = xx[0];
1301     f                = ff[0];
1302
1303     nri              = nlist->nri;
1304     iinr             = nlist->iinr;
1305     jindex           = nlist->jindex;
1306     jjnr             = nlist->jjnr;
1307     shiftidx         = nlist->shift;
1308     gid              = nlist->gid;
1309     shiftvec         = fr->shift_vec[0];
1310     fshift           = fr->fshift[0];
1311     facel            = _mm256_set1_pd(fr->ic->epsfac);
1312     charge           = mdatoms->chargeA;
1313     krf              = _mm256_set1_pd(fr->ic->k_rf);
1314     krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
1315     crf              = _mm256_set1_pd(fr->ic->c_rf);
1316     nvdwtype         = fr->ntype;
1317     vdwparam         = fr->nbfp;
1318     vdwtype          = mdatoms->typeA;
1319
1320     vftab            = kernel_data->table_vdw->data;
1321     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
1322
1323     /* Setup water-specific parameters */
1324     inr              = nlist->iinr[0];
1325     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1326     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1327     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1328     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1329
1330     jq0              = _mm256_set1_pd(charge[inr+0]);
1331     jq1              = _mm256_set1_pd(charge[inr+1]);
1332     jq2              = _mm256_set1_pd(charge[inr+2]);
1333     vdwjidx0A        = 2*vdwtype[inr+0];
1334     qq00             = _mm256_mul_pd(iq0,jq0);
1335     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1336     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1337     qq01             = _mm256_mul_pd(iq0,jq1);
1338     qq02             = _mm256_mul_pd(iq0,jq2);
1339     qq10             = _mm256_mul_pd(iq1,jq0);
1340     qq11             = _mm256_mul_pd(iq1,jq1);
1341     qq12             = _mm256_mul_pd(iq1,jq2);
1342     qq20             = _mm256_mul_pd(iq2,jq0);
1343     qq21             = _mm256_mul_pd(iq2,jq1);
1344     qq22             = _mm256_mul_pd(iq2,jq2);
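         /* All i and j particles handled by this kernel are identical three-site waters,
          * so the nine charge products and the single 0-0 c6/c12 pair (the only site pair
          * with VdW parameters in this geometry) can be precomputed once, outside both loops.
          */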
1345
1346     /* When explicit cutoffs are used, the values must be identical for electrostatics and VdW, so we use the electrostatics cutoff as the (arbitrary) choice */
1347     rcutoff_scalar   = fr->ic->rcoulomb;
1348     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
1349     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
1350
1351     /* Initialize to silence spurious compiler warnings about possibly uninitialized variables */
1352     jnrA = jnrB = jnrC = jnrD = 0;
1353     j_coord_offsetA = 0;
1354     j_coord_offsetB = 0;
1355     j_coord_offsetC = 0;
1356     j_coord_offsetD = 0;
1357
1358     outeriter        = 0;
1359     inneriter        = 0;
1360
1361     for(iidx=0;iidx<4*DIM;iidx++)
1362     {
1363         scratch[iidx] = 0.0;
1364     }
1365
1366     /* Start outer loop over neighborlists */
1367     for(iidx=0; iidx<nri; iidx++)
1368     {
1369         /* Load shift vector for this list */
1370         i_shift_offset   = DIM*shiftidx[iidx];
1371
1372         /* Load limits for loop over neighbors */
1373         j_index_start    = jindex[iidx];
1374         j_index_end      = jindex[iidx+1];
1375
1376         /* Get outer coordinate index */
1377         inr              = iinr[iidx];
1378         i_coord_offset   = DIM*inr;
1379
1380         /* Load i particle coords and add shift vector */
1381         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1382                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1383
1384         fix0             = _mm256_setzero_pd();
1385         fiy0             = _mm256_setzero_pd();
1386         fiz0             = _mm256_setzero_pd();
1387         fix1             = _mm256_setzero_pd();
1388         fiy1             = _mm256_setzero_pd();
1389         fiz1             = _mm256_setzero_pd();
1390         fix2             = _mm256_setzero_pd();
1391         fiy2             = _mm256_setzero_pd();
1392         fiz2             = _mm256_setzero_pd();
1393
1394         /* Start inner kernel loop */
1395         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1396         {
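                 /* Each pass processes four j waters (A-D) in the SIMD lanes; the loop
                  * stops before any padded entries (negative jjnr values), which are
                  * handled by the masked epilogue following this loop.
                  */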
1397
1398             /* Get j neighbor index, and coordinate index */
1399             jnrA             = jjnr[jidx];
1400             jnrB             = jjnr[jidx+1];
1401             jnrC             = jjnr[jidx+2];
1402             jnrD             = jjnr[jidx+3];
1403             j_coord_offsetA  = DIM*jnrA;
1404             j_coord_offsetB  = DIM*jnrB;
1405             j_coord_offsetC  = DIM*jnrC;
1406             j_coord_offsetD  = DIM*jnrD;
1407
1408             /* load j atom coordinates */
1409             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1410                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1411                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1412
1413             /* Calculate displacement vector */
1414             dx00             = _mm256_sub_pd(ix0,jx0);
1415             dy00             = _mm256_sub_pd(iy0,jy0);
1416             dz00             = _mm256_sub_pd(iz0,jz0);
1417             dx01             = _mm256_sub_pd(ix0,jx1);
1418             dy01             = _mm256_sub_pd(iy0,jy1);
1419             dz01             = _mm256_sub_pd(iz0,jz1);
1420             dx02             = _mm256_sub_pd(ix0,jx2);
1421             dy02             = _mm256_sub_pd(iy0,jy2);
1422             dz02             = _mm256_sub_pd(iz0,jz2);
1423             dx10             = _mm256_sub_pd(ix1,jx0);
1424             dy10             = _mm256_sub_pd(iy1,jy0);
1425             dz10             = _mm256_sub_pd(iz1,jz0);
1426             dx11             = _mm256_sub_pd(ix1,jx1);
1427             dy11             = _mm256_sub_pd(iy1,jy1);
1428             dz11             = _mm256_sub_pd(iz1,jz1);
1429             dx12             = _mm256_sub_pd(ix1,jx2);
1430             dy12             = _mm256_sub_pd(iy1,jy2);
1431             dz12             = _mm256_sub_pd(iz1,jz2);
1432             dx20             = _mm256_sub_pd(ix2,jx0);
1433             dy20             = _mm256_sub_pd(iy2,jy0);
1434             dz20             = _mm256_sub_pd(iz2,jz0);
1435             dx21             = _mm256_sub_pd(ix2,jx1);
1436             dy21             = _mm256_sub_pd(iy2,jy1);
1437             dz21             = _mm256_sub_pd(iz2,jz1);
1438             dx22             = _mm256_sub_pd(ix2,jx2);
1439             dy22             = _mm256_sub_pd(iy2,jy2);
1440             dz22             = _mm256_sub_pd(iz2,jz2);
1441
1442             /* Calculate squared distance and things based on it */
1443             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1444             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1445             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1446             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1447             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1448             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1449             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1450             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1451             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1452
1453             rinv00           = avx256_invsqrt_d(rsq00);
1454             rinv01           = avx256_invsqrt_d(rsq01);
1455             rinv02           = avx256_invsqrt_d(rsq02);
1456             rinv10           = avx256_invsqrt_d(rsq10);
1457             rinv11           = avx256_invsqrt_d(rsq11);
1458             rinv12           = avx256_invsqrt_d(rsq12);
1459             rinv20           = avx256_invsqrt_d(rsq20);
1460             rinv21           = avx256_invsqrt_d(rsq21);
1461             rinv22           = avx256_invsqrt_d(rsq22);
1462
1463             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1464             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1465             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1466             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1467             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1468             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1469             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1470             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1471             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1472
1473             fjx0             = _mm256_setzero_pd();
1474             fjy0             = _mm256_setzero_pd();
1475             fjz0             = _mm256_setzero_pd();
1476             fjx1             = _mm256_setzero_pd();
1477             fjy1             = _mm256_setzero_pd();
1478             fjz1             = _mm256_setzero_pd();
1479             fjx2             = _mm256_setzero_pd();
1480             fjy2             = _mm256_setzero_pd();
1481             fjz2             = _mm256_setzero_pd();
1482
1483             /**************************
1484              * CALCULATE INTERACTIONS *
1485              **************************/
1486
1487             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1488             {
1489
1490             r00              = _mm256_mul_pd(rsq00,rinv00);
1491
1492             /* Calculate the table index by multiplying r by the table scale and truncating to an integer */
1493             rt               = _mm256_mul_pd(r00,vftabscale);
1494             vfitab           = _mm256_cvttpd_epi32(rt);
1495             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1496             vfitab           = _mm_slli_epi32(vfitab,3);
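                 /* vfitab is the integer part of r*tabscale and vfeps the fractional
                  * remainder; the shift by 3 multiplies the index by 8 because each table
                  * point stores eight doubles: Y,F,G,H for dispersion followed by Y,F,G,H
                  * for repulsion.
                  */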
1497
1498             /* REACTION-FIELD ELECTROSTATICS */
1499             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
1500
1501             /* CUBIC SPLINE TABLE DISPERSION */
1502             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1503             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1504             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1505             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1506             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1507             Heps             = _mm256_mul_pd(vfeps,H);
1508             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1509             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1510             fvdw6            = _mm256_mul_pd(c6_00,FF);
1511
1512             /* CUBIC SPLINE TABLE REPULSION */
1513             vfitab           = _mm_add_epi32(vfitab,ifour);
1514             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1515             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1516             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1517             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1518             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1519             Heps             = _mm256_mul_pd(vfeps,H);
1520             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1521             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1522             fvdw12           = _mm256_mul_pd(c12_00,FF);
1523             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
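                 /* On each interval V(eps) = Y + F*eps + G*eps^2 + H*eps^3, so the FF
                  * computed above equals dV/deps. Multiplying by vftabscale converts it to
                  * dV/dr, and the final multiply by rinv00 plus the sign flip (xor with
                  * signbit) turn it into the fscal factor: force = fscal*(r_i - r_j).
                  */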
1524
1525             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1526
1527             fscal            = _mm256_add_pd(felec,fvdw);
1528
1529             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1530
1531             /* Calculate temporary vectorial force */
1532             tx               = _mm256_mul_pd(fscal,dx00);
1533             ty               = _mm256_mul_pd(fscal,dy00);
1534             tz               = _mm256_mul_pd(fscal,dz00);
1535
1536             /* Update vectorial force */
1537             fix0             = _mm256_add_pd(fix0,tx);
1538             fiy0             = _mm256_add_pd(fiy0,ty);
1539             fiz0             = _mm256_add_pd(fiz0,tz);
1540
1541             fjx0             = _mm256_add_pd(fjx0,tx);
1542             fjy0             = _mm256_add_pd(fjy0,ty);
1543             fjz0             = _mm256_add_pd(fjz0,tz);
1544
1545             }
1546
1547             /**************************
1548              * CALCULATE INTERACTIONS *
1549              **************************/
1550
1551             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1552             {
1553
1554             /* REACTION-FIELD ELECTROSTATICS */
1555             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
1556
1557             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1558
1559             fscal            = felec;
1560
1561             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1562
1563             /* Calculate temporary vectorial force */
1564             tx               = _mm256_mul_pd(fscal,dx01);
1565             ty               = _mm256_mul_pd(fscal,dy01);
1566             tz               = _mm256_mul_pd(fscal,dz01);
1567
1568             /* Update vectorial force */
1569             fix0             = _mm256_add_pd(fix0,tx);
1570             fiy0             = _mm256_add_pd(fiy0,ty);
1571             fiz0             = _mm256_add_pd(fiz0,tz);
1572
1573             fjx1             = _mm256_add_pd(fjx1,tx);
1574             fjy1             = _mm256_add_pd(fjy1,ty);
1575             fjz1             = _mm256_add_pd(fjz1,tz);
1576
1577             }
1578
1579             /**************************
1580              * CALCULATE INTERACTIONS *
1581              **************************/
1582
1583             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1584             {
1585
1586             /* REACTION-FIELD ELECTROSTATICS */
1587             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
1588
1589             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
1590
1591             fscal            = felec;
1592
1593             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1594
1595             /* Calculate temporary vectorial force */
1596             tx               = _mm256_mul_pd(fscal,dx02);
1597             ty               = _mm256_mul_pd(fscal,dy02);
1598             tz               = _mm256_mul_pd(fscal,dz02);
1599
1600             /* Update vectorial force */
1601             fix0             = _mm256_add_pd(fix0,tx);
1602             fiy0             = _mm256_add_pd(fiy0,ty);
1603             fiz0             = _mm256_add_pd(fiz0,tz);
1604
1605             fjx2             = _mm256_add_pd(fjx2,tx);
1606             fjy2             = _mm256_add_pd(fjy2,ty);
1607             fjz2             = _mm256_add_pd(fjz2,tz);
1608
1609             }
1610
1611             /**************************
1612              * CALCULATE INTERACTIONS *
1613              **************************/
1614
1615             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1616             {
1617
1618             /* REACTION-FIELD ELECTROSTATICS */
1619             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
1620
1621             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1622
1623             fscal            = felec;
1624
1625             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1626
1627             /* Calculate temporary vectorial force */
1628             tx               = _mm256_mul_pd(fscal,dx10);
1629             ty               = _mm256_mul_pd(fscal,dy10);
1630             tz               = _mm256_mul_pd(fscal,dz10);
1631
1632             /* Update vectorial force */
1633             fix1             = _mm256_add_pd(fix1,tx);
1634             fiy1             = _mm256_add_pd(fiy1,ty);
1635             fiz1             = _mm256_add_pd(fiz1,tz);
1636
1637             fjx0             = _mm256_add_pd(fjx0,tx);
1638             fjy0             = _mm256_add_pd(fjy0,ty);
1639             fjz0             = _mm256_add_pd(fjz0,tz);
1640
1641             }
1642
1643             /**************************
1644              * CALCULATE INTERACTIONS *
1645              **************************/
1646
1647             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1648             {
1649
1650             /* REACTION-FIELD ELECTROSTATICS */
1651             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
1652
1653             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1654
1655             fscal            = felec;
1656
1657             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1658
1659             /* Calculate temporary vectorial force */
1660             tx               = _mm256_mul_pd(fscal,dx11);
1661             ty               = _mm256_mul_pd(fscal,dy11);
1662             tz               = _mm256_mul_pd(fscal,dz11);
1663
1664             /* Update vectorial force */
1665             fix1             = _mm256_add_pd(fix1,tx);
1666             fiy1             = _mm256_add_pd(fiy1,ty);
1667             fiz1             = _mm256_add_pd(fiz1,tz);
1668
1669             fjx1             = _mm256_add_pd(fjx1,tx);
1670             fjy1             = _mm256_add_pd(fjy1,ty);
1671             fjz1             = _mm256_add_pd(fjz1,tz);
1672
1673             }
1674
1675             /**************************
1676              * CALCULATE INTERACTIONS *
1677              **************************/
1678
1679             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1680             {
1681
1682             /* REACTION-FIELD ELECTROSTATICS */
1683             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
1684
1685             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1686
1687             fscal            = felec;
1688
1689             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1690
1691             /* Calculate temporary vectorial force */
1692             tx               = _mm256_mul_pd(fscal,dx12);
1693             ty               = _mm256_mul_pd(fscal,dy12);
1694             tz               = _mm256_mul_pd(fscal,dz12);
1695
1696             /* Update vectorial force */
1697             fix1             = _mm256_add_pd(fix1,tx);
1698             fiy1             = _mm256_add_pd(fiy1,ty);
1699             fiz1             = _mm256_add_pd(fiz1,tz);
1700
1701             fjx2             = _mm256_add_pd(fjx2,tx);
1702             fjy2             = _mm256_add_pd(fjy2,ty);
1703             fjz2             = _mm256_add_pd(fjz2,tz);
1704
1705             }
1706
1707             /**************************
1708              * CALCULATE INTERACTIONS *
1709              **************************/
1710
1711             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1712             {
1713
1714             /* REACTION-FIELD ELECTROSTATICS */
1715             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
1716
1717             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1718
1719             fscal            = felec;
1720
1721             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1722
1723             /* Calculate temporary vectorial force */
1724             tx               = _mm256_mul_pd(fscal,dx20);
1725             ty               = _mm256_mul_pd(fscal,dy20);
1726             tz               = _mm256_mul_pd(fscal,dz20);
1727
1728             /* Update vectorial force */
1729             fix2             = _mm256_add_pd(fix2,tx);
1730             fiy2             = _mm256_add_pd(fiy2,ty);
1731             fiz2             = _mm256_add_pd(fiz2,tz);
1732
1733             fjx0             = _mm256_add_pd(fjx0,tx);
1734             fjy0             = _mm256_add_pd(fjy0,ty);
1735             fjz0             = _mm256_add_pd(fjz0,tz);
1736
1737             }
1738
1739             /**************************
1740              * CALCULATE INTERACTIONS *
1741              **************************/
1742
1743             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1744             {
1745
1746             /* REACTION-FIELD ELECTROSTATICS */
1747             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
1748
1749             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1750
1751             fscal            = felec;
1752
1753             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1754
1755             /* Calculate temporary vectorial force */
1756             tx               = _mm256_mul_pd(fscal,dx21);
1757             ty               = _mm256_mul_pd(fscal,dy21);
1758             tz               = _mm256_mul_pd(fscal,dz21);
1759
1760             /* Update vectorial force */
1761             fix2             = _mm256_add_pd(fix2,tx);
1762             fiy2             = _mm256_add_pd(fiy2,ty);
1763             fiz2             = _mm256_add_pd(fiz2,tz);
1764
1765             fjx1             = _mm256_add_pd(fjx1,tx);
1766             fjy1             = _mm256_add_pd(fjy1,ty);
1767             fjz1             = _mm256_add_pd(fjz1,tz);
1768
1769             }
1770
1771             /**************************
1772              * CALCULATE INTERACTIONS *
1773              **************************/
1774
1775             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1776             {
1777
1778             /* REACTION-FIELD ELECTROSTATICS */
1779             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
1780
1781             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1782
1783             fscal            = felec;
1784
1785             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1786
1787             /* Calculate temporary vectorial force */
1788             tx               = _mm256_mul_pd(fscal,dx22);
1789             ty               = _mm256_mul_pd(fscal,dy22);
1790             tz               = _mm256_mul_pd(fscal,dz22);
1791
1792             /* Update vectorial force */
1793             fix2             = _mm256_add_pd(fix2,tx);
1794             fiy2             = _mm256_add_pd(fiy2,ty);
1795             fiz2             = _mm256_add_pd(fiz2,tz);
1796
1797             fjx2             = _mm256_add_pd(fjx2,tx);
1798             fjy2             = _mm256_add_pd(fjy2,ty);
1799             fjz2             = _mm256_add_pd(fjz2,tz);
1800
1801             }
1802
1803             fjptrA             = f+j_coord_offsetA;
1804             fjptrB             = f+j_coord_offsetB;
1805             fjptrC             = f+j_coord_offsetC;
1806             fjptrD             = f+j_coord_offsetD;
1807
1808             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1809                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1810
1811             /* Inner loop uses 297 flops */
1812         }
1813
1814         if(jidx<j_index_end)
1815         {
1816
1817             /* Get j neighbor index, and coordinate index */
1818             jnrlistA         = jjnr[jidx];
1819             jnrlistB         = jjnr[jidx+1];
1820             jnrlistC         = jjnr[jidx+2];
1821             jnrlistD         = jjnr[jidx+3];
1822             /* The sign of each element is negative for dummy (non-real) atoms.
1823              * The expanded mask has all bits set for dummy entries and all bits zero
1824              * for real ones, so val = _mm256_andnot_pd(dummy_mask,val) clears the
1825              * contributions from dummy entries. */
1826             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1827
1828             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1829             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1830             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
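                 /* The four 32-bit comparison results are widened to the four 64-bit lanes
                  * of a __m256d by duplicating each element with the permutes above and
                  * recombining the two 128-bit halves.
                  */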
1831
1832             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1833             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1834             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1835             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1836             j_coord_offsetA  = DIM*jnrA;
1837             j_coord_offsetB  = DIM*jnrB;
1838             j_coord_offsetC  = DIM*jnrC;
1839             j_coord_offsetD  = DIM*jnrD;
1840
1841             /* load j atom coordinates */
1842             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1843                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1844                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1845
1846             /* Calculate displacement vector */
1847             dx00             = _mm256_sub_pd(ix0,jx0);
1848             dy00             = _mm256_sub_pd(iy0,jy0);
1849             dz00             = _mm256_sub_pd(iz0,jz0);
1850             dx01             = _mm256_sub_pd(ix0,jx1);
1851             dy01             = _mm256_sub_pd(iy0,jy1);
1852             dz01             = _mm256_sub_pd(iz0,jz1);
1853             dx02             = _mm256_sub_pd(ix0,jx2);
1854             dy02             = _mm256_sub_pd(iy0,jy2);
1855             dz02             = _mm256_sub_pd(iz0,jz2);
1856             dx10             = _mm256_sub_pd(ix1,jx0);
1857             dy10             = _mm256_sub_pd(iy1,jy0);
1858             dz10             = _mm256_sub_pd(iz1,jz0);
1859             dx11             = _mm256_sub_pd(ix1,jx1);
1860             dy11             = _mm256_sub_pd(iy1,jy1);
1861             dz11             = _mm256_sub_pd(iz1,jz1);
1862             dx12             = _mm256_sub_pd(ix1,jx2);
1863             dy12             = _mm256_sub_pd(iy1,jy2);
1864             dz12             = _mm256_sub_pd(iz1,jz2);
1865             dx20             = _mm256_sub_pd(ix2,jx0);
1866             dy20             = _mm256_sub_pd(iy2,jy0);
1867             dz20             = _mm256_sub_pd(iz2,jz0);
1868             dx21             = _mm256_sub_pd(ix2,jx1);
1869             dy21             = _mm256_sub_pd(iy2,jy1);
1870             dz21             = _mm256_sub_pd(iz2,jz1);
1871             dx22             = _mm256_sub_pd(ix2,jx2);
1872             dy22             = _mm256_sub_pd(iy2,jy2);
1873             dz22             = _mm256_sub_pd(iz2,jz2);
1874
1875             /* Calculate squared distance and things based on it */
1876             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1877             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1878             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1879             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1880             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1881             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1882             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1883             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1884             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1885
1886             rinv00           = avx256_invsqrt_d(rsq00);
1887             rinv01           = avx256_invsqrt_d(rsq01);
1888             rinv02           = avx256_invsqrt_d(rsq02);
1889             rinv10           = avx256_invsqrt_d(rsq10);
1890             rinv11           = avx256_invsqrt_d(rsq11);
1891             rinv12           = avx256_invsqrt_d(rsq12);
1892             rinv20           = avx256_invsqrt_d(rsq20);
1893             rinv21           = avx256_invsqrt_d(rsq21);
1894             rinv22           = avx256_invsqrt_d(rsq22);
1895
1896             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1897             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1898             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1899             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1900             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1901             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1902             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1903             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1904             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1905
1906             fjx0             = _mm256_setzero_pd();
1907             fjy0             = _mm256_setzero_pd();
1908             fjz0             = _mm256_setzero_pd();
1909             fjx1             = _mm256_setzero_pd();
1910             fjy1             = _mm256_setzero_pd();
1911             fjz1             = _mm256_setzero_pd();
1912             fjx2             = _mm256_setzero_pd();
1913             fjy2             = _mm256_setzero_pd();
1914             fjz2             = _mm256_setzero_pd();
1915
1916             /**************************
1917              * CALCULATE INTERACTIONS *
1918              **************************/
1919
1920             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1921             {
1922
1923             r00              = _mm256_mul_pd(rsq00,rinv00);
1924             r00              = _mm256_andnot_pd(dummy_mask,r00);
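                 /* For dummy lanes r is forced to zero so that the table index computed
                  * below stays inside the table; the bogus force from those lanes is
                  * removed again by the dummy mask applied to fscal.
                  */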
1925
1926             /* Calculate the table index by multiplying r by the table scale and truncating to an integer */
1927             rt               = _mm256_mul_pd(r00,vftabscale);
1928             vfitab           = _mm256_cvttpd_epi32(rt);
1929             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1930             vfitab           = _mm_slli_epi32(vfitab,3);
1931
1932             /* REACTION-FIELD ELECTROSTATICS */
1933             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
1934
1935             /* CUBIC SPLINE TABLE DISPERSION */
1936             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1937             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1938             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1939             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1940             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1941             Heps             = _mm256_mul_pd(vfeps,H);
1942             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1943             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1944             fvdw6            = _mm256_mul_pd(c6_00,FF);
1945
1946             /* CUBIC SPLINE TABLE REPULSION */
1947             vfitab           = _mm_add_epi32(vfitab,ifour);
1948             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1949             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1950             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1951             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1952             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1953             Heps             = _mm256_mul_pd(vfeps,H);
1954             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1955             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1956             fvdw12           = _mm256_mul_pd(c12_00,FF);
1957             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1958
1959             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1960
1961             fscal            = _mm256_add_pd(felec,fvdw);
1962
1963             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1964
1965             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1966
1967             /* Calculate temporary vectorial force */
1968             tx               = _mm256_mul_pd(fscal,dx00);
1969             ty               = _mm256_mul_pd(fscal,dy00);
1970             tz               = _mm256_mul_pd(fscal,dz00);
1971
1972             /* Update vectorial force */
1973             fix0             = _mm256_add_pd(fix0,tx);
1974             fiy0             = _mm256_add_pd(fiy0,ty);
1975             fiz0             = _mm256_add_pd(fiz0,tz);
1976
1977             fjx0             = _mm256_add_pd(fjx0,tx);
1978             fjy0             = _mm256_add_pd(fjy0,ty);
1979             fjz0             = _mm256_add_pd(fjz0,tz);
1980
1981             }
1982
1983             /**************************
1984              * CALCULATE INTERACTIONS *
1985              **************************/
1986
1987             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1988             {
1989
1990             /* REACTION-FIELD ELECTROSTATICS */
1991             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
1992
1993             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1994
1995             fscal            = felec;
1996
1997             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1998
1999             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2000
2001             /* Calculate temporary vectorial force */
2002             tx               = _mm256_mul_pd(fscal,dx01);
2003             ty               = _mm256_mul_pd(fscal,dy01);
2004             tz               = _mm256_mul_pd(fscal,dz01);
2005
2006             /* Update vectorial force */
2007             fix0             = _mm256_add_pd(fix0,tx);
2008             fiy0             = _mm256_add_pd(fiy0,ty);
2009             fiz0             = _mm256_add_pd(fiz0,tz);
2010
2011             fjx1             = _mm256_add_pd(fjx1,tx);
2012             fjy1             = _mm256_add_pd(fjy1,ty);
2013             fjz1             = _mm256_add_pd(fjz1,tz);
2014
2015             }
2016
2017             /**************************
2018              * CALCULATE INTERACTIONS *
2019              **************************/
2020
2021             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2022             {
2023
2024             /* REACTION-FIELD ELECTROSTATICS */
2025             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
2026
2027             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2028
2029             fscal            = felec;
2030
2031             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2032
2033             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2034
2035             /* Calculate temporary vectorial force */
2036             tx               = _mm256_mul_pd(fscal,dx02);
2037             ty               = _mm256_mul_pd(fscal,dy02);
2038             tz               = _mm256_mul_pd(fscal,dz02);
2039
2040             /* Update vectorial force */
2041             fix0             = _mm256_add_pd(fix0,tx);
2042             fiy0             = _mm256_add_pd(fiy0,ty);
2043             fiz0             = _mm256_add_pd(fiz0,tz);
2044
2045             fjx2             = _mm256_add_pd(fjx2,tx);
2046             fjy2             = _mm256_add_pd(fjy2,ty);
2047             fjz2             = _mm256_add_pd(fjz2,tz);
2048
2049             }
2050
2051             /**************************
2052              * CALCULATE INTERACTIONS *
2053              **************************/
2054
2055             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2056             {
2057
2058             /* REACTION-FIELD ELECTROSTATICS */
2059             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
2060
2061             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2062
2063             fscal            = felec;
2064
2065             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2066
2067             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2068
2069             /* Calculate temporary vectorial force */
2070             tx               = _mm256_mul_pd(fscal,dx10);
2071             ty               = _mm256_mul_pd(fscal,dy10);
2072             tz               = _mm256_mul_pd(fscal,dz10);
2073
2074             /* Update vectorial force */
2075             fix1             = _mm256_add_pd(fix1,tx);
2076             fiy1             = _mm256_add_pd(fiy1,ty);
2077             fiz1             = _mm256_add_pd(fiz1,tz);
2078
2079             fjx0             = _mm256_add_pd(fjx0,tx);
2080             fjy0             = _mm256_add_pd(fjy0,ty);
2081             fjz0             = _mm256_add_pd(fjz0,tz);
2082
2083             }
2084
2085             /**************************
2086              * CALCULATE INTERACTIONS *
2087              **************************/
2088
2089             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2090             {
2091
2092             /* REACTION-FIELD ELECTROSTATICS */
2093             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
2094
2095             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2096
2097             fscal            = felec;
2098
2099             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2100
2101             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2102
2103             /* Calculate temporary vectorial force */
2104             tx               = _mm256_mul_pd(fscal,dx11);
2105             ty               = _mm256_mul_pd(fscal,dy11);
2106             tz               = _mm256_mul_pd(fscal,dz11);
2107
2108             /* Update vectorial force */
2109             fix1             = _mm256_add_pd(fix1,tx);
2110             fiy1             = _mm256_add_pd(fiy1,ty);
2111             fiz1             = _mm256_add_pd(fiz1,tz);
2112
2113             fjx1             = _mm256_add_pd(fjx1,tx);
2114             fjy1             = _mm256_add_pd(fjy1,ty);
2115             fjz1             = _mm256_add_pd(fjz1,tz);
2116
2117             }
2118
2119             /**************************
2120              * CALCULATE INTERACTIONS *
2121              **************************/
2122
2123             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2124             {
2125
2126             /* REACTION-FIELD ELECTROSTATICS */
2127             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
2128
2129             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2130
2131             fscal            = felec;
2132
2133             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2134
2135             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2136
2137             /* Calculate temporary vectorial force */
2138             tx               = _mm256_mul_pd(fscal,dx12);
2139             ty               = _mm256_mul_pd(fscal,dy12);
2140             tz               = _mm256_mul_pd(fscal,dz12);
2141
2142             /* Update vectorial force */
2143             fix1             = _mm256_add_pd(fix1,tx);
2144             fiy1             = _mm256_add_pd(fiy1,ty);
2145             fiz1             = _mm256_add_pd(fiz1,tz);
2146
2147             fjx2             = _mm256_add_pd(fjx2,tx);
2148             fjy2             = _mm256_add_pd(fjy2,ty);
2149             fjz2             = _mm256_add_pd(fjz2,tz);
2150
2151             }
2152
2153             /**************************
2154              * CALCULATE INTERACTIONS *
2155              **************************/
2156
2157             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2158             {
2159
2160             /* REACTION-FIELD ELECTROSTATICS */
2161             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
2162
2163             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2164
2165             fscal            = felec;
2166
2167             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2168
2169             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2170
2171             /* Calculate temporary vectorial force */
2172             tx               = _mm256_mul_pd(fscal,dx20);
2173             ty               = _mm256_mul_pd(fscal,dy20);
2174             tz               = _mm256_mul_pd(fscal,dz20);
2175
2176             /* Update vectorial force */
2177             fix2             = _mm256_add_pd(fix2,tx);
2178             fiy2             = _mm256_add_pd(fiy2,ty);
2179             fiz2             = _mm256_add_pd(fiz2,tz);
2180
2181             fjx0             = _mm256_add_pd(fjx0,tx);
2182             fjy0             = _mm256_add_pd(fjy0,ty);
2183             fjz0             = _mm256_add_pd(fjz0,tz);
2184
2185             }
2186
2187             /**************************
2188              * CALCULATE INTERACTIONS *
2189              **************************/
2190
2191             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2192             {
2193
2194             /* REACTION-FIELD ELECTROSTATICS */
2195             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
2196
2197             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2198
2199             fscal            = felec;
2200
2201             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2202
2203             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2204
2205             /* Calculate temporary vectorial force */
2206             tx               = _mm256_mul_pd(fscal,dx21);
2207             ty               = _mm256_mul_pd(fscal,dy21);
2208             tz               = _mm256_mul_pd(fscal,dz21);
2209
2210             /* Update vectorial force */
2211             fix2             = _mm256_add_pd(fix2,tx);
2212             fiy2             = _mm256_add_pd(fiy2,ty);
2213             fiz2             = _mm256_add_pd(fiz2,tz);
2214
2215             fjx1             = _mm256_add_pd(fjx1,tx);
2216             fjy1             = _mm256_add_pd(fjy1,ty);
2217             fjz1             = _mm256_add_pd(fjz1,tz);
2218
2219             }
2220
2221             /**************************
2222              * CALCULATE INTERACTIONS *
2223              **************************/
2224
2225             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2226             {
2227
2228             /* REACTION-FIELD ELECTROSTATICS */
2229             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
2230
2231             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2232
2233             fscal            = felec;
2234
2235             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2236
2237             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2238
2239             /* Calculate temporary vectorial force */
2240             tx               = _mm256_mul_pd(fscal,dx22);
2241             ty               = _mm256_mul_pd(fscal,dy22);
2242             tz               = _mm256_mul_pd(fscal,dz22);
2243
2244             /* Update vectorial force */
2245             fix2             = _mm256_add_pd(fix2,tx);
2246             fiy2             = _mm256_add_pd(fiy2,ty);
2247             fiz2             = _mm256_add_pd(fiz2,tz);
2248
2249             fjx2             = _mm256_add_pd(fjx2,tx);
2250             fjy2             = _mm256_add_pd(fjy2,ty);
2251             fjz2             = _mm256_add_pd(fjz2,tz);
2252
2253             }
2254
2255             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2256             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2257             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2258             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2259
2260             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2261                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2262
2263             /* Inner loop uses 298 flops */
2264         }
2265
2266         /* End of innermost loop */
2267
2268         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2269                                                  f+i_coord_offset,fshift+i_shift_offset);
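             /* The per-lane i-atom force accumulators are transposed, summed and added to
              * f[] here, together with the corresponding shift-force contribution that
              * GROMACS uses for the virial.
              */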
2270
2271         /* Increment number of inner iterations */
2272         inneriter                  += j_index_end - j_index_start;
2273
2274         /* Outer loop uses 18 flops */
2275     }
2276
2277     /* Increment number of outer iterations */
2278     outeriter        += nri;
2279
2280     /* Update outer/inner flops */
2281
2282     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*298);
2283 }