src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_avx_256_double.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
38 #include "gmxpre.h"
39
40 #include "config.h"
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "gromacs/legacyheaders/types/simple.h"
46 #include "gromacs/math/vec.h"
47 #include "gromacs/legacyheaders/nrnb.h"
48
49 #include "gromacs/simd/math_x86_avx_256_double.h"
50 #include "kernelutil_x86_avx_256_double.h"
51
52 /*
53  * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_double
54  * Electrostatics interaction: ReactionField
55  * VdW interaction:            CubicSplineTable
56  * Geometry:                   Water3-Water3
57  * Calculate force/pot:        PotentialAndForce
58  */
59 void
60 nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_double
61                     (t_nblist                    * gmx_restrict       nlist,
62                      rvec                        * gmx_restrict          xx,
63                      rvec                        * gmx_restrict          ff,
64                      t_forcerec                  * gmx_restrict          fr,
65                      t_mdatoms                   * gmx_restrict     mdatoms,
66                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67                      t_nrnb                      * gmx_restrict        nrnb)
68 {
69     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
70      * just 0 for non-waters.
71      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
72      * jnr indices corresponding to data put in the four positions in the SIMD register.
73      */
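    /* In this Water3-Water3 kernel the suffixes 0,1,2 denote the three sites of a
     * rigid 3-site water on both the i and the j side, so every pair index mn used
     * below (e.g. dx12, qq21, rsq00) is one of the 9 site-site combinations
     * evaluated for each quadruplet of j waters.
     */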
74     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
75     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76     int              jnrA,jnrB,jnrC,jnrD;
77     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
79     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
84     real             scratch[4*DIM];
85     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88     real *           vdwioffsetptr1;
89     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90     real *           vdwioffsetptr2;
91     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
93     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
94     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
95     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
96     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
97     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
98     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
99     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
100     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
101     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
102     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
103     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
104     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
105     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
106     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
107     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
108     real             *charge;
109     int              nvdwtype;
110     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
111     int              *vdwtype;
112     real             *vdwparam;
113     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
114     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
115     __m128i          vfitab;
116     __m128i          ifour       = _mm_set1_epi32(4);
117     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
118     real             *vftab;
119     __m256d          dummy_mask,cutoff_mask;
120     __m128           tmpmask0,tmpmask1;
121     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
122     __m256d          one     = _mm256_set1_pd(1.0);
123     __m256d          two     = _mm256_set1_pd(2.0);
124     x                = xx[0];
125     f                = ff[0];
126
127     nri              = nlist->nri;
128     iinr             = nlist->iinr;
129     jindex           = nlist->jindex;
130     jjnr             = nlist->jjnr;
131     shiftidx         = nlist->shift;
132     gid              = nlist->gid;
133     shiftvec         = fr->shift_vec[0];
134     fshift           = fr->fshift[0];
135     facel            = _mm256_set1_pd(fr->epsfac);
136     charge           = mdatoms->chargeA;
137     krf              = _mm256_set1_pd(fr->ic->k_rf);
138     krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
139     crf              = _mm256_set1_pd(fr->ic->c_rf);
140     nvdwtype         = fr->ntype;
141     vdwparam         = fr->nbfp;
142     vdwtype          = mdatoms->typeA;
143
144     vftab            = kernel_data->table_vdw->data;
145     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
146
147     /* Setup water-specific parameters */
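    /* Because every i and j particle handled by this kernel is a site of the same
     * 3-site water, all nine charge products qq_mn and the O-O parameters
     * c6_00/c12_00 can be computed once here, outside both loops.
     */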
148     inr              = nlist->iinr[0];
149     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
150     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
151     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
152     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
153
154     jq0              = _mm256_set1_pd(charge[inr+0]);
155     jq1              = _mm256_set1_pd(charge[inr+1]);
156     jq2              = _mm256_set1_pd(charge[inr+2]);
157     vdwjidx0A        = 2*vdwtype[inr+0];
158     qq00             = _mm256_mul_pd(iq0,jq0);
159     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
160     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
161     qq01             = _mm256_mul_pd(iq0,jq1);
162     qq02             = _mm256_mul_pd(iq0,jq2);
163     qq10             = _mm256_mul_pd(iq1,jq0);
164     qq11             = _mm256_mul_pd(iq1,jq1);
165     qq12             = _mm256_mul_pd(iq1,jq2);
166     qq20             = _mm256_mul_pd(iq2,jq0);
167     qq21             = _mm256_mul_pd(iq2,jq1);
168     qq22             = _mm256_mul_pd(iq2,jq2);
169
170     /* When we use explicit cutoffs the value must be identical for elec and VdW, so we arbitrarily use the elec cutoff */
171     rcutoff_scalar   = fr->rcoulomb;
172     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
173     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
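    /* In scalar terms, the reaction-field electrostatics evaluated in the inner
     * loops below is
     *     velec = qq*(1/r + krf*r^2 - crf)
     *     felec = qq*(1/r^3 - 2*krf)
     * where felec is the force divided by r, so multiplying it by dx,dy,dz gives
     * the Cartesian force components. Both are zeroed beyond rcutoff by the
     * cutoff mask.
     */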
174
175     /* Avoid stupid compiler warnings */
176     jnrA = jnrB = jnrC = jnrD = 0;
177     j_coord_offsetA = 0;
178     j_coord_offsetB = 0;
179     j_coord_offsetC = 0;
180     j_coord_offsetD = 0;
181
182     outeriter        = 0;
183     inneriter        = 0;
184
185     for(iidx=0;iidx<4*DIM;iidx++)
186     {
187         scratch[iidx] = 0.0;
188     }
189
190     /* Start outer loop over neighborlists */
191     for(iidx=0; iidx<nri; iidx++)
192     {
193         /* Load shift vector for this list */
194         i_shift_offset   = DIM*shiftidx[iidx];
195
196         /* Load limits for loop over neighbors */
197         j_index_start    = jindex[iidx];
198         j_index_end      = jindex[iidx+1];
199
200         /* Get outer coordinate index */
201         inr              = iinr[iidx];
202         i_coord_offset   = DIM*inr;
203
204         /* Load i particle coords and add shift vector */
205         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
206                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
207
208         fix0             = _mm256_setzero_pd();
209         fiy0             = _mm256_setzero_pd();
210         fiz0             = _mm256_setzero_pd();
211         fix1             = _mm256_setzero_pd();
212         fiy1             = _mm256_setzero_pd();
213         fiz1             = _mm256_setzero_pd();
214         fix2             = _mm256_setzero_pd();
215         fiy2             = _mm256_setzero_pd();
216         fiz2             = _mm256_setzero_pd();
217
218         /* Reset potential sums */
219         velecsum         = _mm256_setzero_pd();
220         vvdwsum          = _mm256_setzero_pd();
221
222         /* Start inner kernel loop */
223         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
224         {
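            /* This loop handles complete groups of four j atoms; a trailing group
             * containing padded (negative) jjnr entries is processed separately in
             * the epilogue after this loop.
             */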
225
226             /* Get j neighbor index, and coordinate index */
227             jnrA             = jjnr[jidx];
228             jnrB             = jjnr[jidx+1];
229             jnrC             = jjnr[jidx+2];
230             jnrD             = jjnr[jidx+3];
231             j_coord_offsetA  = DIM*jnrA;
232             j_coord_offsetB  = DIM*jnrB;
233             j_coord_offsetC  = DIM*jnrC;
234             j_coord_offsetD  = DIM*jnrD;
235
236             /* load j atom coordinates */
237             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
238                                                  x+j_coord_offsetC,x+j_coord_offsetD,
239                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
240
241             /* Calculate displacement vector */
242             dx00             = _mm256_sub_pd(ix0,jx0);
243             dy00             = _mm256_sub_pd(iy0,jy0);
244             dz00             = _mm256_sub_pd(iz0,jz0);
245             dx01             = _mm256_sub_pd(ix0,jx1);
246             dy01             = _mm256_sub_pd(iy0,jy1);
247             dz01             = _mm256_sub_pd(iz0,jz1);
248             dx02             = _mm256_sub_pd(ix0,jx2);
249             dy02             = _mm256_sub_pd(iy0,jy2);
250             dz02             = _mm256_sub_pd(iz0,jz2);
251             dx10             = _mm256_sub_pd(ix1,jx0);
252             dy10             = _mm256_sub_pd(iy1,jy0);
253             dz10             = _mm256_sub_pd(iz1,jz0);
254             dx11             = _mm256_sub_pd(ix1,jx1);
255             dy11             = _mm256_sub_pd(iy1,jy1);
256             dz11             = _mm256_sub_pd(iz1,jz1);
257             dx12             = _mm256_sub_pd(ix1,jx2);
258             dy12             = _mm256_sub_pd(iy1,jy2);
259             dz12             = _mm256_sub_pd(iz1,jz2);
260             dx20             = _mm256_sub_pd(ix2,jx0);
261             dy20             = _mm256_sub_pd(iy2,jy0);
262             dz20             = _mm256_sub_pd(iz2,jz0);
263             dx21             = _mm256_sub_pd(ix2,jx1);
264             dy21             = _mm256_sub_pd(iy2,jy1);
265             dz21             = _mm256_sub_pd(iz2,jz1);
266             dx22             = _mm256_sub_pd(ix2,jx2);
267             dy22             = _mm256_sub_pd(iy2,jy2);
268             dz22             = _mm256_sub_pd(iz2,jz2);
269
270             /* Calculate squared distance and things based on it */
271             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
272             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
273             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
274             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
275             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
276             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
277             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
278             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
279             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
280
281             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
282             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
283             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
284             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
285             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
286             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
287             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
288             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
289             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
290
291             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
292             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
293             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
294             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
295             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
296             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
297             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
298             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
299             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
300
301             fjx0             = _mm256_setzero_pd();
302             fjy0             = _mm256_setzero_pd();
303             fjz0             = _mm256_setzero_pd();
304             fjx1             = _mm256_setzero_pd();
305             fjy1             = _mm256_setzero_pd();
306             fjz1             = _mm256_setzero_pd();
307             fjx2             = _mm256_setzero_pd();
308             fjy2             = _mm256_setzero_pd();
309             fjz2             = _mm256_setzero_pd();
310
311             /**************************
312              * CALCULATE INTERACTIONS *
313              **************************/
314
315             if (gmx_mm256_any_lt(rsq00,rcutoff2))
316             {
317
318             r00              = _mm256_mul_pd(rsq00,rinv00);
319
320             /* Calculate table index by multiplying r with table scale and truncate to integer */
321             rt               = _mm256_mul_pd(r00,vftabscale);
322             vfitab           = _mm256_cvttpd_epi32(rt);
323             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
324             vfitab           = _mm_slli_epi32(vfitab,3);
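            /* Each table point stores four cubic-spline coefficients (Y,F,G,H) for
             * dispersion followed by four for repulsion, so the point index is
             * multiplied by 8 (<<3) here and offset by ifour for the repulsion
             * lookup below; vfeps is the fractional position within the interval.
             */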
325
326             /* REACTION-FIELD ELECTROSTATICS */
327             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
328             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
329
330             /* CUBIC SPLINE TABLE DISPERSION */
331             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
332             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
333             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
334             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
335             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
336             Heps             = _mm256_mul_pd(vfeps,H);
337             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
338             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
339             vvdw6            = _mm256_mul_pd(c6_00,VV);
340             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
341             fvdw6            = _mm256_mul_pd(c6_00,FF);
342
343             /* CUBIC SPLINE TABLE REPULSION */
344             vfitab           = _mm_add_epi32(vfitab,ifour);
345             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
346             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
347             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
348             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
349             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
350             Heps             = _mm256_mul_pd(vfeps,H);
351             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
352             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
353             vvdw12           = _mm256_mul_pd(c12_00,VV);
354             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
355             fvdw12           = _mm256_mul_pd(c12_00,FF);
356             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
357             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
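            /* The spline is evaluated in Horner form, VV = Y + eps*(F + eps*(G + eps*H)),
             * with FF = dVV/deps, so the scalar VdW force is
             * -(fvdw6+fvdw12)*vftabscale*rinv; the xor with signbit supplies the
             * sign flip.
             */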
358
359             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
360
361             /* Update potential sum for this i atom from the interaction with this j atom. */
362             velec            = _mm256_and_pd(velec,cutoff_mask);
363             velecsum         = _mm256_add_pd(velecsum,velec);
364             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
365             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
366
367             fscal            = _mm256_add_pd(felec,fvdw);
368
369             fscal            = _mm256_and_pd(fscal,cutoff_mask);
370
371             /* Calculate temporary vectorial force */
372             tx               = _mm256_mul_pd(fscal,dx00);
373             ty               = _mm256_mul_pd(fscal,dy00);
374             tz               = _mm256_mul_pd(fscal,dz00);
375
376             /* Update vectorial force */
377             fix0             = _mm256_add_pd(fix0,tx);
378             fiy0             = _mm256_add_pd(fiy0,ty);
379             fiz0             = _mm256_add_pd(fiz0,tz);
380
381             fjx0             = _mm256_add_pd(fjx0,tx);
382             fjy0             = _mm256_add_pd(fjy0,ty);
383             fjz0             = _mm256_add_pd(fjz0,tz);
384
385             }
386
387             /**************************
388              * CALCULATE INTERACTIONS *
389              **************************/
390
391             if (gmx_mm256_any_lt(rsq01,rcutoff2))
392             {
393
394             /* REACTION-FIELD ELECTROSTATICS */
395             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
396             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
397
398             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
399
400             /* Update potential sum for this i atom from the interaction with this j atom. */
401             velec            = _mm256_and_pd(velec,cutoff_mask);
402             velecsum         = _mm256_add_pd(velecsum,velec);
403
404             fscal            = felec;
405
406             fscal            = _mm256_and_pd(fscal,cutoff_mask);
407
408             /* Calculate temporary vectorial force */
409             tx               = _mm256_mul_pd(fscal,dx01);
410             ty               = _mm256_mul_pd(fscal,dy01);
411             tz               = _mm256_mul_pd(fscal,dz01);
412
413             /* Update vectorial force */
414             fix0             = _mm256_add_pd(fix0,tx);
415             fiy0             = _mm256_add_pd(fiy0,ty);
416             fiz0             = _mm256_add_pd(fiz0,tz);
417
418             fjx1             = _mm256_add_pd(fjx1,tx);
419             fjy1             = _mm256_add_pd(fjy1,ty);
420             fjz1             = _mm256_add_pd(fjz1,tz);
421
422             }
423
424             /**************************
425              * CALCULATE INTERACTIONS *
426              **************************/
427
428             if (gmx_mm256_any_lt(rsq02,rcutoff2))
429             {
430
431             /* REACTION-FIELD ELECTROSTATICS */
432             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
433             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
434
435             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
436
437             /* Update potential sum for this i atom from the interaction with this j atom. */
438             velec            = _mm256_and_pd(velec,cutoff_mask);
439             velecsum         = _mm256_add_pd(velecsum,velec);
440
441             fscal            = felec;
442
443             fscal            = _mm256_and_pd(fscal,cutoff_mask);
444
445             /* Calculate temporary vectorial force */
446             tx               = _mm256_mul_pd(fscal,dx02);
447             ty               = _mm256_mul_pd(fscal,dy02);
448             tz               = _mm256_mul_pd(fscal,dz02);
449
450             /* Update vectorial force */
451             fix0             = _mm256_add_pd(fix0,tx);
452             fiy0             = _mm256_add_pd(fiy0,ty);
453             fiz0             = _mm256_add_pd(fiz0,tz);
454
455             fjx2             = _mm256_add_pd(fjx2,tx);
456             fjy2             = _mm256_add_pd(fjy2,ty);
457             fjz2             = _mm256_add_pd(fjz2,tz);
458
459             }
460
461             /**************************
462              * CALCULATE INTERACTIONS *
463              **************************/
464
465             if (gmx_mm256_any_lt(rsq10,rcutoff2))
466             {
467
468             /* REACTION-FIELD ELECTROSTATICS */
469             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
470             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
471
472             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
473
474             /* Update potential sum for this i atom from the interaction with this j atom. */
475             velec            = _mm256_and_pd(velec,cutoff_mask);
476             velecsum         = _mm256_add_pd(velecsum,velec);
477
478             fscal            = felec;
479
480             fscal            = _mm256_and_pd(fscal,cutoff_mask);
481
482             /* Calculate temporary vectorial force */
483             tx               = _mm256_mul_pd(fscal,dx10);
484             ty               = _mm256_mul_pd(fscal,dy10);
485             tz               = _mm256_mul_pd(fscal,dz10);
486
487             /* Update vectorial force */
488             fix1             = _mm256_add_pd(fix1,tx);
489             fiy1             = _mm256_add_pd(fiy1,ty);
490             fiz1             = _mm256_add_pd(fiz1,tz);
491
492             fjx0             = _mm256_add_pd(fjx0,tx);
493             fjy0             = _mm256_add_pd(fjy0,ty);
494             fjz0             = _mm256_add_pd(fjz0,tz);
495
496             }
497
498             /**************************
499              * CALCULATE INTERACTIONS *
500              **************************/
501
502             if (gmx_mm256_any_lt(rsq11,rcutoff2))
503             {
504
505             /* REACTION-FIELD ELECTROSTATICS */
506             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
507             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
508
509             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
510
511             /* Update potential sum for this i atom from the interaction with this j atom. */
512             velec            = _mm256_and_pd(velec,cutoff_mask);
513             velecsum         = _mm256_add_pd(velecsum,velec);
514
515             fscal            = felec;
516
517             fscal            = _mm256_and_pd(fscal,cutoff_mask);
518
519             /* Calculate temporary vectorial force */
520             tx               = _mm256_mul_pd(fscal,dx11);
521             ty               = _mm256_mul_pd(fscal,dy11);
522             tz               = _mm256_mul_pd(fscal,dz11);
523
524             /* Update vectorial force */
525             fix1             = _mm256_add_pd(fix1,tx);
526             fiy1             = _mm256_add_pd(fiy1,ty);
527             fiz1             = _mm256_add_pd(fiz1,tz);
528
529             fjx1             = _mm256_add_pd(fjx1,tx);
530             fjy1             = _mm256_add_pd(fjy1,ty);
531             fjz1             = _mm256_add_pd(fjz1,tz);
532
533             }
534
535             /**************************
536              * CALCULATE INTERACTIONS *
537              **************************/
538
539             if (gmx_mm256_any_lt(rsq12,rcutoff2))
540             {
541
542             /* REACTION-FIELD ELECTROSTATICS */
543             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
544             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
545
546             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
547
548             /* Update potential sum for this i atom from the interaction with this j atom. */
549             velec            = _mm256_and_pd(velec,cutoff_mask);
550             velecsum         = _mm256_add_pd(velecsum,velec);
551
552             fscal            = felec;
553
554             fscal            = _mm256_and_pd(fscal,cutoff_mask);
555
556             /* Calculate temporary vectorial force */
557             tx               = _mm256_mul_pd(fscal,dx12);
558             ty               = _mm256_mul_pd(fscal,dy12);
559             tz               = _mm256_mul_pd(fscal,dz12);
560
561             /* Update vectorial force */
562             fix1             = _mm256_add_pd(fix1,tx);
563             fiy1             = _mm256_add_pd(fiy1,ty);
564             fiz1             = _mm256_add_pd(fiz1,tz);
565
566             fjx2             = _mm256_add_pd(fjx2,tx);
567             fjy2             = _mm256_add_pd(fjy2,ty);
568             fjz2             = _mm256_add_pd(fjz2,tz);
569
570             }
571
572             /**************************
573              * CALCULATE INTERACTIONS *
574              **************************/
575
576             if (gmx_mm256_any_lt(rsq20,rcutoff2))
577             {
578
579             /* REACTION-FIELD ELECTROSTATICS */
580             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
581             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
582
583             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
584
585             /* Update potential sum for this i atom from the interaction with this j atom. */
586             velec            = _mm256_and_pd(velec,cutoff_mask);
587             velecsum         = _mm256_add_pd(velecsum,velec);
588
589             fscal            = felec;
590
591             fscal            = _mm256_and_pd(fscal,cutoff_mask);
592
593             /* Calculate temporary vectorial force */
594             tx               = _mm256_mul_pd(fscal,dx20);
595             ty               = _mm256_mul_pd(fscal,dy20);
596             tz               = _mm256_mul_pd(fscal,dz20);
597
598             /* Update vectorial force */
599             fix2             = _mm256_add_pd(fix2,tx);
600             fiy2             = _mm256_add_pd(fiy2,ty);
601             fiz2             = _mm256_add_pd(fiz2,tz);
602
603             fjx0             = _mm256_add_pd(fjx0,tx);
604             fjy0             = _mm256_add_pd(fjy0,ty);
605             fjz0             = _mm256_add_pd(fjz0,tz);
606
607             }
608
609             /**************************
610              * CALCULATE INTERACTIONS *
611              **************************/
612
613             if (gmx_mm256_any_lt(rsq21,rcutoff2))
614             {
615
616             /* REACTION-FIELD ELECTROSTATICS */
617             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
618             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
619
620             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
621
622             /* Update potential sum for this i atom from the interaction with this j atom. */
623             velec            = _mm256_and_pd(velec,cutoff_mask);
624             velecsum         = _mm256_add_pd(velecsum,velec);
625
626             fscal            = felec;
627
628             fscal            = _mm256_and_pd(fscal,cutoff_mask);
629
630             /* Calculate temporary vectorial force */
631             tx               = _mm256_mul_pd(fscal,dx21);
632             ty               = _mm256_mul_pd(fscal,dy21);
633             tz               = _mm256_mul_pd(fscal,dz21);
634
635             /* Update vectorial force */
636             fix2             = _mm256_add_pd(fix2,tx);
637             fiy2             = _mm256_add_pd(fiy2,ty);
638             fiz2             = _mm256_add_pd(fiz2,tz);
639
640             fjx1             = _mm256_add_pd(fjx1,tx);
641             fjy1             = _mm256_add_pd(fjy1,ty);
642             fjz1             = _mm256_add_pd(fjz1,tz);
643
644             }
645
646             /**************************
647              * CALCULATE INTERACTIONS *
648              **************************/
649
650             if (gmx_mm256_any_lt(rsq22,rcutoff2))
651             {
652
653             /* REACTION-FIELD ELECTROSTATICS */
654             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
655             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
656
657             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
658
659             /* Update potential sum for this i atom from the interaction with this j atom. */
660             velec            = _mm256_and_pd(velec,cutoff_mask);
661             velecsum         = _mm256_add_pd(velecsum,velec);
662
663             fscal            = felec;
664
665             fscal            = _mm256_and_pd(fscal,cutoff_mask);
666
667             /* Calculate temporary vectorial force */
668             tx               = _mm256_mul_pd(fscal,dx22);
669             ty               = _mm256_mul_pd(fscal,dy22);
670             tz               = _mm256_mul_pd(fscal,dz22);
671
672             /* Update vectorial force */
673             fix2             = _mm256_add_pd(fix2,tx);
674             fiy2             = _mm256_add_pd(fiy2,ty);
675             fiz2             = _mm256_add_pd(fiz2,tz);
676
677             fjx2             = _mm256_add_pd(fjx2,tx);
678             fjy2             = _mm256_add_pd(fjy2,ty);
679             fjz2             = _mm256_add_pd(fjz2,tz);
680
681             }
682
683             fjptrA             = f+j_coord_offsetA;
684             fjptrB             = f+j_coord_offsetB;
685             fjptrC             = f+j_coord_offsetC;
686             fjptrD             = f+j_coord_offsetD;
687
688             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
689                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
690
691             /* Inner loop uses 360 flops */
692         }
693
694         if(jidx<j_index_end)
695         {
696
697             /* Get j neighbor index, and coordinate index */
698             jnrlistA         = jjnr[jidx];
699             jnrlistB         = jjnr[jidx+1];
700             jnrlistC         = jjnr[jidx+2];
701             jnrlistD         = jjnr[jidx+3];
702             /* Sign of each element will be negative for non-real atoms.
703              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
704              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
705              */
706             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
707
708             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
709             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
710             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
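            /* dummy_mask now has all bits set in the lanes corresponding to padded
             * (negative) jjnr entries; those lanes are cleared from r, the potentials
             * and fscal with _mm256_andnot_pd below, and their j forces are written
             * to the local scratch buffer instead of to real atoms.
             */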
711
712             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
713             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
714             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
715             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
716             j_coord_offsetA  = DIM*jnrA;
717             j_coord_offsetB  = DIM*jnrB;
718             j_coord_offsetC  = DIM*jnrC;
719             j_coord_offsetD  = DIM*jnrD;
720
721             /* load j atom coordinates */
722             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
723                                                  x+j_coord_offsetC,x+j_coord_offsetD,
724                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
725
726             /* Calculate displacement vector */
727             dx00             = _mm256_sub_pd(ix0,jx0);
728             dy00             = _mm256_sub_pd(iy0,jy0);
729             dz00             = _mm256_sub_pd(iz0,jz0);
730             dx01             = _mm256_sub_pd(ix0,jx1);
731             dy01             = _mm256_sub_pd(iy0,jy1);
732             dz01             = _mm256_sub_pd(iz0,jz1);
733             dx02             = _mm256_sub_pd(ix0,jx2);
734             dy02             = _mm256_sub_pd(iy0,jy2);
735             dz02             = _mm256_sub_pd(iz0,jz2);
736             dx10             = _mm256_sub_pd(ix1,jx0);
737             dy10             = _mm256_sub_pd(iy1,jy0);
738             dz10             = _mm256_sub_pd(iz1,jz0);
739             dx11             = _mm256_sub_pd(ix1,jx1);
740             dy11             = _mm256_sub_pd(iy1,jy1);
741             dz11             = _mm256_sub_pd(iz1,jz1);
742             dx12             = _mm256_sub_pd(ix1,jx2);
743             dy12             = _mm256_sub_pd(iy1,jy2);
744             dz12             = _mm256_sub_pd(iz1,jz2);
745             dx20             = _mm256_sub_pd(ix2,jx0);
746             dy20             = _mm256_sub_pd(iy2,jy0);
747             dz20             = _mm256_sub_pd(iz2,jz0);
748             dx21             = _mm256_sub_pd(ix2,jx1);
749             dy21             = _mm256_sub_pd(iy2,jy1);
750             dz21             = _mm256_sub_pd(iz2,jz1);
751             dx22             = _mm256_sub_pd(ix2,jx2);
752             dy22             = _mm256_sub_pd(iy2,jy2);
753             dz22             = _mm256_sub_pd(iz2,jz2);
754
755             /* Calculate squared distance and things based on it */
756             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
757             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
758             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
759             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
760             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
761             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
762             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
763             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
764             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
765
766             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
767             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
768             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
769             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
770             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
771             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
772             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
773             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
774             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
775
776             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
777             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
778             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
779             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
780             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
781             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
782             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
783             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
784             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
785
786             fjx0             = _mm256_setzero_pd();
787             fjy0             = _mm256_setzero_pd();
788             fjz0             = _mm256_setzero_pd();
789             fjx1             = _mm256_setzero_pd();
790             fjy1             = _mm256_setzero_pd();
791             fjz1             = _mm256_setzero_pd();
792             fjx2             = _mm256_setzero_pd();
793             fjy2             = _mm256_setzero_pd();
794             fjz2             = _mm256_setzero_pd();
795
796             /**************************
797              * CALCULATE INTERACTIONS *
798              **************************/
799
800             if (gmx_mm256_any_lt(rsq00,rcutoff2))
801             {
802
803             r00              = _mm256_mul_pd(rsq00,rinv00);
804             r00              = _mm256_andnot_pd(dummy_mask,r00);
805
806             /* Calculate table index by multiplying r with table scale and truncate to integer */
807             rt               = _mm256_mul_pd(r00,vftabscale);
808             vfitab           = _mm256_cvttpd_epi32(rt);
809             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
810             vfitab           = _mm_slli_epi32(vfitab,3);
811
812             /* REACTION-FIELD ELECTROSTATICS */
813             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
814             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
815
816             /* CUBIC SPLINE TABLE DISPERSION */
817             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
818             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
819             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
820             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
821             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
822             Heps             = _mm256_mul_pd(vfeps,H);
823             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
824             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
825             vvdw6            = _mm256_mul_pd(c6_00,VV);
826             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
827             fvdw6            = _mm256_mul_pd(c6_00,FF);
828
829             /* CUBIC SPLINE TABLE REPULSION */
830             vfitab           = _mm_add_epi32(vfitab,ifour);
831             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
832             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
833             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
834             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
835             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
836             Heps             = _mm256_mul_pd(vfeps,H);
837             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
838             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
839             vvdw12           = _mm256_mul_pd(c12_00,VV);
840             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
841             fvdw12           = _mm256_mul_pd(c12_00,FF);
842             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
843             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
844
845             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
846
847             /* Update potential sum for this i atom from the interaction with this j atom. */
848             velec            = _mm256_and_pd(velec,cutoff_mask);
849             velec            = _mm256_andnot_pd(dummy_mask,velec);
850             velecsum         = _mm256_add_pd(velecsum,velec);
851             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
852             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
853             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
854
855             fscal            = _mm256_add_pd(felec,fvdw);
856
857             fscal            = _mm256_and_pd(fscal,cutoff_mask);
858
859             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
860
861             /* Calculate temporary vectorial force */
862             tx               = _mm256_mul_pd(fscal,dx00);
863             ty               = _mm256_mul_pd(fscal,dy00);
864             tz               = _mm256_mul_pd(fscal,dz00);
865
866             /* Update vectorial force */
867             fix0             = _mm256_add_pd(fix0,tx);
868             fiy0             = _mm256_add_pd(fiy0,ty);
869             fiz0             = _mm256_add_pd(fiz0,tz);
870
871             fjx0             = _mm256_add_pd(fjx0,tx);
872             fjy0             = _mm256_add_pd(fjy0,ty);
873             fjz0             = _mm256_add_pd(fjz0,tz);
874
875             }
876
877             /**************************
878              * CALCULATE INTERACTIONS *
879              **************************/
880
881             if (gmx_mm256_any_lt(rsq01,rcutoff2))
882             {
883
884             /* REACTION-FIELD ELECTROSTATICS */
885             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
886             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
887
888             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
889
890             /* Update potential sum for this i atom from the interaction with this j atom. */
891             velec            = _mm256_and_pd(velec,cutoff_mask);
892             velec            = _mm256_andnot_pd(dummy_mask,velec);
893             velecsum         = _mm256_add_pd(velecsum,velec);
894
895             fscal            = felec;
896
897             fscal            = _mm256_and_pd(fscal,cutoff_mask);
898
899             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
900
901             /* Calculate temporary vectorial force */
902             tx               = _mm256_mul_pd(fscal,dx01);
903             ty               = _mm256_mul_pd(fscal,dy01);
904             tz               = _mm256_mul_pd(fscal,dz01);
905
906             /* Update vectorial force */
907             fix0             = _mm256_add_pd(fix0,tx);
908             fiy0             = _mm256_add_pd(fiy0,ty);
909             fiz0             = _mm256_add_pd(fiz0,tz);
910
911             fjx1             = _mm256_add_pd(fjx1,tx);
912             fjy1             = _mm256_add_pd(fjy1,ty);
913             fjz1             = _mm256_add_pd(fjz1,tz);
914
915             }
916
917             /**************************
918              * CALCULATE INTERACTIONS *
919              **************************/
920
921             if (gmx_mm256_any_lt(rsq02,rcutoff2))
922             {
923
924             /* REACTION-FIELD ELECTROSTATICS */
925             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
926             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
927
928             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
929
930             /* Update potential sum for this i atom from the interaction with this j atom. */
931             velec            = _mm256_and_pd(velec,cutoff_mask);
932             velec            = _mm256_andnot_pd(dummy_mask,velec);
933             velecsum         = _mm256_add_pd(velecsum,velec);
934
935             fscal            = felec;
936
937             fscal            = _mm256_and_pd(fscal,cutoff_mask);
938
939             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
940
941             /* Calculate temporary vectorial force */
942             tx               = _mm256_mul_pd(fscal,dx02);
943             ty               = _mm256_mul_pd(fscal,dy02);
944             tz               = _mm256_mul_pd(fscal,dz02);
945
946             /* Update vectorial force */
947             fix0             = _mm256_add_pd(fix0,tx);
948             fiy0             = _mm256_add_pd(fiy0,ty);
949             fiz0             = _mm256_add_pd(fiz0,tz);
950
951             fjx2             = _mm256_add_pd(fjx2,tx);
952             fjy2             = _mm256_add_pd(fjy2,ty);
953             fjz2             = _mm256_add_pd(fjz2,tz);
954
955             }
956
957             /**************************
958              * CALCULATE INTERACTIONS *
959              **************************/
960
961             if (gmx_mm256_any_lt(rsq10,rcutoff2))
962             {
963
964             /* REACTION-FIELD ELECTROSTATICS */
965             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
966             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
967
968             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
969
970             /* Update potential sum for this i atom from the interaction with this j atom. */
971             velec            = _mm256_and_pd(velec,cutoff_mask);
972             velec            = _mm256_andnot_pd(dummy_mask,velec);
973             velecsum         = _mm256_add_pd(velecsum,velec);
974
975             fscal            = felec;
976
977             fscal            = _mm256_and_pd(fscal,cutoff_mask);
978
979             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
980
981             /* Calculate temporary vectorial force */
982             tx               = _mm256_mul_pd(fscal,dx10);
983             ty               = _mm256_mul_pd(fscal,dy10);
984             tz               = _mm256_mul_pd(fscal,dz10);
985
986             /* Update vectorial force */
987             fix1             = _mm256_add_pd(fix1,tx);
988             fiy1             = _mm256_add_pd(fiy1,ty);
989             fiz1             = _mm256_add_pd(fiz1,tz);
990
991             fjx0             = _mm256_add_pd(fjx0,tx);
992             fjy0             = _mm256_add_pd(fjy0,ty);
993             fjz0             = _mm256_add_pd(fjz0,tz);
994
995             }
996
997             /**************************
998              * CALCULATE INTERACTIONS *
999              **************************/
1000
1001             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1002             {
1003
1004             /* REACTION-FIELD ELECTROSTATICS */
1005             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
1006             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
1007
1008             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1009
1010             /* Update potential sum for this i atom from the interaction with this j atom. */
1011             velec            = _mm256_and_pd(velec,cutoff_mask);
1012             velec            = _mm256_andnot_pd(dummy_mask,velec);
1013             velecsum         = _mm256_add_pd(velecsum,velec);
1014
1015             fscal            = felec;
1016
1017             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1018
1019             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1020
1021             /* Calculate temporary vectorial force */
1022             tx               = _mm256_mul_pd(fscal,dx11);
1023             ty               = _mm256_mul_pd(fscal,dy11);
1024             tz               = _mm256_mul_pd(fscal,dz11);
1025
1026             /* Update vectorial force */
1027             fix1             = _mm256_add_pd(fix1,tx);
1028             fiy1             = _mm256_add_pd(fiy1,ty);
1029             fiz1             = _mm256_add_pd(fiz1,tz);
1030
1031             fjx1             = _mm256_add_pd(fjx1,tx);
1032             fjy1             = _mm256_add_pd(fjy1,ty);
1033             fjz1             = _mm256_add_pd(fjz1,tz);
1034
1035             }
1036
1037             /**************************
1038              * CALCULATE INTERACTIONS *
1039              **************************/
1040
1041             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1042             {
1043
1044             /* REACTION-FIELD ELECTROSTATICS */
1045             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
1046             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
1047
1048             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1049
1050             /* Update potential sum for this i atom from the interaction with this j atom. */
1051             velec            = _mm256_and_pd(velec,cutoff_mask);
1052             velec            = _mm256_andnot_pd(dummy_mask,velec);
1053             velecsum         = _mm256_add_pd(velecsum,velec);
1054
1055             fscal            = felec;
1056
1057             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1058
1059             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1060
1061             /* Calculate temporary vectorial force */
1062             tx               = _mm256_mul_pd(fscal,dx12);
1063             ty               = _mm256_mul_pd(fscal,dy12);
1064             tz               = _mm256_mul_pd(fscal,dz12);
1065
1066             /* Update vectorial force */
1067             fix1             = _mm256_add_pd(fix1,tx);
1068             fiy1             = _mm256_add_pd(fiy1,ty);
1069             fiz1             = _mm256_add_pd(fiz1,tz);
1070
1071             fjx2             = _mm256_add_pd(fjx2,tx);
1072             fjy2             = _mm256_add_pd(fjy2,ty);
1073             fjz2             = _mm256_add_pd(fjz2,tz);
1074
1075             }
1076
1077             /**************************
1078              * CALCULATE INTERACTIONS *
1079              **************************/
1080
1081             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1082             {
1083
1084             /* REACTION-FIELD ELECTROSTATICS */
1085             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
1086             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
1087
1088             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1089
1090             /* Update potential sum for this i atom from the interaction with this j atom. */
1091             velec            = _mm256_and_pd(velec,cutoff_mask);
1092             velec            = _mm256_andnot_pd(dummy_mask,velec);
1093             velecsum         = _mm256_add_pd(velecsum,velec);
1094
1095             fscal            = felec;
1096
1097             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1098
1099             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1100
1101             /* Calculate temporary vectorial force */
1102             tx               = _mm256_mul_pd(fscal,dx20);
1103             ty               = _mm256_mul_pd(fscal,dy20);
1104             tz               = _mm256_mul_pd(fscal,dz20);
1105
1106             /* Update vectorial force */
1107             fix2             = _mm256_add_pd(fix2,tx);
1108             fiy2             = _mm256_add_pd(fiy2,ty);
1109             fiz2             = _mm256_add_pd(fiz2,tz);
1110
1111             fjx0             = _mm256_add_pd(fjx0,tx);
1112             fjy0             = _mm256_add_pd(fjy0,ty);
1113             fjz0             = _mm256_add_pd(fjz0,tz);
1114
1115             }
1116
1117             /**************************
1118              * CALCULATE INTERACTIONS *
1119              **************************/
1120
1121             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1122             {
1123
1124             /* REACTION-FIELD ELECTROSTATICS */
1125             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
1126             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
1127
1128             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1129
1130             /* Update potential sum for this i atom from the interaction with this j atom. */
1131             velec            = _mm256_and_pd(velec,cutoff_mask);
1132             velec            = _mm256_andnot_pd(dummy_mask,velec);
1133             velecsum         = _mm256_add_pd(velecsum,velec);
1134
1135             fscal            = felec;
1136
1137             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1138
1139             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1140
1141             /* Calculate temporary vectorial force */
1142             tx               = _mm256_mul_pd(fscal,dx21);
1143             ty               = _mm256_mul_pd(fscal,dy21);
1144             tz               = _mm256_mul_pd(fscal,dz21);
1145
1146             /* Update vectorial force */
1147             fix2             = _mm256_add_pd(fix2,tx);
1148             fiy2             = _mm256_add_pd(fiy2,ty);
1149             fiz2             = _mm256_add_pd(fiz2,tz);
1150
1151             fjx1             = _mm256_add_pd(fjx1,tx);
1152             fjy1             = _mm256_add_pd(fjy1,ty);
1153             fjz1             = _mm256_add_pd(fjz1,tz);
1154
1155             }
1156
1157             /**************************
1158              * CALCULATE INTERACTIONS *
1159              **************************/
1160
1161             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1162             {
1163
1164             /* REACTION-FIELD ELECTROSTATICS */
1165             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
1166             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
1167
1168             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1169
1170             /* Update potential sum for this i atom from the interaction with this j atom. */
1171             velec            = _mm256_and_pd(velec,cutoff_mask);
1172             velec            = _mm256_andnot_pd(dummy_mask,velec);
1173             velecsum         = _mm256_add_pd(velecsum,velec);
1174
1175             fscal            = felec;
1176
1177             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1178
1179             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1180
1181             /* Calculate temporary vectorial force */
1182             tx               = _mm256_mul_pd(fscal,dx22);
1183             ty               = _mm256_mul_pd(fscal,dy22);
1184             tz               = _mm256_mul_pd(fscal,dz22);
1185
1186             /* Update vectorial force */
1187             fix2             = _mm256_add_pd(fix2,tx);
1188             fiy2             = _mm256_add_pd(fiy2,ty);
1189             fiz2             = _mm256_add_pd(fiz2,tz);
1190
1191             fjx2             = _mm256_add_pd(fjx2,tx);
1192             fjy2             = _mm256_add_pd(fjy2,ty);
1193             fjz2             = _mm256_add_pd(fjz2,tz);
1194
1195             }
1196
1197             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1198             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1199             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1200             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1201
1202             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1203                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1204
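            /*
             * Note on the j-force scatter (illustrative): the accumulated fj* registers
             * are transposed back from SIMD layout and subtracted from the four j-atom
             * entries of f[]. Lanes that correspond to padded neighbor-list entries
             * point at the local scratch buffer instead, so their (already zeroed)
             * contributions are discarded without branching.
             */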
1205             /* Inner loop uses 361 flops */
1206         }
1207
1208         /* End of innermost loop */
1209
1210         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1211                                                  f+i_coord_offset,fshift+i_shift_offset);
1212
1213         ggid                        = gid[iidx];
1214         /* Update potential energies */
1215         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1216         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
1217
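        /*
         * Energy-accumulation sketch (illustrative): gmx_mm256_update_1pot_pd performs
         * a horizontal add over the four lanes, i.e. per energy group roughly
         *
         *     energygrp_elec[ggid] += velecsum[0]+velecsum[1]+velecsum[2]+velecsum[3];
         *
         * where ggid is the energy-group pair index for this i-list.
         */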
1218         /* Increment number of inner iterations */
1219         inneriter                  += j_index_end - j_index_start;
1220
1221         /* Outer loop uses 20 flops */
1222     }
1223
1224     /* Increment number of outer iterations */
1225     outeriter        += nri;
1226
1227     /* Update outer/inner flops */
1228
1229     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*361);
1230 }
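/*
 * Reference formulas (illustrative, not generated): with epsfac*q_i*q_j folded into qq,
 * the reaction-field terms evaluated in these kernels correspond to the scalar
 * expressions
 *
 *     velec = qq*(1/r + krf*r^2 - crf)
 *     felec = qq*(1/r^3 - 2*krf)          (force divided by r)
 *
 * applied only where r < rcutoff, which is what the cutoff_mask enforces.
 */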
1231 /*
1232  * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_double
1233  * Electrostatics interaction: ReactionField
1234  * VdW interaction:            CubicSplineTable
1235  * Geometry:                   Water3-Water3
1236  * Calculate force/pot:        Force
1237  */
1238 void
1239 nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_double
1240                     (t_nblist                    * gmx_restrict       nlist,
1241                      rvec                        * gmx_restrict          xx,
1242                      rvec                        * gmx_restrict          ff,
1243                      t_forcerec                  * gmx_restrict          fr,
1244                      t_mdatoms                   * gmx_restrict     mdatoms,
1245                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1246                      t_nrnb                      * gmx_restrict        nrnb)
1247 {
1248     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1249      * just 0 for non-waters.
1250      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1251      * jnr indices corresponding to data put in the four positions in the SIMD register.
1252      */
1253     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1254     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1255     int              jnrA,jnrB,jnrC,jnrD;
1256     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1257     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1258     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1259     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1260     real             rcutoff_scalar;
1261     real             *shiftvec,*fshift,*x,*f;
1262     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1263     real             scratch[4*DIM];
1264     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1265     real *           vdwioffsetptr0;
1266     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1267     real *           vdwioffsetptr1;
1268     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1269     real *           vdwioffsetptr2;
1270     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1271     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1272     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1273     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1274     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1275     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1276     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1277     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1278     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1279     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1280     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1281     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1282     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1283     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1284     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1285     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1286     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
1287     real             *charge;
1288     int              nvdwtype;
1289     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1290     int              *vdwtype;
1291     real             *vdwparam;
1292     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
1293     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
1294     __m128i          vfitab;
1295     __m128i          ifour       = _mm_set1_epi32(4);
1296     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1297     real             *vftab;
1298     __m256d          dummy_mask,cutoff_mask;
1299     __m128           tmpmask0,tmpmask1;
1300     __m256d          signbit = _mm256_set1_pd(-0.0); /* only the sign bit (bit 63) set in each double lane */
1301     __m256d          one     = _mm256_set1_pd(1.0);
1302     __m256d          two     = _mm256_set1_pd(2.0);
1303     x                = xx[0];
1304     f                = ff[0];
1305
1306     nri              = nlist->nri;
1307     iinr             = nlist->iinr;
1308     jindex           = nlist->jindex;
1309     jjnr             = nlist->jjnr;
1310     shiftidx         = nlist->shift;
1311     gid              = nlist->gid;
1312     shiftvec         = fr->shift_vec[0];
1313     fshift           = fr->fshift[0];
1314     facel            = _mm256_set1_pd(fr->epsfac);
1315     charge           = mdatoms->chargeA;
1316     krf              = _mm256_set1_pd(fr->ic->k_rf);
1317     krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
1318     crf              = _mm256_set1_pd(fr->ic->c_rf);
1319     nvdwtype         = fr->ntype;
1320     vdwparam         = fr->nbfp;
1321     vdwtype          = mdatoms->typeA;
1322
1323     vftab            = kernel_data->table_vdw->data;
1324     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
1325
1326     /* Setup water-specific parameters */
1327     inr              = nlist->iinr[0];
1328     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1329     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1330     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1331     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1332
1333     jq0              = _mm256_set1_pd(charge[inr+0]);
1334     jq1              = _mm256_set1_pd(charge[inr+1]);
1335     jq2              = _mm256_set1_pd(charge[inr+2]);
1336     vdwjidx0A        = 2*vdwtype[inr+0];
1337     qq00             = _mm256_mul_pd(iq0,jq0);
1338     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1339     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1340     qq01             = _mm256_mul_pd(iq0,jq1);
1341     qq02             = _mm256_mul_pd(iq0,jq2);
1342     qq10             = _mm256_mul_pd(iq1,jq0);
1343     qq11             = _mm256_mul_pd(iq1,jq1);
1344     qq12             = _mm256_mul_pd(iq1,jq2);
1345     qq20             = _mm256_mul_pd(iq2,jq0);
1346     qq21             = _mm256_mul_pd(iq2,jq1);
1347     qq22             = _mm256_mul_pd(iq2,jq2);
1348
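    /*
     * Water-parameter note (illustrative): because every molecule in a Water3-Water3
     * list has the same topology, the charges and LJ parameters are taken once from
     * the first i water (inr = nlist->iinr[0]) and the nine charge products are
     * precomputed, in scalar form, as qqAB = epsfac*q[inr+A]*q[inr+B].
     */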
1349     /* When explicit cutoffs are used, the cutoff value must be identical for electrostatics and VdW, so the electrostatics cutoff is used for both (an arbitrary choice) */
1350     rcutoff_scalar   = fr->rcoulomb;
1351     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
1352     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
1353
1354     /* Initialize these to avoid compiler warnings about possibly uninitialized variables */
1355     jnrA = jnrB = jnrC = jnrD = 0;
1356     j_coord_offsetA = 0;
1357     j_coord_offsetB = 0;
1358     j_coord_offsetC = 0;
1359     j_coord_offsetD = 0;
1360
1361     outeriter        = 0;
1362     inneriter        = 0;
1363
1364     for(iidx=0;iidx<4*DIM;iidx++)
1365     {
1366         scratch[iidx] = 0.0;
1367     }
1368
1369     /* Start outer loop over neighborlists */
1370     for(iidx=0; iidx<nri; iidx++)
1371     {
1372         /* Load shift vector for this list */
1373         i_shift_offset   = DIM*shiftidx[iidx];
1374
1375         /* Load limits for loop over neighbors */
1376         j_index_start    = jindex[iidx];
1377         j_index_end      = jindex[iidx+1];
1378
1379         /* Get outer coordinate index */
1380         inr              = iinr[iidx];
1381         i_coord_offset   = DIM*inr;
1382
1383         /* Load i particle coords and add shift vector */
1384         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1385                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1386
1387         fix0             = _mm256_setzero_pd();
1388         fiy0             = _mm256_setzero_pd();
1389         fiz0             = _mm256_setzero_pd();
1390         fix1             = _mm256_setzero_pd();
1391         fiy1             = _mm256_setzero_pd();
1392         fiz1             = _mm256_setzero_pd();
1393         fix2             = _mm256_setzero_pd();
1394         fiy2             = _mm256_setzero_pd();
1395         fiz2             = _mm256_setzero_pd();
1396
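        /*
         * Loop-structure note (illustrative): the main inner loop below runs only
         * while a full quadruplet of real j atoms is available (jjnr[jidx+3]>=0); the
         * remaining 1-3 entries of a padded list are handled by the masked epilogue
         * that follows it.
         */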
1397         /* Start inner kernel loop */
1398         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1399         {
1400
1401             /* Get j neighbor index, and coordinate index */
1402             jnrA             = jjnr[jidx];
1403             jnrB             = jjnr[jidx+1];
1404             jnrC             = jjnr[jidx+2];
1405             jnrD             = jjnr[jidx+3];
1406             j_coord_offsetA  = DIM*jnrA;
1407             j_coord_offsetB  = DIM*jnrB;
1408             j_coord_offsetC  = DIM*jnrC;
1409             j_coord_offsetD  = DIM*jnrD;
1410
1411             /* load j atom coordinates */
1412             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1413                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1414                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1415
1416             /* Calculate displacement vector */
1417             dx00             = _mm256_sub_pd(ix0,jx0);
1418             dy00             = _mm256_sub_pd(iy0,jy0);
1419             dz00             = _mm256_sub_pd(iz0,jz0);
1420             dx01             = _mm256_sub_pd(ix0,jx1);
1421             dy01             = _mm256_sub_pd(iy0,jy1);
1422             dz01             = _mm256_sub_pd(iz0,jz1);
1423             dx02             = _mm256_sub_pd(ix0,jx2);
1424             dy02             = _mm256_sub_pd(iy0,jy2);
1425             dz02             = _mm256_sub_pd(iz0,jz2);
1426             dx10             = _mm256_sub_pd(ix1,jx0);
1427             dy10             = _mm256_sub_pd(iy1,jy0);
1428             dz10             = _mm256_sub_pd(iz1,jz0);
1429             dx11             = _mm256_sub_pd(ix1,jx1);
1430             dy11             = _mm256_sub_pd(iy1,jy1);
1431             dz11             = _mm256_sub_pd(iz1,jz1);
1432             dx12             = _mm256_sub_pd(ix1,jx2);
1433             dy12             = _mm256_sub_pd(iy1,jy2);
1434             dz12             = _mm256_sub_pd(iz1,jz2);
1435             dx20             = _mm256_sub_pd(ix2,jx0);
1436             dy20             = _mm256_sub_pd(iy2,jy0);
1437             dz20             = _mm256_sub_pd(iz2,jz0);
1438             dx21             = _mm256_sub_pd(ix2,jx1);
1439             dy21             = _mm256_sub_pd(iy2,jy1);
1440             dz21             = _mm256_sub_pd(iz2,jz1);
1441             dx22             = _mm256_sub_pd(ix2,jx2);
1442             dy22             = _mm256_sub_pd(iy2,jy2);
1443             dz22             = _mm256_sub_pd(iz2,jz2);
1444
1445             /* Calculate squared distance and things based on it */
1446             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1447             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1448             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1449             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1450             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1451             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1452             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1453             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1454             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1455
1456             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1457             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
1458             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
1459             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1460             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1461             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1462             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1463             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1464             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1465
1466             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1467             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1468             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1469             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1470             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1471             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1472             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1473             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1474             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1475
1476             fjx0             = _mm256_setzero_pd();
1477             fjy0             = _mm256_setzero_pd();
1478             fjz0             = _mm256_setzero_pd();
1479             fjx1             = _mm256_setzero_pd();
1480             fjy1             = _mm256_setzero_pd();
1481             fjz1             = _mm256_setzero_pd();
1482             fjx2             = _mm256_setzero_pd();
1483             fjy2             = _mm256_setzero_pd();
1484             fjz2             = _mm256_setzero_pd();
1485
1486             /**************************
1487              * CALCULATE INTERACTIONS *
1488              **************************/
1489
1490             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1491             {
1492
1493             r00              = _mm256_mul_pd(rsq00,rinv00);
1494
1495             /* Calculate table index by multiplying r with the table scale and truncating to an integer */
1496             rt               = _mm256_mul_pd(r00,vftabscale);
1497             vfitab           = _mm256_cvttpd_epi32(rt);
1498             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1499             vfitab           = _mm_slli_epi32(vfitab,3);
1500
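            /*
             * Table-index sketch (illustrative): with rt = r*tabscale, the integer part
             * selects the table point and vfeps = rt - floor(rt) is the fraction within
             * it. The shift by 3 gives vfitab = 8*floor(rt), since each table point
             * stores four dispersion and four repulsion coefficients (Y,F,G,H).
             */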
1501             /* REACTION-FIELD ELECTROSTATICS */
1502             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
1503
1504             /* CUBIC SPLINE TABLE DISPERSION */
1505             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1506             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1507             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1508             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1509             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1510             Heps             = _mm256_mul_pd(vfeps,H);
1511             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1512             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1513             fvdw6            = _mm256_mul_pd(c6_00,FF);
1514
1515             /* CUBIC SPLINE TABLE REPULSION */
1516             vfitab           = _mm_add_epi32(vfitab,ifour);
1517             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1518             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1519             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1520             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1521             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1522             Heps             = _mm256_mul_pd(vfeps,H);
1523             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1524             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1525             fvdw12           = _mm256_mul_pd(c12_00,FF);
1526             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1527
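            /*
             * Spline-evaluation sketch (illustrative): for coefficients Y,F,G,H and
             * fraction eps the code above uses
             *
             *     Fp = F + eps*(G + eps*H)
             *     FF = Fp + eps*(G + 2*eps*H)        (= dV/d(eps), the force part)
             *
             * so fvdw = -(c6*FF_disp + c12*FF_rep)*tabscale/r, with the sign flipped
             * cheaply by XOR-ing the sign bit.
             */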
1528             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1529
1530             fscal            = _mm256_add_pd(felec,fvdw);
1531
1532             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1533
1534             /* Calculate temporary vectorial force */
1535             tx               = _mm256_mul_pd(fscal,dx00);
1536             ty               = _mm256_mul_pd(fscal,dy00);
1537             tz               = _mm256_mul_pd(fscal,dz00);
1538
1539             /* Update vectorial force */
1540             fix0             = _mm256_add_pd(fix0,tx);
1541             fiy0             = _mm256_add_pd(fiy0,ty);
1542             fiz0             = _mm256_add_pd(fiz0,tz);
1543
1544             fjx0             = _mm256_add_pd(fjx0,tx);
1545             fjy0             = _mm256_add_pd(fjy0,ty);
1546             fjz0             = _mm256_add_pd(fjz0,tz);
1547
1548             }
1549
1550             /**************************
1551              * CALCULATE INTERACTIONS *
1552              **************************/
1553
1554             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1555             {
1556
1557             /* REACTION-FIELD ELECTROSTATICS */
1558             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
1559
1560             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1561
1562             fscal            = felec;
1563
1564             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1565
1566             /* Calculate temporary vectorial force */
1567             tx               = _mm256_mul_pd(fscal,dx01);
1568             ty               = _mm256_mul_pd(fscal,dy01);
1569             tz               = _mm256_mul_pd(fscal,dz01);
1570
1571             /* Update vectorial force */
1572             fix0             = _mm256_add_pd(fix0,tx);
1573             fiy0             = _mm256_add_pd(fiy0,ty);
1574             fiz0             = _mm256_add_pd(fiz0,tz);
1575
1576             fjx1             = _mm256_add_pd(fjx1,tx);
1577             fjy1             = _mm256_add_pd(fjy1,ty);
1578             fjz1             = _mm256_add_pd(fjz1,tz);
1579
1580             }
1581
1582             /**************************
1583              * CALCULATE INTERACTIONS *
1584              **************************/
1585
1586             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1587             {
1588
1589             /* REACTION-FIELD ELECTROSTATICS */
1590             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
1591
1592             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
1593
1594             fscal            = felec;
1595
1596             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1597
1598             /* Calculate temporary vectorial force */
1599             tx               = _mm256_mul_pd(fscal,dx02);
1600             ty               = _mm256_mul_pd(fscal,dy02);
1601             tz               = _mm256_mul_pd(fscal,dz02);
1602
1603             /* Update vectorial force */
1604             fix0             = _mm256_add_pd(fix0,tx);
1605             fiy0             = _mm256_add_pd(fiy0,ty);
1606             fiz0             = _mm256_add_pd(fiz0,tz);
1607
1608             fjx2             = _mm256_add_pd(fjx2,tx);
1609             fjy2             = _mm256_add_pd(fjy2,ty);
1610             fjz2             = _mm256_add_pd(fjz2,tz);
1611
1612             }
1613
1614             /**************************
1615              * CALCULATE INTERACTIONS *
1616              **************************/
1617
1618             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1619             {
1620
1621             /* REACTION-FIELD ELECTROSTATICS */
1622             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
1623
1624             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1625
1626             fscal            = felec;
1627
1628             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1629
1630             /* Calculate temporary vectorial force */
1631             tx               = _mm256_mul_pd(fscal,dx10);
1632             ty               = _mm256_mul_pd(fscal,dy10);
1633             tz               = _mm256_mul_pd(fscal,dz10);
1634
1635             /* Update vectorial force */
1636             fix1             = _mm256_add_pd(fix1,tx);
1637             fiy1             = _mm256_add_pd(fiy1,ty);
1638             fiz1             = _mm256_add_pd(fiz1,tz);
1639
1640             fjx0             = _mm256_add_pd(fjx0,tx);
1641             fjy0             = _mm256_add_pd(fjy0,ty);
1642             fjz0             = _mm256_add_pd(fjz0,tz);
1643
1644             }
1645
1646             /**************************
1647              * CALCULATE INTERACTIONS *
1648              **************************/
1649
1650             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1651             {
1652
1653             /* REACTION-FIELD ELECTROSTATICS */
1654             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
1655
1656             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1657
1658             fscal            = felec;
1659
1660             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1661
1662             /* Calculate temporary vectorial force */
1663             tx               = _mm256_mul_pd(fscal,dx11);
1664             ty               = _mm256_mul_pd(fscal,dy11);
1665             tz               = _mm256_mul_pd(fscal,dz11);
1666
1667             /* Update vectorial force */
1668             fix1             = _mm256_add_pd(fix1,tx);
1669             fiy1             = _mm256_add_pd(fiy1,ty);
1670             fiz1             = _mm256_add_pd(fiz1,tz);
1671
1672             fjx1             = _mm256_add_pd(fjx1,tx);
1673             fjy1             = _mm256_add_pd(fjy1,ty);
1674             fjz1             = _mm256_add_pd(fjz1,tz);
1675
1676             }
1677
1678             /**************************
1679              * CALCULATE INTERACTIONS *
1680              **************************/
1681
1682             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1683             {
1684
1685             /* REACTION-FIELD ELECTROSTATICS */
1686             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
1687
1688             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1689
1690             fscal            = felec;
1691
1692             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1693
1694             /* Calculate temporary vectorial force */
1695             tx               = _mm256_mul_pd(fscal,dx12);
1696             ty               = _mm256_mul_pd(fscal,dy12);
1697             tz               = _mm256_mul_pd(fscal,dz12);
1698
1699             /* Update vectorial force */
1700             fix1             = _mm256_add_pd(fix1,tx);
1701             fiy1             = _mm256_add_pd(fiy1,ty);
1702             fiz1             = _mm256_add_pd(fiz1,tz);
1703
1704             fjx2             = _mm256_add_pd(fjx2,tx);
1705             fjy2             = _mm256_add_pd(fjy2,ty);
1706             fjz2             = _mm256_add_pd(fjz2,tz);
1707
1708             }
1709
1710             /**************************
1711              * CALCULATE INTERACTIONS *
1712              **************************/
1713
1714             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1715             {
1716
1717             /* REACTION-FIELD ELECTROSTATICS */
1718             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
1719
1720             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1721
1722             fscal            = felec;
1723
1724             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1725
1726             /* Calculate temporary vectorial force */
1727             tx               = _mm256_mul_pd(fscal,dx20);
1728             ty               = _mm256_mul_pd(fscal,dy20);
1729             tz               = _mm256_mul_pd(fscal,dz20);
1730
1731             /* Update vectorial force */
1732             fix2             = _mm256_add_pd(fix2,tx);
1733             fiy2             = _mm256_add_pd(fiy2,ty);
1734             fiz2             = _mm256_add_pd(fiz2,tz);
1735
1736             fjx0             = _mm256_add_pd(fjx0,tx);
1737             fjy0             = _mm256_add_pd(fjy0,ty);
1738             fjz0             = _mm256_add_pd(fjz0,tz);
1739
1740             }
1741
1742             /**************************
1743              * CALCULATE INTERACTIONS *
1744              **************************/
1745
1746             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1747             {
1748
1749             /* REACTION-FIELD ELECTROSTATICS */
1750             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
1751
1752             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1753
1754             fscal            = felec;
1755
1756             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1757
1758             /* Calculate temporary vectorial force */
1759             tx               = _mm256_mul_pd(fscal,dx21);
1760             ty               = _mm256_mul_pd(fscal,dy21);
1761             tz               = _mm256_mul_pd(fscal,dz21);
1762
1763             /* Update vectorial force */
1764             fix2             = _mm256_add_pd(fix2,tx);
1765             fiy2             = _mm256_add_pd(fiy2,ty);
1766             fiz2             = _mm256_add_pd(fiz2,tz);
1767
1768             fjx1             = _mm256_add_pd(fjx1,tx);
1769             fjy1             = _mm256_add_pd(fjy1,ty);
1770             fjz1             = _mm256_add_pd(fjz1,tz);
1771
1772             }
1773
1774             /**************************
1775              * CALCULATE INTERACTIONS *
1776              **************************/
1777
1778             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1779             {
1780
1781             /* REACTION-FIELD ELECTROSTATICS */
1782             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
1783
1784             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1785
1786             fscal            = felec;
1787
1788             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1789
1790             /* Calculate temporary vectorial force */
1791             tx               = _mm256_mul_pd(fscal,dx22);
1792             ty               = _mm256_mul_pd(fscal,dy22);
1793             tz               = _mm256_mul_pd(fscal,dz22);
1794
1795             /* Update vectorial force */
1796             fix2             = _mm256_add_pd(fix2,tx);
1797             fiy2             = _mm256_add_pd(fiy2,ty);
1798             fiz2             = _mm256_add_pd(fiz2,tz);
1799
1800             fjx2             = _mm256_add_pd(fjx2,tx);
1801             fjy2             = _mm256_add_pd(fjy2,ty);
1802             fjz2             = _mm256_add_pd(fjz2,tz);
1803
1804             }
1805
1806             fjptrA             = f+j_coord_offsetA;
1807             fjptrB             = f+j_coord_offsetB;
1808             fjptrC             = f+j_coord_offsetC;
1809             fjptrD             = f+j_coord_offsetD;
1810
1811             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1812                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1813
1814             /* Inner loop uses 297 flops */
1815         }
1816
1817         if(jidx<j_index_end)
1818         {
1819
1820             /* Get j neighbor index, and coordinate index */
1821             jnrlistA         = jjnr[jidx];
1822             jnrlistB         = jjnr[jidx+1];
1823             jnrlistC         = jjnr[jidx+2];
1824             jnrlistD         = jjnr[jidx+3];
1825             /* The sign of each element will be negative for non-real (padding) atoms.
1826              * The resulting mask is all-ones for dummy entries and all-zeros for real ones,
1827              * so it is used as val = _mm256_andnot_pd(mask,val) to clear dummy contributions.
1828              */
1829             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1830
1831             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1832             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1833             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
1834
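            /*
             * Dummy-mask construction (illustrative): the 128-bit compare of jjnr+jidx
             * against zero yields one 32-bit all-ones field per negative (padded) entry;
             * the two permutes duplicate each field to 64 bits so the resulting 256-bit
             * mask lines up with the double-precision lanes used by _mm256_andnot_pd.
             */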
1835             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1836             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1837             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1838             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1839             j_coord_offsetA  = DIM*jnrA;
1840             j_coord_offsetB  = DIM*jnrB;
1841             j_coord_offsetC  = DIM*jnrC;
1842             j_coord_offsetD  = DIM*jnrD;
1843
1844             /* load j atom coordinates */
1845             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1846                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1847                                                  &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1848
1849             /* Calculate displacement vector */
1850             dx00             = _mm256_sub_pd(ix0,jx0);
1851             dy00             = _mm256_sub_pd(iy0,jy0);
1852             dz00             = _mm256_sub_pd(iz0,jz0);
1853             dx01             = _mm256_sub_pd(ix0,jx1);
1854             dy01             = _mm256_sub_pd(iy0,jy1);
1855             dz01             = _mm256_sub_pd(iz0,jz1);
1856             dx02             = _mm256_sub_pd(ix0,jx2);
1857             dy02             = _mm256_sub_pd(iy0,jy2);
1858             dz02             = _mm256_sub_pd(iz0,jz2);
1859             dx10             = _mm256_sub_pd(ix1,jx0);
1860             dy10             = _mm256_sub_pd(iy1,jy0);
1861             dz10             = _mm256_sub_pd(iz1,jz0);
1862             dx11             = _mm256_sub_pd(ix1,jx1);
1863             dy11             = _mm256_sub_pd(iy1,jy1);
1864             dz11             = _mm256_sub_pd(iz1,jz1);
1865             dx12             = _mm256_sub_pd(ix1,jx2);
1866             dy12             = _mm256_sub_pd(iy1,jy2);
1867             dz12             = _mm256_sub_pd(iz1,jz2);
1868             dx20             = _mm256_sub_pd(ix2,jx0);
1869             dy20             = _mm256_sub_pd(iy2,jy0);
1870             dz20             = _mm256_sub_pd(iz2,jz0);
1871             dx21             = _mm256_sub_pd(ix2,jx1);
1872             dy21             = _mm256_sub_pd(iy2,jy1);
1873             dz21             = _mm256_sub_pd(iz2,jz1);
1874             dx22             = _mm256_sub_pd(ix2,jx2);
1875             dy22             = _mm256_sub_pd(iy2,jy2);
1876             dz22             = _mm256_sub_pd(iz2,jz2);
1877
1878             /* Calculate squared distance and things based on it */
1879             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1880             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1881             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1882             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1883             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1884             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1885             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1886             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1887             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1888
1889             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1890             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
1891             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
1892             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1893             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1894             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1895             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1896             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1897             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1898
1899             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1900             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1901             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1902             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1903             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1904             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1905             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1906             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1907             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1908
1909             fjx0             = _mm256_setzero_pd();
1910             fjy0             = _mm256_setzero_pd();
1911             fjz0             = _mm256_setzero_pd();
1912             fjx1             = _mm256_setzero_pd();
1913             fjy1             = _mm256_setzero_pd();
1914             fjz1             = _mm256_setzero_pd();
1915             fjx2             = _mm256_setzero_pd();
1916             fjy2             = _mm256_setzero_pd();
1917             fjz2             = _mm256_setzero_pd();
1918
1919             /**************************
1920              * CALCULATE INTERACTIONS *
1921              **************************/
1922
1923             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1924             {
1925
1926             r00              = _mm256_mul_pd(rsq00,rinv00);
1927             r00              = _mm256_andnot_pd(dummy_mask,r00);
1928
1929             /* Calculate table index by multiplying r with the table scale and truncating to an integer */
1930             rt               = _mm256_mul_pd(r00,vftabscale);
1931             vfitab           = _mm256_cvttpd_epi32(rt);
1932             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1933             vfitab           = _mm_slli_epi32(vfitab,3);
1934
1935             /* REACTION-FIELD ELECTROSTATICS */
1936             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
1937
1938             /* CUBIC SPLINE TABLE DISPERSION */
1939             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1940             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1941             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1942             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1943             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1944             Heps             = _mm256_mul_pd(vfeps,H);
1945             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1946             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1947             fvdw6            = _mm256_mul_pd(c6_00,FF);
1948
1949             /* CUBIC SPLINE TABLE REPULSION */
1950             vfitab           = _mm_add_epi32(vfitab,ifour);
1951             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1952             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1953             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1954             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1955             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1956             Heps             = _mm256_mul_pd(vfeps,H);
1957             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1958             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1959             fvdw12           = _mm256_mul_pd(c12_00,FF);
1960             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1961
1962             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1963
1964             fscal            = _mm256_add_pd(felec,fvdw);
1965
1966             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1967
1968             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1969
1970             /* Calculate temporary vectorial force */
1971             tx               = _mm256_mul_pd(fscal,dx00);
1972             ty               = _mm256_mul_pd(fscal,dy00);
1973             tz               = _mm256_mul_pd(fscal,dz00);
1974
1975             /* Update vectorial force */
1976             fix0             = _mm256_add_pd(fix0,tx);
1977             fiy0             = _mm256_add_pd(fiy0,ty);
1978             fiz0             = _mm256_add_pd(fiz0,tz);
1979
1980             fjx0             = _mm256_add_pd(fjx0,tx);
1981             fjy0             = _mm256_add_pd(fjy0,ty);
1982             fjz0             = _mm256_add_pd(fjz0,tz);
1983
1984             }
1985
1986             /**************************
1987              * CALCULATE INTERACTIONS *
1988              **************************/
1989
1990             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1991             {
1992
1993             /* REACTION-FIELD ELECTROSTATICS */
1994             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
1995
1996             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1997
1998             fscal            = felec;
1999
2000             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2001
2002             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2003
2004             /* Calculate temporary vectorial force */
2005             tx               = _mm256_mul_pd(fscal,dx01);
2006             ty               = _mm256_mul_pd(fscal,dy01);
2007             tz               = _mm256_mul_pd(fscal,dz01);
2008
2009             /* Update vectorial force */
2010             fix0             = _mm256_add_pd(fix0,tx);
2011             fiy0             = _mm256_add_pd(fiy0,ty);
2012             fiz0             = _mm256_add_pd(fiz0,tz);
2013
2014             fjx1             = _mm256_add_pd(fjx1,tx);
2015             fjy1             = _mm256_add_pd(fjy1,ty);
2016             fjz1             = _mm256_add_pd(fjz1,tz);
2017
2018             }
2019
2020             /**************************
2021              * CALCULATE INTERACTIONS *
2022              **************************/
2023
2024             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2025             {
2026
2027             /* REACTION-FIELD ELECTROSTATICS */
2028             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
2029
2030             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2031
2032             fscal            = felec;
2033
2034             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2035
2036             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2037
2038             /* Calculate temporary vectorial force */
2039             tx               = _mm256_mul_pd(fscal,dx02);
2040             ty               = _mm256_mul_pd(fscal,dy02);
2041             tz               = _mm256_mul_pd(fscal,dz02);
2042
2043             /* Update vectorial force */
2044             fix0             = _mm256_add_pd(fix0,tx);
2045             fiy0             = _mm256_add_pd(fiy0,ty);
2046             fiz0             = _mm256_add_pd(fiz0,tz);
2047
2048             fjx2             = _mm256_add_pd(fjx2,tx);
2049             fjy2             = _mm256_add_pd(fjy2,ty);
2050             fjz2             = _mm256_add_pd(fjz2,tz);
2051
2052             }
2053
2054             /**************************
2055              * CALCULATE INTERACTIONS *
2056              **************************/
2057
2058             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2059             {
2060
2061             /* REACTION-FIELD ELECTROSTATICS */
2062             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
2063
2064             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2065
2066             fscal            = felec;
2067
2068             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2069
2070             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2071
2072             /* Calculate temporary vectorial force */
2073             tx               = _mm256_mul_pd(fscal,dx10);
2074             ty               = _mm256_mul_pd(fscal,dy10);
2075             tz               = _mm256_mul_pd(fscal,dz10);
2076
2077             /* Update vectorial force */
2078             fix1             = _mm256_add_pd(fix1,tx);
2079             fiy1             = _mm256_add_pd(fiy1,ty);
2080             fiz1             = _mm256_add_pd(fiz1,tz);
2081
2082             fjx0             = _mm256_add_pd(fjx0,tx);
2083             fjy0             = _mm256_add_pd(fjy0,ty);
2084             fjz0             = _mm256_add_pd(fjz0,tz);
2085
2086             }
2087
2088             /**************************
2089              * CALCULATE INTERACTIONS *
2090              **************************/
2091
2092             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2093             {
2094
2095             /* REACTION-FIELD ELECTROSTATICS */
2096             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
2097
2098             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2099
2100             fscal            = felec;
2101
2102             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2103
2104             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2105
2106             /* Calculate temporary vectorial force */
2107             tx               = _mm256_mul_pd(fscal,dx11);
2108             ty               = _mm256_mul_pd(fscal,dy11);
2109             tz               = _mm256_mul_pd(fscal,dz11);
2110
2111             /* Update vectorial force */
2112             fix1             = _mm256_add_pd(fix1,tx);
2113             fiy1             = _mm256_add_pd(fiy1,ty);
2114             fiz1             = _mm256_add_pd(fiz1,tz);
2115
2116             fjx1             = _mm256_add_pd(fjx1,tx);
2117             fjy1             = _mm256_add_pd(fjy1,ty);
2118             fjz1             = _mm256_add_pd(fjz1,tz);
2119
2120             }
2121
2122             /**************************
2123              * CALCULATE INTERACTIONS *
2124              **************************/
2125
2126             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2127             {
2128
2129             /* REACTION-FIELD ELECTROSTATICS */
2130             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
2131
2132             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2133
2134             fscal            = felec;
2135
2136             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2137
2138             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2139
2140             /* Calculate temporary vectorial force */
2141             tx               = _mm256_mul_pd(fscal,dx12);
2142             ty               = _mm256_mul_pd(fscal,dy12);
2143             tz               = _mm256_mul_pd(fscal,dz12);
2144
2145             /* Update vectorial force */
2146             fix1             = _mm256_add_pd(fix1,tx);
2147             fiy1             = _mm256_add_pd(fiy1,ty);
2148             fiz1             = _mm256_add_pd(fiz1,tz);
2149
2150             fjx2             = _mm256_add_pd(fjx2,tx);
2151             fjy2             = _mm256_add_pd(fjy2,ty);
2152             fjz2             = _mm256_add_pd(fjz2,tz);
2153
2154             }
2155
2156             /**************************
2157              * CALCULATE INTERACTIONS *
2158              **************************/
2159
2160             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2161             {
2162
2163             /* REACTION-FIELD ELECTROSTATICS */
2164             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
2165
2166             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2167
2168             fscal            = felec;
2169
2170             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2171
2172             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2173
2174             /* Calculate temporary vectorial force */
2175             tx               = _mm256_mul_pd(fscal,dx20);
2176             ty               = _mm256_mul_pd(fscal,dy20);
2177             tz               = _mm256_mul_pd(fscal,dz20);
2178
2179             /* Update vectorial force */
2180             fix2             = _mm256_add_pd(fix2,tx);
2181             fiy2             = _mm256_add_pd(fiy2,ty);
2182             fiz2             = _mm256_add_pd(fiz2,tz);
2183
2184             fjx0             = _mm256_add_pd(fjx0,tx);
2185             fjy0             = _mm256_add_pd(fjy0,ty);
2186             fjz0             = _mm256_add_pd(fjz0,tz);
2187
2188             }
2189
2190             /**************************
2191              * CALCULATE INTERACTIONS *
2192              **************************/
2193
2194             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2195             {
2196
2197             /* REACTION-FIELD ELECTROSTATICS */
2198             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
2199
2200             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2201
2202             fscal            = felec;
2203
2204             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2205
2206             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2207
2208             /* Calculate temporary vectorial force */
2209             tx               = _mm256_mul_pd(fscal,dx21);
2210             ty               = _mm256_mul_pd(fscal,dy21);
2211             tz               = _mm256_mul_pd(fscal,dz21);
2212
2213             /* Update vectorial force */
2214             fix2             = _mm256_add_pd(fix2,tx);
2215             fiy2             = _mm256_add_pd(fiy2,ty);
2216             fiz2             = _mm256_add_pd(fiz2,tz);
2217
2218             fjx1             = _mm256_add_pd(fjx1,tx);
2219             fjy1             = _mm256_add_pd(fjy1,ty);
2220             fjz1             = _mm256_add_pd(fjz1,tz);
2221
2222             }
2223
2224             /**************************
2225              * CALCULATE INTERACTIONS *
2226              **************************/
2227
2228             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2229             {
2230
2231             /* REACTION-FIELD ELECTROSTATICS */
2232             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
2233
2234             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2235
2236             fscal            = felec;
2237
2238             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2239
2240             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2241
2242             /* Calculate temporary vectorial force */
2243             tx               = _mm256_mul_pd(fscal,dx22);
2244             ty               = _mm256_mul_pd(fscal,dy22);
2245             tz               = _mm256_mul_pd(fscal,dz22);
2246
2247             /* Update vectorial force */
2248             fix2             = _mm256_add_pd(fix2,tx);
2249             fiy2             = _mm256_add_pd(fiy2,ty);
2250             fiz2             = _mm256_add_pd(fiz2,tz);
2251
2252             fjx2             = _mm256_add_pd(fjx2,tx);
2253             fjy2             = _mm256_add_pd(fjy2,ty);
2254             fjz2             = _mm256_add_pd(fjz2,tz);
2255
2256             }
2257
2258             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2259             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2260             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2261             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2262
2263             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2264                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2265
2266             /* Inner loop uses 298 flops */
2267         }
2268
2269         /* End of innermost loop */
2270
2271         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2272                                                  f+i_coord_offset,fshift+i_shift_offset);
2273
2274         /* Increment number of inner iterations */
2275         inneriter                  += j_index_end - j_index_start;
2276
2277         /* Outer loop uses 18 flops */
2278     }
2279
2280     /* Increment number of outer iterations */
2281     outeriter        += nri;
2282
2283     /* Update outer/inner flops */
2284
2285     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*298);
2286 }
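/*
 * Kernel-pair note (illustrative): the force-only _F_ kernel above omits the potential
 * accumulation (velecsum/vvdwsum and the per-group energy updates) performed by the
 * _VF_ kernel earlier in this file, which is why its bookkeeping charges 298 inner-loop
 * and 18 outer-loop flops versus 361 and 20; the force evaluation itself is the same.
 */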