1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
38 #include "config.h"
39
40 #include <math.h>
41
42 #include "../nb_kernel.h"
43 #include "gromacs/legacyheaders/types/simple.h"
44 #include "gromacs/math/vec.h"
45 #include "gromacs/legacyheaders/nrnb.h"
46
47 #include "gromacs/simd/math_x86_avx_256_double.h"
48 #include "kernelutil_x86_avx_256_double.h"
49
50 /*
51  * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_double
52  * Electrostatics interaction: ReactionField
53  * VdW interaction:            CubicSplineTable
54  * Geometry:                   Water3-Water3
55  * Calculate force/pot:        PotentialAndForce
56  */
57 void
58 nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_VF_avx_256_double
59                     (t_nblist                    * gmx_restrict       nlist,
60                      rvec                        * gmx_restrict          xx,
61                      rvec                        * gmx_restrict          ff,
62                      t_forcerec                  * gmx_restrict          fr,
63                      t_mdatoms                   * gmx_restrict     mdatoms,
64                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65                      t_nrnb                      * gmx_restrict        nrnb)
66 {
    /* Suffixes 0,1,2 refer to the particle indices of the three water atoms in the inner
     * or outer loop (non-water kernels use just suffix 0).
     * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
     * jnr indices corresponding to data put in the four positions in the SIMD register.
     */
72     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
73     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74     int              jnrA,jnrB,jnrC,jnrD;
75     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
79     real             rcutoff_scalar;
80     real             *shiftvec,*fshift,*x,*f;
81     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
82     real             scratch[4*DIM];
83     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
84     real *           vdwioffsetptr0;
85     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
86     real *           vdwioffsetptr1;
87     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
88     real *           vdwioffsetptr2;
89     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
90     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
91     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
92     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
93     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
94     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
95     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
96     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
97     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
98     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
99     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
100     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
101     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
102     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
103     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
104     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
105     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
106     real             *charge;
107     int              nvdwtype;
108     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
109     int              *vdwtype;
110     real             *vdwparam;
111     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
112     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
113     __m128i          vfitab;
114     __m128i          ifour       = _mm_set1_epi32(4);
115     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
116     real             *vftab;
117     __m256d          dummy_mask,cutoff_mask;
118     __m128           tmpmask0,tmpmask1;
119     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
120     __m256d          one     = _mm256_set1_pd(1.0);
121     __m256d          two     = _mm256_set1_pd(2.0);
122     x                = xx[0];
123     f                = ff[0];
124
125     nri              = nlist->nri;
126     iinr             = nlist->iinr;
127     jindex           = nlist->jindex;
128     jjnr             = nlist->jjnr;
129     shiftidx         = nlist->shift;
130     gid              = nlist->gid;
131     shiftvec         = fr->shift_vec[0];
132     fshift           = fr->fshift[0];
133     facel            = _mm256_set1_pd(fr->epsfac);
134     charge           = mdatoms->chargeA;
135     krf              = _mm256_set1_pd(fr->ic->k_rf);
136     krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
137     crf              = _mm256_set1_pd(fr->ic->c_rf);
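    /* krf and crf above are the reaction-field constants taken from the interaction data
     * (fr->ic); crf is chosen there as 1/rc + krf*rc^2 so the electrostatic potential used
     * below vanishes at the cutoff, and krf2 = 2*krf is the factor appearing in the
     * corresponding force expression.
     */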
138     nvdwtype         = fr->ntype;
139     vdwparam         = fr->nbfp;
140     vdwtype          = mdatoms->typeA;
141
142     vftab            = kernel_data->table_vdw->data;
143     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
144
145     /* Setup water-specific parameters */
146     inr              = nlist->iinr[0];
147     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
148     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
149     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
150     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
151
152     jq0              = _mm256_set1_pd(charge[inr+0]);
153     jq1              = _mm256_set1_pd(charge[inr+1]);
154     jq2              = _mm256_set1_pd(charge[inr+2]);
155     vdwjidx0A        = 2*vdwtype[inr+0];
156     qq00             = _mm256_mul_pd(iq0,jq0);
157     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
158     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
159     qq01             = _mm256_mul_pd(iq0,jq1);
160     qq02             = _mm256_mul_pd(iq0,jq2);
161     qq10             = _mm256_mul_pd(iq1,jq0);
162     qq11             = _mm256_mul_pd(iq1,jq1);
163     qq12             = _mm256_mul_pd(iq1,jq2);
164     qq20             = _mm256_mul_pd(iq2,jq0);
165     qq21             = _mm256_mul_pd(iq2,jq1);
166     qq22             = _mm256_mul_pd(iq2,jq2);
167
    /* When explicit cutoffs are used, the value must be identical for electrostatics and VdW, so we use the electrostatics cutoff as an arbitrary choice */
169     rcutoff_scalar   = fr->rcoulomb;
170     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
171     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
172
173     /* Avoid stupid compiler warnings */
174     jnrA = jnrB = jnrC = jnrD = 0;
175     j_coord_offsetA = 0;
176     j_coord_offsetB = 0;
177     j_coord_offsetC = 0;
178     j_coord_offsetD = 0;
179
180     outeriter        = 0;
181     inneriter        = 0;
182
183     for(iidx=0;iidx<4*DIM;iidx++)
184     {
185         scratch[iidx] = 0.0;
186     }
187
188     /* Start outer loop over neighborlists */
189     for(iidx=0; iidx<nri; iidx++)
190     {
191         /* Load shift vector for this list */
192         i_shift_offset   = DIM*shiftidx[iidx];
193
194         /* Load limits for loop over neighbors */
195         j_index_start    = jindex[iidx];
196         j_index_end      = jindex[iidx+1];
197
198         /* Get outer coordinate index */
199         inr              = iinr[iidx];
200         i_coord_offset   = DIM*inr;
201
202         /* Load i particle coords and add shift vector */
203         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
204                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
205
206         fix0             = _mm256_setzero_pd();
207         fiy0             = _mm256_setzero_pd();
208         fiz0             = _mm256_setzero_pd();
209         fix1             = _mm256_setzero_pd();
210         fiy1             = _mm256_setzero_pd();
211         fiz1             = _mm256_setzero_pd();
212         fix2             = _mm256_setzero_pd();
213         fiy2             = _mm256_setzero_pd();
214         fiz2             = _mm256_setzero_pd();
215
216         /* Reset potential sums */
217         velecsum         = _mm256_setzero_pd();
218         vvdwsum          = _mm256_setzero_pd();
219
220         /* Start inner kernel loop */
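        /* The loop below processes j entries four at a time, one per SIMD lane. The
         * neighbor list is padded with negative indices, so the loop only runs while
         * all four entries are real (jjnr[jidx+3]>=0); any trailing partial group is
         * handled by the masked epilogue that follows the loop.
         */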
221         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
222         {
223
224             /* Get j neighbor index, and coordinate index */
225             jnrA             = jjnr[jidx];
226             jnrB             = jjnr[jidx+1];
227             jnrC             = jjnr[jidx+2];
228             jnrD             = jjnr[jidx+3];
229             j_coord_offsetA  = DIM*jnrA;
230             j_coord_offsetB  = DIM*jnrB;
231             j_coord_offsetC  = DIM*jnrC;
232             j_coord_offsetD  = DIM*jnrD;
233
234             /* load j atom coordinates */
235             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
236                                                  x+j_coord_offsetC,x+j_coord_offsetD,
237                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
238
239             /* Calculate displacement vector */
240             dx00             = _mm256_sub_pd(ix0,jx0);
241             dy00             = _mm256_sub_pd(iy0,jy0);
242             dz00             = _mm256_sub_pd(iz0,jz0);
243             dx01             = _mm256_sub_pd(ix0,jx1);
244             dy01             = _mm256_sub_pd(iy0,jy1);
245             dz01             = _mm256_sub_pd(iz0,jz1);
246             dx02             = _mm256_sub_pd(ix0,jx2);
247             dy02             = _mm256_sub_pd(iy0,jy2);
248             dz02             = _mm256_sub_pd(iz0,jz2);
249             dx10             = _mm256_sub_pd(ix1,jx0);
250             dy10             = _mm256_sub_pd(iy1,jy0);
251             dz10             = _mm256_sub_pd(iz1,jz0);
252             dx11             = _mm256_sub_pd(ix1,jx1);
253             dy11             = _mm256_sub_pd(iy1,jy1);
254             dz11             = _mm256_sub_pd(iz1,jz1);
255             dx12             = _mm256_sub_pd(ix1,jx2);
256             dy12             = _mm256_sub_pd(iy1,jy2);
257             dz12             = _mm256_sub_pd(iz1,jz2);
258             dx20             = _mm256_sub_pd(ix2,jx0);
259             dy20             = _mm256_sub_pd(iy2,jy0);
260             dz20             = _mm256_sub_pd(iz2,jz0);
261             dx21             = _mm256_sub_pd(ix2,jx1);
262             dy21             = _mm256_sub_pd(iy2,jy1);
263             dz21             = _mm256_sub_pd(iz2,jz1);
264             dx22             = _mm256_sub_pd(ix2,jx2);
265             dy22             = _mm256_sub_pd(iy2,jy2);
266             dz22             = _mm256_sub_pd(iz2,jz2);
267
268             /* Calculate squared distance and things based on it */
269             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
270             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
271             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
272             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
273             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
274             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
275             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
276             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
277             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
278
279             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
280             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
281             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
282             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
283             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
284             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
285             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
286             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
287             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
288
289             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
290             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
291             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
292             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
293             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
294             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
295             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
296             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
297             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
298
299             fjx0             = _mm256_setzero_pd();
300             fjy0             = _mm256_setzero_pd();
301             fjz0             = _mm256_setzero_pd();
302             fjx1             = _mm256_setzero_pd();
303             fjy1             = _mm256_setzero_pd();
304             fjz1             = _mm256_setzero_pd();
305             fjx2             = _mm256_setzero_pd();
306             fjy2             = _mm256_setzero_pd();
307             fjz2             = _mm256_setzero_pd();
308
309             /**************************
310              * CALCULATE INTERACTIONS *
311              **************************/
312
313             if (gmx_mm256_any_lt(rsq00,rcutoff2))
314             {
315
316             r00              = _mm256_mul_pd(rsq00,rinv00);
317
318             /* Calculate table index by multiplying r with table scale and truncate to integer */
319             rt               = _mm256_mul_pd(r00,vftabscale);
320             vfitab           = _mm256_cvttpd_epi32(rt);
321             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
322             vfitab           = _mm_slli_epi32(vfitab,3);
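            /* Each table point holds eight doubles: Y,F,G,H for dispersion followed by
             * Y,F,G,H for repulsion. Hence the shift left by 3 (multiply by 8) here and
             * the +4 offset added before the repulsion lookup below.
             */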
323
324             /* REACTION-FIELD ELECTROSTATICS */
325             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
326             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
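            /* In scalar form this is velec = qq*(1/r + krf*r^2 - crf) and
             * felec = qq*(1/r^3 - 2*krf), i.e. -(dV/dr)/r, which is multiplied by the
             * displacement components further down to give the Cartesian force.
             */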
327
328             /* CUBIC SPLINE TABLE DISPERSION */
329             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
330             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
331             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
332             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
333             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
334             Heps             = _mm256_mul_pd(vfeps,H);
335             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
336             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
337             vvdw6            = _mm256_mul_pd(c6_00,VV);
338             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
339             fvdw6            = _mm256_mul_pd(c6_00,FF);
340
341             /* CUBIC SPLINE TABLE REPULSION */
342             vfitab           = _mm_add_epi32(vfitab,ifour);
343             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
344             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
345             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
346             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
347             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
348             Heps             = _mm256_mul_pd(vfeps,H);
349             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
350             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
351             vvdw12           = _mm256_mul_pd(c12_00,VV);
352             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
353             fvdw12           = _mm256_mul_pd(c12_00,FF);
354             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
355             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
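            /* For reference: with eps the fractional table position, the spline gives
             * VV = Y + eps*(F + eps*(G + eps*H)) and its derivative
             * FF = F + eps*(2*G + 3*eps*H). Since eps = r*tabscale - index we have
             * dV/dr = FF*tabscale, so the force scaling factor computed above is
             * -(dV/dr)/r = -FF*tabscale*rinv.
             */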
356
357             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
358
359             /* Update potential sum for this i atom from the interaction with this j atom. */
360             velec            = _mm256_and_pd(velec,cutoff_mask);
361             velecsum         = _mm256_add_pd(velecsum,velec);
362             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
363             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
364
365             fscal            = _mm256_add_pd(felec,fvdw);
366
367             fscal            = _mm256_and_pd(fscal,cutoff_mask);
368
369             /* Calculate temporary vectorial force */
370             tx               = _mm256_mul_pd(fscal,dx00);
371             ty               = _mm256_mul_pd(fscal,dy00);
372             tz               = _mm256_mul_pd(fscal,dz00);
373
374             /* Update vectorial force */
375             fix0             = _mm256_add_pd(fix0,tx);
376             fiy0             = _mm256_add_pd(fiy0,ty);
377             fiz0             = _mm256_add_pd(fiz0,tz);
378
379             fjx0             = _mm256_add_pd(fjx0,tx);
380             fjy0             = _mm256_add_pd(fjy0,ty);
381             fjz0             = _mm256_add_pd(fjz0,tz);
382
383             }
384
385             /**************************
386              * CALCULATE INTERACTIONS *
387              **************************/
388
389             if (gmx_mm256_any_lt(rsq01,rcutoff2))
390             {
391
392             /* REACTION-FIELD ELECTROSTATICS */
393             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
394             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
395
396             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
397
398             /* Update potential sum for this i atom from the interaction with this j atom. */
399             velec            = _mm256_and_pd(velec,cutoff_mask);
400             velecsum         = _mm256_add_pd(velecsum,velec);
401
402             fscal            = felec;
403
404             fscal            = _mm256_and_pd(fscal,cutoff_mask);
405
406             /* Calculate temporary vectorial force */
407             tx               = _mm256_mul_pd(fscal,dx01);
408             ty               = _mm256_mul_pd(fscal,dy01);
409             tz               = _mm256_mul_pd(fscal,dz01);
410
411             /* Update vectorial force */
412             fix0             = _mm256_add_pd(fix0,tx);
413             fiy0             = _mm256_add_pd(fiy0,ty);
414             fiz0             = _mm256_add_pd(fiz0,tz);
415
416             fjx1             = _mm256_add_pd(fjx1,tx);
417             fjy1             = _mm256_add_pd(fjy1,ty);
418             fjz1             = _mm256_add_pd(fjz1,tz);
419
420             }
421
422             /**************************
423              * CALCULATE INTERACTIONS *
424              **************************/
425
426             if (gmx_mm256_any_lt(rsq02,rcutoff2))
427             {
428
429             /* REACTION-FIELD ELECTROSTATICS */
430             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
431             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
432
433             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
434
435             /* Update potential sum for this i atom from the interaction with this j atom. */
436             velec            = _mm256_and_pd(velec,cutoff_mask);
437             velecsum         = _mm256_add_pd(velecsum,velec);
438
439             fscal            = felec;
440
441             fscal            = _mm256_and_pd(fscal,cutoff_mask);
442
443             /* Calculate temporary vectorial force */
444             tx               = _mm256_mul_pd(fscal,dx02);
445             ty               = _mm256_mul_pd(fscal,dy02);
446             tz               = _mm256_mul_pd(fscal,dz02);
447
448             /* Update vectorial force */
449             fix0             = _mm256_add_pd(fix0,tx);
450             fiy0             = _mm256_add_pd(fiy0,ty);
451             fiz0             = _mm256_add_pd(fiz0,tz);
452
453             fjx2             = _mm256_add_pd(fjx2,tx);
454             fjy2             = _mm256_add_pd(fjy2,ty);
455             fjz2             = _mm256_add_pd(fjz2,tz);
456
457             }
458
459             /**************************
460              * CALCULATE INTERACTIONS *
461              **************************/
462
463             if (gmx_mm256_any_lt(rsq10,rcutoff2))
464             {
465
466             /* REACTION-FIELD ELECTROSTATICS */
467             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
468             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
469
470             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
471
472             /* Update potential sum for this i atom from the interaction with this j atom. */
473             velec            = _mm256_and_pd(velec,cutoff_mask);
474             velecsum         = _mm256_add_pd(velecsum,velec);
475
476             fscal            = felec;
477
478             fscal            = _mm256_and_pd(fscal,cutoff_mask);
479
480             /* Calculate temporary vectorial force */
481             tx               = _mm256_mul_pd(fscal,dx10);
482             ty               = _mm256_mul_pd(fscal,dy10);
483             tz               = _mm256_mul_pd(fscal,dz10);
484
485             /* Update vectorial force */
486             fix1             = _mm256_add_pd(fix1,tx);
487             fiy1             = _mm256_add_pd(fiy1,ty);
488             fiz1             = _mm256_add_pd(fiz1,tz);
489
490             fjx0             = _mm256_add_pd(fjx0,tx);
491             fjy0             = _mm256_add_pd(fjy0,ty);
492             fjz0             = _mm256_add_pd(fjz0,tz);
493
494             }
495
496             /**************************
497              * CALCULATE INTERACTIONS *
498              **************************/
499
500             if (gmx_mm256_any_lt(rsq11,rcutoff2))
501             {
502
503             /* REACTION-FIELD ELECTROSTATICS */
504             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
505             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
506
507             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
508
509             /* Update potential sum for this i atom from the interaction with this j atom. */
510             velec            = _mm256_and_pd(velec,cutoff_mask);
511             velecsum         = _mm256_add_pd(velecsum,velec);
512
513             fscal            = felec;
514
515             fscal            = _mm256_and_pd(fscal,cutoff_mask);
516
517             /* Calculate temporary vectorial force */
518             tx               = _mm256_mul_pd(fscal,dx11);
519             ty               = _mm256_mul_pd(fscal,dy11);
520             tz               = _mm256_mul_pd(fscal,dz11);
521
522             /* Update vectorial force */
523             fix1             = _mm256_add_pd(fix1,tx);
524             fiy1             = _mm256_add_pd(fiy1,ty);
525             fiz1             = _mm256_add_pd(fiz1,tz);
526
527             fjx1             = _mm256_add_pd(fjx1,tx);
528             fjy1             = _mm256_add_pd(fjy1,ty);
529             fjz1             = _mm256_add_pd(fjz1,tz);
530
531             }
532
533             /**************************
534              * CALCULATE INTERACTIONS *
535              **************************/
536
537             if (gmx_mm256_any_lt(rsq12,rcutoff2))
538             {
539
540             /* REACTION-FIELD ELECTROSTATICS */
541             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
542             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
543
544             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
545
546             /* Update potential sum for this i atom from the interaction with this j atom. */
547             velec            = _mm256_and_pd(velec,cutoff_mask);
548             velecsum         = _mm256_add_pd(velecsum,velec);
549
550             fscal            = felec;
551
552             fscal            = _mm256_and_pd(fscal,cutoff_mask);
553
554             /* Calculate temporary vectorial force */
555             tx               = _mm256_mul_pd(fscal,dx12);
556             ty               = _mm256_mul_pd(fscal,dy12);
557             tz               = _mm256_mul_pd(fscal,dz12);
558
559             /* Update vectorial force */
560             fix1             = _mm256_add_pd(fix1,tx);
561             fiy1             = _mm256_add_pd(fiy1,ty);
562             fiz1             = _mm256_add_pd(fiz1,tz);
563
564             fjx2             = _mm256_add_pd(fjx2,tx);
565             fjy2             = _mm256_add_pd(fjy2,ty);
566             fjz2             = _mm256_add_pd(fjz2,tz);
567
568             }
569
570             /**************************
571              * CALCULATE INTERACTIONS *
572              **************************/
573
574             if (gmx_mm256_any_lt(rsq20,rcutoff2))
575             {
576
577             /* REACTION-FIELD ELECTROSTATICS */
578             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
579             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
580
581             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
582
583             /* Update potential sum for this i atom from the interaction with this j atom. */
584             velec            = _mm256_and_pd(velec,cutoff_mask);
585             velecsum         = _mm256_add_pd(velecsum,velec);
586
587             fscal            = felec;
588
589             fscal            = _mm256_and_pd(fscal,cutoff_mask);
590
591             /* Calculate temporary vectorial force */
592             tx               = _mm256_mul_pd(fscal,dx20);
593             ty               = _mm256_mul_pd(fscal,dy20);
594             tz               = _mm256_mul_pd(fscal,dz20);
595
596             /* Update vectorial force */
597             fix2             = _mm256_add_pd(fix2,tx);
598             fiy2             = _mm256_add_pd(fiy2,ty);
599             fiz2             = _mm256_add_pd(fiz2,tz);
600
601             fjx0             = _mm256_add_pd(fjx0,tx);
602             fjy0             = _mm256_add_pd(fjy0,ty);
603             fjz0             = _mm256_add_pd(fjz0,tz);
604
605             }
606
607             /**************************
608              * CALCULATE INTERACTIONS *
609              **************************/
610
611             if (gmx_mm256_any_lt(rsq21,rcutoff2))
612             {
613
614             /* REACTION-FIELD ELECTROSTATICS */
615             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
616             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
617
618             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
619
620             /* Update potential sum for this i atom from the interaction with this j atom. */
621             velec            = _mm256_and_pd(velec,cutoff_mask);
622             velecsum         = _mm256_add_pd(velecsum,velec);
623
624             fscal            = felec;
625
626             fscal            = _mm256_and_pd(fscal,cutoff_mask);
627
628             /* Calculate temporary vectorial force */
629             tx               = _mm256_mul_pd(fscal,dx21);
630             ty               = _mm256_mul_pd(fscal,dy21);
631             tz               = _mm256_mul_pd(fscal,dz21);
632
633             /* Update vectorial force */
634             fix2             = _mm256_add_pd(fix2,tx);
635             fiy2             = _mm256_add_pd(fiy2,ty);
636             fiz2             = _mm256_add_pd(fiz2,tz);
637
638             fjx1             = _mm256_add_pd(fjx1,tx);
639             fjy1             = _mm256_add_pd(fjy1,ty);
640             fjz1             = _mm256_add_pd(fjz1,tz);
641
642             }
643
644             /**************************
645              * CALCULATE INTERACTIONS *
646              **************************/
647
648             if (gmx_mm256_any_lt(rsq22,rcutoff2))
649             {
650
651             /* REACTION-FIELD ELECTROSTATICS */
652             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
653             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
654
655             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
656
657             /* Update potential sum for this i atom from the interaction with this j atom. */
658             velec            = _mm256_and_pd(velec,cutoff_mask);
659             velecsum         = _mm256_add_pd(velecsum,velec);
660
661             fscal            = felec;
662
663             fscal            = _mm256_and_pd(fscal,cutoff_mask);
664
665             /* Calculate temporary vectorial force */
666             tx               = _mm256_mul_pd(fscal,dx22);
667             ty               = _mm256_mul_pd(fscal,dy22);
668             tz               = _mm256_mul_pd(fscal,dz22);
669
670             /* Update vectorial force */
671             fix2             = _mm256_add_pd(fix2,tx);
672             fiy2             = _mm256_add_pd(fiy2,ty);
673             fiz2             = _mm256_add_pd(fiz2,tz);
674
675             fjx2             = _mm256_add_pd(fjx2,tx);
676             fjy2             = _mm256_add_pd(fjy2,ty);
677             fjz2             = _mm256_add_pd(fjz2,tz);
678
679             }
680
681             fjptrA             = f+j_coord_offsetA;
682             fjptrB             = f+j_coord_offsetB;
683             fjptrC             = f+j_coord_offsetC;
684             fjptrD             = f+j_coord_offsetD;
685
686             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
687                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
688
689             /* Inner loop uses 360 flops */
690         }
691
692         if(jidx<j_index_end)
693         {
694
695             /* Get j neighbor index, and coordinate index */
696             jnrlistA         = jjnr[jidx];
697             jnrlistB         = jjnr[jidx+1];
698             jnrlistC         = jjnr[jidx+2];
699             jnrlistD         = jjnr[jidx+3];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
             */
704             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
705
706             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
707             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
708             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
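            /* The 4x32-bit comparison result is expanded to the four 64-bit lanes by
             * duplicating each 32-bit element into both halves of the corresponding
             * double, so the same mask can be applied to the double-precision data.
             */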
709
710             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
711             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
712             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
713             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
714             j_coord_offsetA  = DIM*jnrA;
715             j_coord_offsetB  = DIM*jnrB;
716             j_coord_offsetC  = DIM*jnrC;
717             j_coord_offsetD  = DIM*jnrD;
718
719             /* load j atom coordinates */
720             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
721                                                  x+j_coord_offsetC,x+j_coord_offsetD,
722                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
723
724             /* Calculate displacement vector */
725             dx00             = _mm256_sub_pd(ix0,jx0);
726             dy00             = _mm256_sub_pd(iy0,jy0);
727             dz00             = _mm256_sub_pd(iz0,jz0);
728             dx01             = _mm256_sub_pd(ix0,jx1);
729             dy01             = _mm256_sub_pd(iy0,jy1);
730             dz01             = _mm256_sub_pd(iz0,jz1);
731             dx02             = _mm256_sub_pd(ix0,jx2);
732             dy02             = _mm256_sub_pd(iy0,jy2);
733             dz02             = _mm256_sub_pd(iz0,jz2);
734             dx10             = _mm256_sub_pd(ix1,jx0);
735             dy10             = _mm256_sub_pd(iy1,jy0);
736             dz10             = _mm256_sub_pd(iz1,jz0);
737             dx11             = _mm256_sub_pd(ix1,jx1);
738             dy11             = _mm256_sub_pd(iy1,jy1);
739             dz11             = _mm256_sub_pd(iz1,jz1);
740             dx12             = _mm256_sub_pd(ix1,jx2);
741             dy12             = _mm256_sub_pd(iy1,jy2);
742             dz12             = _mm256_sub_pd(iz1,jz2);
743             dx20             = _mm256_sub_pd(ix2,jx0);
744             dy20             = _mm256_sub_pd(iy2,jy0);
745             dz20             = _mm256_sub_pd(iz2,jz0);
746             dx21             = _mm256_sub_pd(ix2,jx1);
747             dy21             = _mm256_sub_pd(iy2,jy1);
748             dz21             = _mm256_sub_pd(iz2,jz1);
749             dx22             = _mm256_sub_pd(ix2,jx2);
750             dy22             = _mm256_sub_pd(iy2,jy2);
751             dz22             = _mm256_sub_pd(iz2,jz2);
752
753             /* Calculate squared distance and things based on it */
754             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
755             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
756             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
757             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
758             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
759             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
760             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
761             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
762             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
763
764             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
765             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
766             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
767             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
768             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
769             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
770             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
771             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
772             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
773
774             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
775             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
776             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
777             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
778             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
779             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
780             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
781             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
782             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
783
784             fjx0             = _mm256_setzero_pd();
785             fjy0             = _mm256_setzero_pd();
786             fjz0             = _mm256_setzero_pd();
787             fjx1             = _mm256_setzero_pd();
788             fjy1             = _mm256_setzero_pd();
789             fjz1             = _mm256_setzero_pd();
790             fjx2             = _mm256_setzero_pd();
791             fjy2             = _mm256_setzero_pd();
792             fjz2             = _mm256_setzero_pd();
793
794             /**************************
795              * CALCULATE INTERACTIONS *
796              **************************/
797
798             if (gmx_mm256_any_lt(rsq00,rcutoff2))
799             {
800
801             r00              = _mm256_mul_pd(rsq00,rinv00);
802             r00              = _mm256_andnot_pd(dummy_mask,r00);
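            /* Clear r for the padded (dummy) lanes so the table index derived from it
             * below stays at a valid value (zero) instead of reflecting whatever
             * coordinates were loaded for the padding entries.
             */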
803
804             /* Calculate table index by multiplying r with table scale and truncate to integer */
805             rt               = _mm256_mul_pd(r00,vftabscale);
806             vfitab           = _mm256_cvttpd_epi32(rt);
807             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
808             vfitab           = _mm_slli_epi32(vfitab,3);
809
810             /* REACTION-FIELD ELECTROSTATICS */
811             velec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_add_pd(rinv00,_mm256_mul_pd(krf,rsq00)),crf));
812             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
813
814             /* CUBIC SPLINE TABLE DISPERSION */
815             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
816             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
817             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
818             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
819             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
820             Heps             = _mm256_mul_pd(vfeps,H);
821             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
822             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
823             vvdw6            = _mm256_mul_pd(c6_00,VV);
824             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
825             fvdw6            = _mm256_mul_pd(c6_00,FF);
826
827             /* CUBIC SPLINE TABLE REPULSION */
828             vfitab           = _mm_add_epi32(vfitab,ifour);
829             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
830             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
831             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
832             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
833             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
834             Heps             = _mm256_mul_pd(vfeps,H);
835             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
836             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
837             vvdw12           = _mm256_mul_pd(c12_00,VV);
838             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
839             fvdw12           = _mm256_mul_pd(c12_00,FF);
840             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
841             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
842
843             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
844
845             /* Update potential sum for this i atom from the interaction with this j atom. */
846             velec            = _mm256_and_pd(velec,cutoff_mask);
847             velec            = _mm256_andnot_pd(dummy_mask,velec);
848             velecsum         = _mm256_add_pd(velecsum,velec);
849             vvdw             = _mm256_and_pd(vvdw,cutoff_mask);
850             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
851             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
852
853             fscal            = _mm256_add_pd(felec,fvdw);
854
855             fscal            = _mm256_and_pd(fscal,cutoff_mask);
856
857             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
858
859             /* Calculate temporary vectorial force */
860             tx               = _mm256_mul_pd(fscal,dx00);
861             ty               = _mm256_mul_pd(fscal,dy00);
862             tz               = _mm256_mul_pd(fscal,dz00);
863
864             /* Update vectorial force */
865             fix0             = _mm256_add_pd(fix0,tx);
866             fiy0             = _mm256_add_pd(fiy0,ty);
867             fiz0             = _mm256_add_pd(fiz0,tz);
868
869             fjx0             = _mm256_add_pd(fjx0,tx);
870             fjy0             = _mm256_add_pd(fjy0,ty);
871             fjz0             = _mm256_add_pd(fjz0,tz);
872
873             }
874
875             /**************************
876              * CALCULATE INTERACTIONS *
877              **************************/
878
879             if (gmx_mm256_any_lt(rsq01,rcutoff2))
880             {
881
882             /* REACTION-FIELD ELECTROSTATICS */
883             velec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_add_pd(rinv01,_mm256_mul_pd(krf,rsq01)),crf));
884             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
885
886             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
887
888             /* Update potential sum for this i atom from the interaction with this j atom. */
889             velec            = _mm256_and_pd(velec,cutoff_mask);
890             velec            = _mm256_andnot_pd(dummy_mask,velec);
891             velecsum         = _mm256_add_pd(velecsum,velec);
892
893             fscal            = felec;
894
895             fscal            = _mm256_and_pd(fscal,cutoff_mask);
896
897             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
898
899             /* Calculate temporary vectorial force */
900             tx               = _mm256_mul_pd(fscal,dx01);
901             ty               = _mm256_mul_pd(fscal,dy01);
902             tz               = _mm256_mul_pd(fscal,dz01);
903
904             /* Update vectorial force */
905             fix0             = _mm256_add_pd(fix0,tx);
906             fiy0             = _mm256_add_pd(fiy0,ty);
907             fiz0             = _mm256_add_pd(fiz0,tz);
908
909             fjx1             = _mm256_add_pd(fjx1,tx);
910             fjy1             = _mm256_add_pd(fjy1,ty);
911             fjz1             = _mm256_add_pd(fjz1,tz);
912
913             }
914
915             /**************************
916              * CALCULATE INTERACTIONS *
917              **************************/
918
919             if (gmx_mm256_any_lt(rsq02,rcutoff2))
920             {
921
922             /* REACTION-FIELD ELECTROSTATICS */
923             velec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_add_pd(rinv02,_mm256_mul_pd(krf,rsq02)),crf));
924             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
925
926             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
927
928             /* Update potential sum for this i atom from the interaction with this j atom. */
929             velec            = _mm256_and_pd(velec,cutoff_mask);
930             velec            = _mm256_andnot_pd(dummy_mask,velec);
931             velecsum         = _mm256_add_pd(velecsum,velec);
932
933             fscal            = felec;
934
935             fscal            = _mm256_and_pd(fscal,cutoff_mask);
936
937             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
938
939             /* Calculate temporary vectorial force */
940             tx               = _mm256_mul_pd(fscal,dx02);
941             ty               = _mm256_mul_pd(fscal,dy02);
942             tz               = _mm256_mul_pd(fscal,dz02);
943
944             /* Update vectorial force */
945             fix0             = _mm256_add_pd(fix0,tx);
946             fiy0             = _mm256_add_pd(fiy0,ty);
947             fiz0             = _mm256_add_pd(fiz0,tz);
948
949             fjx2             = _mm256_add_pd(fjx2,tx);
950             fjy2             = _mm256_add_pd(fjy2,ty);
951             fjz2             = _mm256_add_pd(fjz2,tz);
952
953             }
954
955             /**************************
956              * CALCULATE INTERACTIONS *
957              **************************/
958
959             if (gmx_mm256_any_lt(rsq10,rcutoff2))
960             {
961
962             /* REACTION-FIELD ELECTROSTATICS */
963             velec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_add_pd(rinv10,_mm256_mul_pd(krf,rsq10)),crf));
964             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
965
966             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
967
968             /* Update potential sum for this i atom from the interaction with this j atom. */
969             velec            = _mm256_and_pd(velec,cutoff_mask);
970             velec            = _mm256_andnot_pd(dummy_mask,velec);
971             velecsum         = _mm256_add_pd(velecsum,velec);
972
973             fscal            = felec;
974
975             fscal            = _mm256_and_pd(fscal,cutoff_mask);
976
977             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
978
979             /* Calculate temporary vectorial force */
980             tx               = _mm256_mul_pd(fscal,dx10);
981             ty               = _mm256_mul_pd(fscal,dy10);
982             tz               = _mm256_mul_pd(fscal,dz10);
983
984             /* Update vectorial force */
985             fix1             = _mm256_add_pd(fix1,tx);
986             fiy1             = _mm256_add_pd(fiy1,ty);
987             fiz1             = _mm256_add_pd(fiz1,tz);
988
989             fjx0             = _mm256_add_pd(fjx0,tx);
990             fjy0             = _mm256_add_pd(fjy0,ty);
991             fjz0             = _mm256_add_pd(fjz0,tz);
992
993             }
994
995             /**************************
996              * CALCULATE INTERACTIONS *
997              **************************/
998
999             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1000             {
1001
1002             /* REACTION-FIELD ELECTROSTATICS */
1003             velec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_add_pd(rinv11,_mm256_mul_pd(krf,rsq11)),crf));
1004             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
1005
1006             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1007
1008             /* Update potential sum for this i atom from the interaction with this j atom. */
1009             velec            = _mm256_and_pd(velec,cutoff_mask);
1010             velec            = _mm256_andnot_pd(dummy_mask,velec);
1011             velecsum         = _mm256_add_pd(velecsum,velec);
1012
1013             fscal            = felec;
1014
1015             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1016
1017             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1018
1019             /* Calculate temporary vectorial force */
1020             tx               = _mm256_mul_pd(fscal,dx11);
1021             ty               = _mm256_mul_pd(fscal,dy11);
1022             tz               = _mm256_mul_pd(fscal,dz11);
1023
1024             /* Update vectorial force */
1025             fix1             = _mm256_add_pd(fix1,tx);
1026             fiy1             = _mm256_add_pd(fiy1,ty);
1027             fiz1             = _mm256_add_pd(fiz1,tz);
1028
1029             fjx1             = _mm256_add_pd(fjx1,tx);
1030             fjy1             = _mm256_add_pd(fjy1,ty);
1031             fjz1             = _mm256_add_pd(fjz1,tz);
1032
1033             }
1034
1035             /**************************
1036              * CALCULATE INTERACTIONS *
1037              **************************/
1038
1039             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1040             {
1041
1042             /* REACTION-FIELD ELECTROSTATICS */
1043             velec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_add_pd(rinv12,_mm256_mul_pd(krf,rsq12)),crf));
1044             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
1045
1046             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1047
1048             /* Update potential sum for this i atom from the interaction with this j atom. */
1049             velec            = _mm256_and_pd(velec,cutoff_mask);
1050             velec            = _mm256_andnot_pd(dummy_mask,velec);
1051             velecsum         = _mm256_add_pd(velecsum,velec);
1052
1053             fscal            = felec;
1054
1055             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1056
1057             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1058
1059             /* Calculate temporary vectorial force */
1060             tx               = _mm256_mul_pd(fscal,dx12);
1061             ty               = _mm256_mul_pd(fscal,dy12);
1062             tz               = _mm256_mul_pd(fscal,dz12);
1063
1064             /* Update vectorial force */
1065             fix1             = _mm256_add_pd(fix1,tx);
1066             fiy1             = _mm256_add_pd(fiy1,ty);
1067             fiz1             = _mm256_add_pd(fiz1,tz);
1068
1069             fjx2             = _mm256_add_pd(fjx2,tx);
1070             fjy2             = _mm256_add_pd(fjy2,ty);
1071             fjz2             = _mm256_add_pd(fjz2,tz);
1072
1073             }
1074
1075             /**************************
1076              * CALCULATE INTERACTIONS *
1077              **************************/
1078
1079             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1080             {
1081
1082             /* REACTION-FIELD ELECTROSTATICS */
1083             velec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_add_pd(rinv20,_mm256_mul_pd(krf,rsq20)),crf));
1084             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
1085
1086             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1087
1088             /* Update potential sum for this i atom from the interaction with this j atom. */
1089             velec            = _mm256_and_pd(velec,cutoff_mask);
1090             velec            = _mm256_andnot_pd(dummy_mask,velec);
1091             velecsum         = _mm256_add_pd(velecsum,velec);
1092
1093             fscal            = felec;
1094
1095             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1096
1097             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1098
1099             /* Calculate temporary vectorial force */
1100             tx               = _mm256_mul_pd(fscal,dx20);
1101             ty               = _mm256_mul_pd(fscal,dy20);
1102             tz               = _mm256_mul_pd(fscal,dz20);
1103
1104             /* Update vectorial force */
1105             fix2             = _mm256_add_pd(fix2,tx);
1106             fiy2             = _mm256_add_pd(fiy2,ty);
1107             fiz2             = _mm256_add_pd(fiz2,tz);
1108
1109             fjx0             = _mm256_add_pd(fjx0,tx);
1110             fjy0             = _mm256_add_pd(fjy0,ty);
1111             fjz0             = _mm256_add_pd(fjz0,tz);
1112
1113             }
1114
1115             /**************************
1116              * CALCULATE INTERACTIONS *
1117              **************************/
1118
1119             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1120             {
1121
1122             /* REACTION-FIELD ELECTROSTATICS */
1123             velec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_add_pd(rinv21,_mm256_mul_pd(krf,rsq21)),crf));
1124             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
1125
1126             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1127
1128             /* Update potential sum for this i atom from the interaction with this j atom. */
1129             velec            = _mm256_and_pd(velec,cutoff_mask);
1130             velec            = _mm256_andnot_pd(dummy_mask,velec);
1131             velecsum         = _mm256_add_pd(velecsum,velec);
1132
1133             fscal            = felec;
1134
1135             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1136
1137             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1138
1139             /* Calculate temporary vectorial force */
1140             tx               = _mm256_mul_pd(fscal,dx21);
1141             ty               = _mm256_mul_pd(fscal,dy21);
1142             tz               = _mm256_mul_pd(fscal,dz21);
1143
1144             /* Update vectorial force */
1145             fix2             = _mm256_add_pd(fix2,tx);
1146             fiy2             = _mm256_add_pd(fiy2,ty);
1147             fiz2             = _mm256_add_pd(fiz2,tz);
1148
1149             fjx1             = _mm256_add_pd(fjx1,tx);
1150             fjy1             = _mm256_add_pd(fjy1,ty);
1151             fjz1             = _mm256_add_pd(fjz1,tz);
1152
1153             }
1154
1155             /**************************
1156              * CALCULATE INTERACTIONS *
1157              **************************/
1158
1159             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1160             {
1161
1162             /* REACTION-FIELD ELECTROSTATICS */
1163             velec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_add_pd(rinv22,_mm256_mul_pd(krf,rsq22)),crf));
1164             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
1165
1166             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1167
1168             /* Update potential sum for this i atom from the interaction with this j atom. */
1169             velec            = _mm256_and_pd(velec,cutoff_mask);
1170             velec            = _mm256_andnot_pd(dummy_mask,velec);
1171             velecsum         = _mm256_add_pd(velecsum,velec);
1172
1173             fscal            = felec;
1174
1175             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1176
1177             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1178
1179             /* Calculate temporary vectorial force */
1180             tx               = _mm256_mul_pd(fscal,dx22);
1181             ty               = _mm256_mul_pd(fscal,dy22);
1182             tz               = _mm256_mul_pd(fscal,dz22);
1183
1184             /* Update vectorial force */
1185             fix2             = _mm256_add_pd(fix2,tx);
1186             fiy2             = _mm256_add_pd(fiy2,ty);
1187             fiz2             = _mm256_add_pd(fiz2,tz);
1188
1189             fjx2             = _mm256_add_pd(fjx2,tx);
1190             fjy2             = _mm256_add_pd(fjy2,ty);
1191             fjz2             = _mm256_add_pd(fjz2,tz);
1192
1193             }
1194
1195             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1196             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1197             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1198             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
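                 /* Padding lanes (negative jnrlist entries) are pointed at the local scratch
                  * buffer, so the 4-pointer force decrement below can store unconditionally
                  * without touching forces of real atoms.
                  */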
1199
1200             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1201                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1202
1203             /* Inner loop uses 361 flops */
1204         }
1205
1206         /* End of innermost loop */
1207
1208         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1209                                                  f+i_coord_offset,fshift+i_shift_offset);
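         /* This helper (see kernelutil_x86_avx_256_double.h) transposes the accumulators,
          * sums the four SIMD lanes for each of the nine i-atom force components, and adds
          * the result to f[] as well as to the shift-force array used for the virial.
          */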
1210
1211         ggid                        = gid[iidx];
1212         /* Update potential energies */
1213         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1214         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
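         /* Each update_1pot call reduces the four double lanes to a single sum and adds it
          * to the energy-group accumulator selected by ggid.
          */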
1215
1216         /* Increment number of inner iterations */
1217         inneriter                  += j_index_end - j_index_start;
1218
1219         /* Outer loop uses 20 flops */
1220     }
1221
1222     /* Increment number of outer iterations */
1223     outeriter        += nri;
1224
1225     /* Update outer/inner flops */
1226
1227     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*361);
1228 }
1229 /*
1230  * Gromacs nonbonded kernel:   nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_double
1231  * Electrostatics interaction: ReactionField
1232  * VdW interaction:            CubicSplineTable
1233  * Geometry:                   Water3-Water3
1234  * Calculate force/pot:        Force
1235  */
1236 void
1237 nb_kernel_ElecRFCut_VdwCSTab_GeomW3W3_F_avx_256_double
1238                     (t_nblist                    * gmx_restrict       nlist,
1239                      rvec                        * gmx_restrict          xx,
1240                      rvec                        * gmx_restrict          ff,
1241                      t_forcerec                  * gmx_restrict          fr,
1242                      t_mdatoms                   * gmx_restrict     mdatoms,
1243                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1244                      t_nrnb                      * gmx_restrict        nrnb)
1245 {
1246     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1247      * just 0 for non-waters.
1248      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1249      * jnr indices corresponding to data put in the four positions in the SIMD register.
1250      */
1251     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1252     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1253     int              jnrA,jnrB,jnrC,jnrD;
1254     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1255     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1256     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1257     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1258     real             rcutoff_scalar;
1259     real             *shiftvec,*fshift,*x,*f;
1260     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1261     real             scratch[4*DIM];
1262     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1263     real *           vdwioffsetptr0;
1264     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1265     real *           vdwioffsetptr1;
1266     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1267     real *           vdwioffsetptr2;
1268     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1269     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1270     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1271     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1272     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1273     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1274     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1275     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1276     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1277     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1278     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1279     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1280     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1281     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1282     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1283     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1284     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
1285     real             *charge;
1286     int              nvdwtype;
1287     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1288     int              *vdwtype;
1289     real             *vdwparam;
1290     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
1291     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
1292     __m128i          vfitab;
1293     __m128i          ifour       = _mm_set1_epi32(4);
1294     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1295     real             *vftab;
1296     __m256d          dummy_mask,cutoff_mask;
1297     __m128           tmpmask0,tmpmask1;
1298     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1299     __m256d          one     = _mm256_set1_pd(1.0);
1300     __m256d          two     = _mm256_set1_pd(2.0);
1301     x                = xx[0];
1302     f                = ff[0];
1303
1304     nri              = nlist->nri;
1305     iinr             = nlist->iinr;
1306     jindex           = nlist->jindex;
1307     jjnr             = nlist->jjnr;
1308     shiftidx         = nlist->shift;
1309     gid              = nlist->gid;
1310     shiftvec         = fr->shift_vec[0];
1311     fshift           = fr->fshift[0];
1312     facel            = _mm256_set1_pd(fr->epsfac);
1313     charge           = mdatoms->chargeA;
1314     krf              = _mm256_set1_pd(fr->ic->k_rf);
1315     krf2             = _mm256_set1_pd(fr->ic->k_rf*2.0);
1316     crf              = _mm256_set1_pd(fr->ic->c_rf);
1317     nvdwtype         = fr->ntype;
1318     vdwparam         = fr->nbfp;
1319     vdwtype          = mdatoms->typeA;
1320
1321     vftab            = kernel_data->table_vdw->data;
1322     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
1323
1324     /* Setup water-specific parameters */
1325     inr              = nlist->iinr[0];
1326     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1327     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1328     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1329     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1330
1331     jq0              = _mm256_set1_pd(charge[inr+0]);
1332     jq1              = _mm256_set1_pd(charge[inr+1]);
1333     jq2              = _mm256_set1_pd(charge[inr+2]);
1334     vdwjidx0A        = 2*vdwtype[inr+0];
1335     qq00             = _mm256_mul_pd(iq0,jq0);
1336     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1337     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1338     qq01             = _mm256_mul_pd(iq0,jq1);
1339     qq02             = _mm256_mul_pd(iq0,jq2);
1340     qq10             = _mm256_mul_pd(iq1,jq0);
1341     qq11             = _mm256_mul_pd(iq1,jq1);
1342     qq12             = _mm256_mul_pd(iq1,jq2);
1343     qq20             = _mm256_mul_pd(iq2,jq0);
1344     qq21             = _mm256_mul_pd(iq2,jq1);
1345     qq22             = _mm256_mul_pd(iq2,jq2);
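         /* Since both the i and j molecules are the same 3-site water, all nine charge
          * products and the single O-O Lennard-Jones pair (c6_00/c12_00) can be precomputed
          * once here instead of inside the loops.
          */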
1346
1347     /* When we use explicit cutoffs the value must be identical for electrostatics and VdW, so we use the electrostatics cutoff as an arbitrary choice */
1348     rcutoff_scalar   = fr->rcoulomb;
1349     rcutoff          = _mm256_set1_pd(rcutoff_scalar);
1350     rcutoff2         = _mm256_mul_pd(rcutoff,rcutoff);
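         /* The loops compare squared distances against rcutoff2, which avoids taking a
          * square root just to test the cutoff.
          */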
1351
1352     /* Initialize to avoid spurious compiler warnings about possibly uninitialized variables */
1353     jnrA = jnrB = jnrC = jnrD = 0;
1354     j_coord_offsetA = 0;
1355     j_coord_offsetB = 0;
1356     j_coord_offsetC = 0;
1357     j_coord_offsetD = 0;
1358
1359     outeriter        = 0;
1360     inneriter        = 0;
1361
1362     for(iidx=0;iidx<4*DIM;iidx++)
1363     {
1364         scratch[iidx] = 0.0;
1365     }
1366
1367     /* Start outer loop over neighborlists */
1368     for(iidx=0; iidx<nri; iidx++)
1369     {
1370         /* Load shift vector for this list */
1371         i_shift_offset   = DIM*shiftidx[iidx];
1372
1373         /* Load limits for loop over neighbors */
1374         j_index_start    = jindex[iidx];
1375         j_index_end      = jindex[iidx+1];
1376
1377         /* Get outer coordinate index */
1378         inr              = iinr[iidx];
1379         i_coord_offset   = DIM*inr;
1380
1381         /* Load i particle coords and add shift vector */
1382         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1383                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
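         /* The helper above adds the shift vector to the three i-atom positions and
          * broadcasts each coordinate to all four SIMD lanes, since the same i atoms are
          * paired with four different j atoms.
          */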
1384
1385         fix0             = _mm256_setzero_pd();
1386         fiy0             = _mm256_setzero_pd();
1387         fiz0             = _mm256_setzero_pd();
1388         fix1             = _mm256_setzero_pd();
1389         fiy1             = _mm256_setzero_pd();
1390         fiz1             = _mm256_setzero_pd();
1391         fix2             = _mm256_setzero_pd();
1392         fiy2             = _mm256_setzero_pd();
1393         fiz2             = _mm256_setzero_pd();
1394
1395         /* Start inner kernel loop */
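         /* This loop handles four j entries per iteration as long as all four indices are
          * real (jjnr >= 0); the final, partially filled quadruplet with negative padding
          * indices is treated by the masked epilogue block further down.
          */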
1396         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1397         {
1398
1399             /* Get j neighbor index, and coordinate index */
1400             jnrA             = jjnr[jidx];
1401             jnrB             = jjnr[jidx+1];
1402             jnrC             = jjnr[jidx+2];
1403             jnrD             = jjnr[jidx+3];
1404             j_coord_offsetA  = DIM*jnrA;
1405             j_coord_offsetB  = DIM*jnrB;
1406             j_coord_offsetC  = DIM*jnrC;
1407             j_coord_offsetD  = DIM*jnrD;
1408
1409             /* load j atom coordinates */
1410             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1411                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1412                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1413
1414             /* Calculate displacement vector */
1415             dx00             = _mm256_sub_pd(ix0,jx0);
1416             dy00             = _mm256_sub_pd(iy0,jy0);
1417             dz00             = _mm256_sub_pd(iz0,jz0);
1418             dx01             = _mm256_sub_pd(ix0,jx1);
1419             dy01             = _mm256_sub_pd(iy0,jy1);
1420             dz01             = _mm256_sub_pd(iz0,jz1);
1421             dx02             = _mm256_sub_pd(ix0,jx2);
1422             dy02             = _mm256_sub_pd(iy0,jy2);
1423             dz02             = _mm256_sub_pd(iz0,jz2);
1424             dx10             = _mm256_sub_pd(ix1,jx0);
1425             dy10             = _mm256_sub_pd(iy1,jy0);
1426             dz10             = _mm256_sub_pd(iz1,jz0);
1427             dx11             = _mm256_sub_pd(ix1,jx1);
1428             dy11             = _mm256_sub_pd(iy1,jy1);
1429             dz11             = _mm256_sub_pd(iz1,jz1);
1430             dx12             = _mm256_sub_pd(ix1,jx2);
1431             dy12             = _mm256_sub_pd(iy1,jy2);
1432             dz12             = _mm256_sub_pd(iz1,jz2);
1433             dx20             = _mm256_sub_pd(ix2,jx0);
1434             dy20             = _mm256_sub_pd(iy2,jy0);
1435             dz20             = _mm256_sub_pd(iz2,jz0);
1436             dx21             = _mm256_sub_pd(ix2,jx1);
1437             dy21             = _mm256_sub_pd(iy2,jy1);
1438             dz21             = _mm256_sub_pd(iz2,jz1);
1439             dx22             = _mm256_sub_pd(ix2,jx2);
1440             dy22             = _mm256_sub_pd(iy2,jy2);
1441             dz22             = _mm256_sub_pd(iz2,jz2);
1442
1443             /* Calculate squared distance and things based on it */
1444             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1445             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1446             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1447             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1448             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1449             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1450             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1451             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1452             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1453
1454             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1455             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
1456             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
1457             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1458             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1459             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1460             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1461             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1462             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1463
1464             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1465             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1466             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1467             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1468             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1469             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1470             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1471             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1472             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1473
1474             fjx0             = _mm256_setzero_pd();
1475             fjy0             = _mm256_setzero_pd();
1476             fjz0             = _mm256_setzero_pd();
1477             fjx1             = _mm256_setzero_pd();
1478             fjy1             = _mm256_setzero_pd();
1479             fjz1             = _mm256_setzero_pd();
1480             fjx2             = _mm256_setzero_pd();
1481             fjy2             = _mm256_setzero_pd();
1482             fjz2             = _mm256_setzero_pd();
1483
1484             /**************************
1485              * CALCULATE INTERACTIONS *
1486              **************************/
1487
1488             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1489             {
1490
1491             r00              = _mm256_mul_pd(rsq00,rinv00);
1492
1493             /* Calculate table index by multiplying r by the table scale and truncating to an integer */
1494             rt               = _mm256_mul_pd(r00,vftabscale);
1495             vfitab           = _mm256_cvttpd_epi32(rt);
1496             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1497             vfitab           = _mm_slli_epi32(vfitab,3);
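                 /* Table layout, as used here: each table point stores four cubic-spline
                  * coefficients Y,F,G,H for dispersion followed by four for repulsion, i.e.
                  * a stride of 8 doubles - hence the shift by 3 above and the +4 offset
                  * before the repulsion lookup. vfeps is the fractional part of r*scale
                  * used for interpolation.
                  */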
1498
1499             /* REACTION-FIELD ELECTROSTATICS */
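                 /* Only the force part of the reaction-field term is needed in this
                  * force-only kernel: felec = qq*(1/r^3 - 2*k_rf).
                  */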
1500             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
1501
1502             /* CUBIC SPLINE TABLE DISPERSION */
1503             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1504             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1505             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1506             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1507             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1508             Heps             = _mm256_mul_pd(vfeps,H);
1509             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1510             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1511             fvdw6            = _mm256_mul_pd(c6_00,FF);
1512
1513             /* CUBIC SPLINE TABLE REPULSION */
1514             vfitab           = _mm_add_epi32(vfitab,ifour);
1515             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1516             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1517             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1518             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1519             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1520             Heps             = _mm256_mul_pd(vfeps,H);
1521             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1522             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1523             fvdw12           = _mm256_mul_pd(c12_00,FF);
1524             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
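                 /* FF = F + 2*G*eps + 3*H*eps^2 is the derivative of the spline with respect
                  * to eps, so the tabulated force becomes -(c6*FF_disp + c12*FF_rep)*tabscale/r;
                  * the sign flip is done by XOR-ing in the sign bit.
                  */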
1525
1526             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1527
1528             fscal            = _mm256_add_pd(felec,fvdw);
1529
1530             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1531
1532             /* Calculate temporary vectorial force */
1533             tx               = _mm256_mul_pd(fscal,dx00);
1534             ty               = _mm256_mul_pd(fscal,dy00);
1535             tz               = _mm256_mul_pd(fscal,dz00);
1536
1537             /* Update vectorial force */
1538             fix0             = _mm256_add_pd(fix0,tx);
1539             fiy0             = _mm256_add_pd(fiy0,ty);
1540             fiz0             = _mm256_add_pd(fiz0,tz);
1541
1542             fjx0             = _mm256_add_pd(fjx0,tx);
1543             fjy0             = _mm256_add_pd(fjy0,ty);
1544             fjz0             = _mm256_add_pd(fjz0,tz);
1545
1546             }
1547
1548             /**************************
1549              * CALCULATE INTERACTIONS *
1550              **************************/
1551
1552             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1553             {
1554
1555             /* REACTION-FIELD ELECTROSTATICS */
1556             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
1557
1558             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1559
1560             fscal            = felec;
1561
1562             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1563
1564             /* Calculate temporary vectorial force */
1565             tx               = _mm256_mul_pd(fscal,dx01);
1566             ty               = _mm256_mul_pd(fscal,dy01);
1567             tz               = _mm256_mul_pd(fscal,dz01);
1568
1569             /* Update vectorial force */
1570             fix0             = _mm256_add_pd(fix0,tx);
1571             fiy0             = _mm256_add_pd(fiy0,ty);
1572             fiz0             = _mm256_add_pd(fiz0,tz);
1573
1574             fjx1             = _mm256_add_pd(fjx1,tx);
1575             fjy1             = _mm256_add_pd(fjy1,ty);
1576             fjz1             = _mm256_add_pd(fjz1,tz);
1577
1578             }
1579
1580             /**************************
1581              * CALCULATE INTERACTIONS *
1582              **************************/
1583
1584             if (gmx_mm256_any_lt(rsq02,rcutoff2))
1585             {
1586
1587             /* REACTION-FIELD ELECTROSTATICS */
1588             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
1589
1590             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
1591
1592             fscal            = felec;
1593
1594             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1595
1596             /* Calculate temporary vectorial force */
1597             tx               = _mm256_mul_pd(fscal,dx02);
1598             ty               = _mm256_mul_pd(fscal,dy02);
1599             tz               = _mm256_mul_pd(fscal,dz02);
1600
1601             /* Update vectorial force */
1602             fix0             = _mm256_add_pd(fix0,tx);
1603             fiy0             = _mm256_add_pd(fiy0,ty);
1604             fiz0             = _mm256_add_pd(fiz0,tz);
1605
1606             fjx2             = _mm256_add_pd(fjx2,tx);
1607             fjy2             = _mm256_add_pd(fjy2,ty);
1608             fjz2             = _mm256_add_pd(fjz2,tz);
1609
1610             }
1611
1612             /**************************
1613              * CALCULATE INTERACTIONS *
1614              **************************/
1615
1616             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1617             {
1618
1619             /* REACTION-FIELD ELECTROSTATICS */
1620             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
1621
1622             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
1623
1624             fscal            = felec;
1625
1626             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1627
1628             /* Calculate temporary vectorial force */
1629             tx               = _mm256_mul_pd(fscal,dx10);
1630             ty               = _mm256_mul_pd(fscal,dy10);
1631             tz               = _mm256_mul_pd(fscal,dz10);
1632
1633             /* Update vectorial force */
1634             fix1             = _mm256_add_pd(fix1,tx);
1635             fiy1             = _mm256_add_pd(fiy1,ty);
1636             fiz1             = _mm256_add_pd(fiz1,tz);
1637
1638             fjx0             = _mm256_add_pd(fjx0,tx);
1639             fjy0             = _mm256_add_pd(fjy0,ty);
1640             fjz0             = _mm256_add_pd(fjz0,tz);
1641
1642             }
1643
1644             /**************************
1645              * CALCULATE INTERACTIONS *
1646              **************************/
1647
1648             if (gmx_mm256_any_lt(rsq11,rcutoff2))
1649             {
1650
1651             /* REACTION-FIELD ELECTROSTATICS */
1652             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
1653
1654             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
1655
1656             fscal            = felec;
1657
1658             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1659
1660             /* Calculate temporary vectorial force */
1661             tx               = _mm256_mul_pd(fscal,dx11);
1662             ty               = _mm256_mul_pd(fscal,dy11);
1663             tz               = _mm256_mul_pd(fscal,dz11);
1664
1665             /* Update vectorial force */
1666             fix1             = _mm256_add_pd(fix1,tx);
1667             fiy1             = _mm256_add_pd(fiy1,ty);
1668             fiz1             = _mm256_add_pd(fiz1,tz);
1669
1670             fjx1             = _mm256_add_pd(fjx1,tx);
1671             fjy1             = _mm256_add_pd(fjy1,ty);
1672             fjz1             = _mm256_add_pd(fjz1,tz);
1673
1674             }
1675
1676             /**************************
1677              * CALCULATE INTERACTIONS *
1678              **************************/
1679
1680             if (gmx_mm256_any_lt(rsq12,rcutoff2))
1681             {
1682
1683             /* REACTION-FIELD ELECTROSTATICS */
1684             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
1685
1686             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
1687
1688             fscal            = felec;
1689
1690             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1691
1692             /* Calculate temporary vectorial force */
1693             tx               = _mm256_mul_pd(fscal,dx12);
1694             ty               = _mm256_mul_pd(fscal,dy12);
1695             tz               = _mm256_mul_pd(fscal,dz12);
1696
1697             /* Update vectorial force */
1698             fix1             = _mm256_add_pd(fix1,tx);
1699             fiy1             = _mm256_add_pd(fiy1,ty);
1700             fiz1             = _mm256_add_pd(fiz1,tz);
1701
1702             fjx2             = _mm256_add_pd(fjx2,tx);
1703             fjy2             = _mm256_add_pd(fjy2,ty);
1704             fjz2             = _mm256_add_pd(fjz2,tz);
1705
1706             }
1707
1708             /**************************
1709              * CALCULATE INTERACTIONS *
1710              **************************/
1711
1712             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1713             {
1714
1715             /* REACTION-FIELD ELECTROSTATICS */
1716             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
1717
1718             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
1719
1720             fscal            = felec;
1721
1722             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1723
1724             /* Calculate temporary vectorial force */
1725             tx               = _mm256_mul_pd(fscal,dx20);
1726             ty               = _mm256_mul_pd(fscal,dy20);
1727             tz               = _mm256_mul_pd(fscal,dz20);
1728
1729             /* Update vectorial force */
1730             fix2             = _mm256_add_pd(fix2,tx);
1731             fiy2             = _mm256_add_pd(fiy2,ty);
1732             fiz2             = _mm256_add_pd(fiz2,tz);
1733
1734             fjx0             = _mm256_add_pd(fjx0,tx);
1735             fjy0             = _mm256_add_pd(fjy0,ty);
1736             fjz0             = _mm256_add_pd(fjz0,tz);
1737
1738             }
1739
1740             /**************************
1741              * CALCULATE INTERACTIONS *
1742              **************************/
1743
1744             if (gmx_mm256_any_lt(rsq21,rcutoff2))
1745             {
1746
1747             /* REACTION-FIELD ELECTROSTATICS */
1748             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
1749
1750             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
1751
1752             fscal            = felec;
1753
1754             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1755
1756             /* Calculate temporary vectorial force */
1757             tx               = _mm256_mul_pd(fscal,dx21);
1758             ty               = _mm256_mul_pd(fscal,dy21);
1759             tz               = _mm256_mul_pd(fscal,dz21);
1760
1761             /* Update vectorial force */
1762             fix2             = _mm256_add_pd(fix2,tx);
1763             fiy2             = _mm256_add_pd(fiy2,ty);
1764             fiz2             = _mm256_add_pd(fiz2,tz);
1765
1766             fjx1             = _mm256_add_pd(fjx1,tx);
1767             fjy1             = _mm256_add_pd(fjy1,ty);
1768             fjz1             = _mm256_add_pd(fjz1,tz);
1769
1770             }
1771
1772             /**************************
1773              * CALCULATE INTERACTIONS *
1774              **************************/
1775
1776             if (gmx_mm256_any_lt(rsq22,rcutoff2))
1777             {
1778
1779             /* REACTION-FIELD ELECTROSTATICS */
1780             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
1781
1782             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
1783
1784             fscal            = felec;
1785
1786             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1787
1788             /* Calculate temporary vectorial force */
1789             tx               = _mm256_mul_pd(fscal,dx22);
1790             ty               = _mm256_mul_pd(fscal,dy22);
1791             tz               = _mm256_mul_pd(fscal,dz22);
1792
1793             /* Update vectorial force */
1794             fix2             = _mm256_add_pd(fix2,tx);
1795             fiy2             = _mm256_add_pd(fiy2,ty);
1796             fiz2             = _mm256_add_pd(fiz2,tz);
1797
1798             fjx2             = _mm256_add_pd(fjx2,tx);
1799             fjy2             = _mm256_add_pd(fjy2,ty);
1800             fjz2             = _mm256_add_pd(fjz2,tz);
1801
1802             }
1803
1804             fjptrA             = f+j_coord_offsetA;
1805             fjptrB             = f+j_coord_offsetB;
1806             fjptrC             = f+j_coord_offsetC;
1807             fjptrD             = f+j_coord_offsetD;
1808
1809             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1810                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1811
1812             /* Inner loop uses 297 flops */
1813         }
1814
1815         if(jidx<j_index_end)
1816         {
1817
1818             /* Get j neighbor index, and coordinate index */
1819             jnrlistA         = jjnr[jidx];
1820             jnrlistB         = jjnr[jidx+1];
1821             jnrlistC         = jjnr[jidx+2];
1822             jnrlistD         = jjnr[jidx+3];
1823             /* Sign of each element will be negative for non-real atoms.
1824              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1825              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
1826              */
1827             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1828
1829             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1830             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1831             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
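                 /* The 32-bit compare yields one mask per j index; the two permutes duplicate
                  * each 32-bit mask into a pair so that, after combining the two 128-bit
                  * halves, every 64-bit double lane carries the mask of its own j atom.
                  */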
1832
1833             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1834             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1835             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1836             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1837             j_coord_offsetA  = DIM*jnrA;
1838             j_coord_offsetB  = DIM*jnrB;
1839             j_coord_offsetC  = DIM*jnrC;
1840             j_coord_offsetD  = DIM*jnrD;
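                 /* Negative padding indices were clamped to 0 above so that the coordinate
                  * loads stay in bounds; the corresponding force contributions are discarded
                  * later via dummy_mask and the scratch buffer.
                  */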
1841
1842             /* load j atom coordinates */
1843             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1844                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1845                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1846
1847             /* Calculate displacement vector */
1848             dx00             = _mm256_sub_pd(ix0,jx0);
1849             dy00             = _mm256_sub_pd(iy0,jy0);
1850             dz00             = _mm256_sub_pd(iz0,jz0);
1851             dx01             = _mm256_sub_pd(ix0,jx1);
1852             dy01             = _mm256_sub_pd(iy0,jy1);
1853             dz01             = _mm256_sub_pd(iz0,jz1);
1854             dx02             = _mm256_sub_pd(ix0,jx2);
1855             dy02             = _mm256_sub_pd(iy0,jy2);
1856             dz02             = _mm256_sub_pd(iz0,jz2);
1857             dx10             = _mm256_sub_pd(ix1,jx0);
1858             dy10             = _mm256_sub_pd(iy1,jy0);
1859             dz10             = _mm256_sub_pd(iz1,jz0);
1860             dx11             = _mm256_sub_pd(ix1,jx1);
1861             dy11             = _mm256_sub_pd(iy1,jy1);
1862             dz11             = _mm256_sub_pd(iz1,jz1);
1863             dx12             = _mm256_sub_pd(ix1,jx2);
1864             dy12             = _mm256_sub_pd(iy1,jy2);
1865             dz12             = _mm256_sub_pd(iz1,jz2);
1866             dx20             = _mm256_sub_pd(ix2,jx0);
1867             dy20             = _mm256_sub_pd(iy2,jy0);
1868             dz20             = _mm256_sub_pd(iz2,jz0);
1869             dx21             = _mm256_sub_pd(ix2,jx1);
1870             dy21             = _mm256_sub_pd(iy2,jy1);
1871             dz21             = _mm256_sub_pd(iz2,jz1);
1872             dx22             = _mm256_sub_pd(ix2,jx2);
1873             dy22             = _mm256_sub_pd(iy2,jy2);
1874             dz22             = _mm256_sub_pd(iz2,jz2);
1875
1876             /* Calculate squared distance and things based on it */
1877             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1878             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1879             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1880             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1881             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1882             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1883             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1884             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1885             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1886
1887             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1888             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
1889             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
1890             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1891             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1892             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1893             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1894             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1895             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1896
1897             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1898             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1899             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1900             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1901             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1902             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1903             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1904             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1905             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1906
1907             fjx0             = _mm256_setzero_pd();
1908             fjy0             = _mm256_setzero_pd();
1909             fjz0             = _mm256_setzero_pd();
1910             fjx1             = _mm256_setzero_pd();
1911             fjy1             = _mm256_setzero_pd();
1912             fjz1             = _mm256_setzero_pd();
1913             fjx2             = _mm256_setzero_pd();
1914             fjy2             = _mm256_setzero_pd();
1915             fjz2             = _mm256_setzero_pd();
1916
1917             /**************************
1918              * CALCULATE INTERACTIONS *
1919              **************************/
1920
1921             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1922             {
1923
1924             r00              = _mm256_mul_pd(rsq00,rinv00);
1925             r00              = _mm256_andnot_pd(dummy_mask,r00);
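                 /* Zeroing r for the padding lanes keeps their table index at zero, so the
                  * gathered table loads below remain within the table for those lanes.
                  */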
1926
1927             /* Calculate table index by multiplying r by the table scale and truncating to an integer */
1928             rt               = _mm256_mul_pd(r00,vftabscale);
1929             vfitab           = _mm256_cvttpd_epi32(rt);
1930             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1931             vfitab           = _mm_slli_epi32(vfitab,3);
1932
1933             /* REACTION-FIELD ELECTROSTATICS */
1934             felec            = _mm256_mul_pd(qq00,_mm256_sub_pd(_mm256_mul_pd(rinv00,rinvsq00),krf2));
1935
1936             /* CUBIC SPLINE TABLE DISPERSION */
1937             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1938             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1939             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1940             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1941             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1942             Heps             = _mm256_mul_pd(vfeps,H);
1943             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1944             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1945             fvdw6            = _mm256_mul_pd(c6_00,FF);
1946
1947             /* CUBIC SPLINE TABLE REPULSION */
1948             vfitab           = _mm_add_epi32(vfitab,ifour);
1949             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1950             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1951             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1952             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1953             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1954             Heps             = _mm256_mul_pd(vfeps,H);
1955             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1956             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1957             fvdw12           = _mm256_mul_pd(c12_00,FF);
1958             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1959
1960             cutoff_mask      = _mm256_cmp_pd(rsq00,rcutoff2,_CMP_LT_OQ);
1961
1962             fscal            = _mm256_add_pd(felec,fvdw);
1963
1964             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1965
1966             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1967
1968             /* Calculate temporary vectorial force */
1969             tx               = _mm256_mul_pd(fscal,dx00);
1970             ty               = _mm256_mul_pd(fscal,dy00);
1971             tz               = _mm256_mul_pd(fscal,dz00);
1972
1973             /* Update vectorial force */
1974             fix0             = _mm256_add_pd(fix0,tx);
1975             fiy0             = _mm256_add_pd(fiy0,ty);
1976             fiz0             = _mm256_add_pd(fiz0,tz);
1977
1978             fjx0             = _mm256_add_pd(fjx0,tx);
1979             fjy0             = _mm256_add_pd(fjy0,ty);
1980             fjz0             = _mm256_add_pd(fjz0,tz);
1981
1982             }
1983
1984             /**************************
1985              * CALCULATE INTERACTIONS *
1986              **************************/
1987
1988             if (gmx_mm256_any_lt(rsq01,rcutoff2))
1989             {
1990
1991             /* REACTION-FIELD ELECTROSTATICS */
1992             felec            = _mm256_mul_pd(qq01,_mm256_sub_pd(_mm256_mul_pd(rinv01,rinvsq01),krf2));
1993
1994             cutoff_mask      = _mm256_cmp_pd(rsq01,rcutoff2,_CMP_LT_OQ);
1995
1996             fscal            = felec;
1997
1998             fscal            = _mm256_and_pd(fscal,cutoff_mask);
1999
2000             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2001
2002             /* Calculate temporary vectorial force */
2003             tx               = _mm256_mul_pd(fscal,dx01);
2004             ty               = _mm256_mul_pd(fscal,dy01);
2005             tz               = _mm256_mul_pd(fscal,dz01);
2006
2007             /* Update vectorial force */
2008             fix0             = _mm256_add_pd(fix0,tx);
2009             fiy0             = _mm256_add_pd(fiy0,ty);
2010             fiz0             = _mm256_add_pd(fiz0,tz);
2011
2012             fjx1             = _mm256_add_pd(fjx1,tx);
2013             fjy1             = _mm256_add_pd(fjy1,ty);
2014             fjz1             = _mm256_add_pd(fjz1,tz);
2015
2016             }
2017
2018             /**************************
2019              * CALCULATE INTERACTIONS *
2020              **************************/
2021
2022             if (gmx_mm256_any_lt(rsq02,rcutoff2))
2023             {
2024
2025             /* REACTION-FIELD ELECTROSTATICS */
2026             felec            = _mm256_mul_pd(qq02,_mm256_sub_pd(_mm256_mul_pd(rinv02,rinvsq02),krf2));
2027
2028             cutoff_mask      = _mm256_cmp_pd(rsq02,rcutoff2,_CMP_LT_OQ);
2029
2030             fscal            = felec;
2031
2032             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2033
2034             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2035
2036             /* Calculate temporary vectorial force */
2037             tx               = _mm256_mul_pd(fscal,dx02);
2038             ty               = _mm256_mul_pd(fscal,dy02);
2039             tz               = _mm256_mul_pd(fscal,dz02);
2040
2041             /* Update vectorial force */
2042             fix0             = _mm256_add_pd(fix0,tx);
2043             fiy0             = _mm256_add_pd(fiy0,ty);
2044             fiz0             = _mm256_add_pd(fiz0,tz);
2045
2046             fjx2             = _mm256_add_pd(fjx2,tx);
2047             fjy2             = _mm256_add_pd(fjy2,ty);
2048             fjz2             = _mm256_add_pd(fjz2,tz);
2049
2050             }
2051
2052             /**************************
2053              * CALCULATE INTERACTIONS *
2054              **************************/
2055
2056             if (gmx_mm256_any_lt(rsq10,rcutoff2))
2057             {
2058
2059             /* REACTION-FIELD ELECTROSTATICS */
2060             felec            = _mm256_mul_pd(qq10,_mm256_sub_pd(_mm256_mul_pd(rinv10,rinvsq10),krf2));
2061
2062             cutoff_mask      = _mm256_cmp_pd(rsq10,rcutoff2,_CMP_LT_OQ);
2063
2064             fscal            = felec;
2065
2066             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2067
2068             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2069
2070             /* Calculate temporary vectorial force */
2071             tx               = _mm256_mul_pd(fscal,dx10);
2072             ty               = _mm256_mul_pd(fscal,dy10);
2073             tz               = _mm256_mul_pd(fscal,dz10);
2074
2075             /* Update vectorial force */
2076             fix1             = _mm256_add_pd(fix1,tx);
2077             fiy1             = _mm256_add_pd(fiy1,ty);
2078             fiz1             = _mm256_add_pd(fiz1,tz);
2079
2080             fjx0             = _mm256_add_pd(fjx0,tx);
2081             fjy0             = _mm256_add_pd(fjy0,ty);
2082             fjz0             = _mm256_add_pd(fjz0,tz);
2083
2084             }
2085
2086             /**************************
2087              * CALCULATE INTERACTIONS *
2088              **************************/
2089
2090             if (gmx_mm256_any_lt(rsq11,rcutoff2))
2091             {
2092
2093             /* REACTION-FIELD ELECTROSTATICS */
2094             felec            = _mm256_mul_pd(qq11,_mm256_sub_pd(_mm256_mul_pd(rinv11,rinvsq11),krf2));
2095
2096             cutoff_mask      = _mm256_cmp_pd(rsq11,rcutoff2,_CMP_LT_OQ);
2097
2098             fscal            = felec;
2099
2100             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2101
2102             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2103
2104             /* Calculate temporary vectorial force */
2105             tx               = _mm256_mul_pd(fscal,dx11);
2106             ty               = _mm256_mul_pd(fscal,dy11);
2107             tz               = _mm256_mul_pd(fscal,dz11);
2108
2109             /* Update vectorial force */
2110             fix1             = _mm256_add_pd(fix1,tx);
2111             fiy1             = _mm256_add_pd(fiy1,ty);
2112             fiz1             = _mm256_add_pd(fiz1,tz);
2113
2114             fjx1             = _mm256_add_pd(fjx1,tx);
2115             fjy1             = _mm256_add_pd(fjy1,ty);
2116             fjz1             = _mm256_add_pd(fjz1,tz);
2117
2118             }
2119
2120             /**************************
2121              * CALCULATE INTERACTIONS *
2122              **************************/
2123
2124             if (gmx_mm256_any_lt(rsq12,rcutoff2))
2125             {
2126
2127             /* REACTION-FIELD ELECTROSTATICS */
2128             felec            = _mm256_mul_pd(qq12,_mm256_sub_pd(_mm256_mul_pd(rinv12,rinvsq12),krf2));
2129
2130             cutoff_mask      = _mm256_cmp_pd(rsq12,rcutoff2,_CMP_LT_OQ);
2131
2132             fscal            = felec;
2133
2134             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2135
2136             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2137
2138             /* Calculate temporary vectorial force */
2139             tx               = _mm256_mul_pd(fscal,dx12);
2140             ty               = _mm256_mul_pd(fscal,dy12);
2141             tz               = _mm256_mul_pd(fscal,dz12);
2142
2143             /* Update vectorial force */
2144             fix1             = _mm256_add_pd(fix1,tx);
2145             fiy1             = _mm256_add_pd(fiy1,ty);
2146             fiz1             = _mm256_add_pd(fiz1,tz);
2147
2148             fjx2             = _mm256_add_pd(fjx2,tx);
2149             fjy2             = _mm256_add_pd(fjy2,ty);
2150             fjz2             = _mm256_add_pd(fjz2,tz);
2151
2152             }
2153
2154             /**************************
2155              * CALCULATE INTERACTIONS *
2156              **************************/
2157
2158             if (gmx_mm256_any_lt(rsq20,rcutoff2))
2159             {
2160
2161             /* REACTION-FIELD ELECTROSTATICS */
2162             felec            = _mm256_mul_pd(qq20,_mm256_sub_pd(_mm256_mul_pd(rinv20,rinvsq20),krf2));
2163
2164             cutoff_mask      = _mm256_cmp_pd(rsq20,rcutoff2,_CMP_LT_OQ);
2165
2166             fscal            = felec;
2167
2168             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2169
2170             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2171
2172             /* Calculate temporary vectorial force */
2173             tx               = _mm256_mul_pd(fscal,dx20);
2174             ty               = _mm256_mul_pd(fscal,dy20);
2175             tz               = _mm256_mul_pd(fscal,dz20);
2176
2177             /* Update vectorial force */
2178             fix2             = _mm256_add_pd(fix2,tx);
2179             fiy2             = _mm256_add_pd(fiy2,ty);
2180             fiz2             = _mm256_add_pd(fiz2,tz);
2181
2182             fjx0             = _mm256_add_pd(fjx0,tx);
2183             fjy0             = _mm256_add_pd(fjy0,ty);
2184             fjz0             = _mm256_add_pd(fjz0,tz);
2185
2186             }
2187
2188             /**************************
2189              * CALCULATE INTERACTIONS *
2190              **************************/
2191
2192             if (gmx_mm256_any_lt(rsq21,rcutoff2))
2193             {
2194
2195             /* REACTION-FIELD ELECTROSTATICS */
2196             felec            = _mm256_mul_pd(qq21,_mm256_sub_pd(_mm256_mul_pd(rinv21,rinvsq21),krf2));
2197
2198             cutoff_mask      = _mm256_cmp_pd(rsq21,rcutoff2,_CMP_LT_OQ);
2199
2200             fscal            = felec;
2201
2202             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2203
2204             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2205
2206             /* Calculate temporary vectorial force */
2207             tx               = _mm256_mul_pd(fscal,dx21);
2208             ty               = _mm256_mul_pd(fscal,dy21);
2209             tz               = _mm256_mul_pd(fscal,dz21);
2210
2211             /* Update vectorial force */
2212             fix2             = _mm256_add_pd(fix2,tx);
2213             fiy2             = _mm256_add_pd(fiy2,ty);
2214             fiz2             = _mm256_add_pd(fiz2,tz);
2215
2216             fjx1             = _mm256_add_pd(fjx1,tx);
2217             fjy1             = _mm256_add_pd(fjy1,ty);
2218             fjz1             = _mm256_add_pd(fjz1,tz);
2219
2220             }
2221
2222             /**************************
2223              * CALCULATE INTERACTIONS *
2224              **************************/
2225
2226             if (gmx_mm256_any_lt(rsq22,rcutoff2))
2227             {
2228
2229             /* REACTION-FIELD ELECTROSTATICS */
2230             felec            = _mm256_mul_pd(qq22,_mm256_sub_pd(_mm256_mul_pd(rinv22,rinvsq22),krf2));
2231
2232             cutoff_mask      = _mm256_cmp_pd(rsq22,rcutoff2,_CMP_LT_OQ);
2233
2234             fscal            = felec;
2235
2236             fscal            = _mm256_and_pd(fscal,cutoff_mask);
2237
2238             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
2239
2240             /* Calculate temporary vectorial force */
2241             tx               = _mm256_mul_pd(fscal,dx22);
2242             ty               = _mm256_mul_pd(fscal,dy22);
2243             tz               = _mm256_mul_pd(fscal,dz22);
2244
2245             /* Update vectorial force */
2246             fix2             = _mm256_add_pd(fix2,tx);
2247             fiy2             = _mm256_add_pd(fiy2,ty);
2248             fiz2             = _mm256_add_pd(fiz2,tz);
2249
2250             fjx2             = _mm256_add_pd(fjx2,tx);
2251             fjy2             = _mm256_add_pd(fjy2,ty);
2252             fjz2             = _mm256_add_pd(fjz2,tz);
2253
2254             }
2255
2256             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2257             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2258             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2259             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2260
2261             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
2262                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2263
2264             /* Inner loop uses 298 flops */
2265         }
2266
2267         /* End of innermost loop */
2268
2269         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2270                                                  f+i_coord_offset,fshift+i_shift_offset);
2271
2272         /* Increment number of inner iterations */
2273         inneriter                  += j_index_end - j_index_start;
2274
2275         /* Outer loop uses 18 flops */
2276     }
2277
2278     /* Increment number of outer iterations */
2279     outeriter        += nri;
2280
2281     /* Update outer/inner flops */
2282
2283     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*298);
2284 }