src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_avx_256_double.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
38 #ifdef HAVE_CONFIG_H
39 #include <config.h>
40 #endif
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "types/simple.h"
46 #include "vec.h"
47 #include "nrnb.h"
48
49 #include "gromacs/simd/math_x86_avx_256_double.h"
50 #include "kernelutil_x86_avx_256_double.h"
51
52 /*
53  * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_double
54  * Electrostatics interaction: Coulomb
55  * VdW interaction:            CubicSplineTable
56  * Geometry:                   Water3-Water3
57  * Calculate force/pot:        PotentialAndForce
58  */
59 void
60 nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_double
61                     (t_nblist                    * gmx_restrict       nlist,
62                      rvec                        * gmx_restrict          xx,
63                      rvec                        * gmx_restrict          ff,
64                      t_forcerec                  * gmx_restrict          fr,
65                      t_mdatoms                   * gmx_restrict     mdatoms,
66                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67                      t_nrnb                      * gmx_restrict        nrnb)
68 {
69     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
70      * just 0 for non-waters.
71      * Suffixes A,B,C,D refer to the j-loop unrolling done with AVX, i.e. the four different
72      * jnr indices whose data occupy the four positions in the SIMD register.
73      */
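    /* For example, rsq01 below holds, in each of the four SIMD lanes, the squared distance
     * between atom 0 of the single i water and atom 1 of the corresponding j water (A, B, C
     * or D), and fjx2 accumulates the x-component of the force on atom 2 of each j water.
     */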
74     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
75     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76     int              jnrA,jnrB,jnrC,jnrD;
77     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
78     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
79     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
84     real             scratch[4*DIM];
85     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88     real *           vdwioffsetptr1;
89     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90     real *           vdwioffsetptr2;
91     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
93     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
94     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
95     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
96     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
97     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
98     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
99     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
100     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
101     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
102     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
103     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
104     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
105     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
106     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
107     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
108     real             *charge;
109     int              nvdwtype;
110     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
111     int              *vdwtype;
112     real             *vdwparam;
113     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
114     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
115     __m128i          vfitab;
116     __m128i          ifour       = _mm_set1_epi32(4);
117     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
118     real             *vftab;
119     __m256d          dummy_mask,cutoff_mask;
120     __m128           tmpmask0,tmpmask1;
121     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
122     __m256d          one     = _mm256_set1_pd(1.0);
123     __m256d          two     = _mm256_set1_pd(2.0);
124     x                = xx[0];
125     f                = ff[0];
126
127     nri              = nlist->nri;
128     iinr             = nlist->iinr;
129     jindex           = nlist->jindex;
130     jjnr             = nlist->jjnr;
131     shiftidx         = nlist->shift;
132     gid              = nlist->gid;
133     shiftvec         = fr->shift_vec[0];
134     fshift           = fr->fshift[0];
135     facel            = _mm256_set1_pd(fr->epsfac);
136     charge           = mdatoms->chargeA;
137     nvdwtype         = fr->ntype;
138     vdwparam         = fr->nbfp;
139     vdwtype          = mdatoms->typeA;
140
141     vftab            = kernel_data->table_vdw->data;
142     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
143
144     /* Setup water-specific parameters */
145     inr              = nlist->iinr[0];
146     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
147     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
148     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
149     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
150
151     jq0              = _mm256_set1_pd(charge[inr+0]);
152     jq1              = _mm256_set1_pd(charge[inr+1]);
153     jq2              = _mm256_set1_pd(charge[inr+2]);
154     vdwjidx0A        = 2*vdwtype[inr+0];
155     qq00             = _mm256_mul_pd(iq0,jq0);
156     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
157     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
158     qq01             = _mm256_mul_pd(iq0,jq1);
159     qq02             = _mm256_mul_pd(iq0,jq2);
160     qq10             = _mm256_mul_pd(iq1,jq0);
161     qq11             = _mm256_mul_pd(iq1,jq1);
162     qq12             = _mm256_mul_pd(iq1,jq2);
163     qq20             = _mm256_mul_pd(iq2,jq0);
164     qq21             = _mm256_mul_pd(iq2,jq1);
165     qq22             = _mm256_mul_pd(iq2,jq2);
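    /* All i and j waters share the topology of the first listed particle (inr), so the nine
     * charge products qqXY = epsfac*q_X*q_Y and the single O-O c6/c12 pair are broadcast once
     * here instead of being reloaded in the inner loop. vdwparam stores (c6,c12) pairs, which
     * is why the offsets above are 2*nvdwtype*type_i and 2*type_j.
     */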
166
167     /* Initialize to avoid compiler warnings about possibly uninitialized variables */
168     jnrA = jnrB = jnrC = jnrD = 0;
169     j_coord_offsetA = 0;
170     j_coord_offsetB = 0;
171     j_coord_offsetC = 0;
172     j_coord_offsetD = 0;
173
174     outeriter        = 0;
175     inneriter        = 0;
176
177     for(iidx=0;iidx<4*DIM;iidx++)
178     {
179         scratch[iidx] = 0.0;
180     }
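    /* scratch[] acts as a small dummy force buffer: in the masked epilogue below, the force
     * output pointers for padded (non-existent) j entries are redirected here, so the stores
     * stay valid without touching forces of real atoms.
     */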
181
182     /* Start outer loop over neighborlists */
183     for(iidx=0; iidx<nri; iidx++)
184     {
185         /* Load shift vector for this list */
186         i_shift_offset   = DIM*shiftidx[iidx];
187
188         /* Load limits for loop over neighbors */
189         j_index_start    = jindex[iidx];
190         j_index_end      = jindex[iidx+1];
191
192         /* Get outer coordinate index */
193         inr              = iinr[iidx];
194         i_coord_offset   = DIM*inr;
195
196         /* Load i particle coords and add shift vector */
197         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
198                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
199
200         fix0             = _mm256_setzero_pd();
201         fiy0             = _mm256_setzero_pd();
202         fiz0             = _mm256_setzero_pd();
203         fix1             = _mm256_setzero_pd();
204         fiy1             = _mm256_setzero_pd();
205         fiz1             = _mm256_setzero_pd();
206         fix2             = _mm256_setzero_pd();
207         fiy2             = _mm256_setzero_pd();
208         fiz2             = _mm256_setzero_pd();
209
210         /* Reset potential sums */
211         velecsum         = _mm256_setzero_pd();
212         vvdwsum          = _mm256_setzero_pd();
213
214         /* Start inner kernel loop */
215         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
216         {
217
218             /* Get j neighbor index, and coordinate index */
219             jnrA             = jjnr[jidx];
220             jnrB             = jjnr[jidx+1];
221             jnrC             = jjnr[jidx+2];
222             jnrD             = jjnr[jidx+3];
223             j_coord_offsetA  = DIM*jnrA;
224             j_coord_offsetB  = DIM*jnrB;
225             j_coord_offsetC  = DIM*jnrC;
226             j_coord_offsetD  = DIM*jnrD;
227
228             /* load j atom coordinates */
229             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
230                                                  x+j_coord_offsetC,x+j_coord_offsetD,
231                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
232
233             /* Calculate displacement vector */
234             dx00             = _mm256_sub_pd(ix0,jx0);
235             dy00             = _mm256_sub_pd(iy0,jy0);
236             dz00             = _mm256_sub_pd(iz0,jz0);
237             dx01             = _mm256_sub_pd(ix0,jx1);
238             dy01             = _mm256_sub_pd(iy0,jy1);
239             dz01             = _mm256_sub_pd(iz0,jz1);
240             dx02             = _mm256_sub_pd(ix0,jx2);
241             dy02             = _mm256_sub_pd(iy0,jy2);
242             dz02             = _mm256_sub_pd(iz0,jz2);
243             dx10             = _mm256_sub_pd(ix1,jx0);
244             dy10             = _mm256_sub_pd(iy1,jy0);
245             dz10             = _mm256_sub_pd(iz1,jz0);
246             dx11             = _mm256_sub_pd(ix1,jx1);
247             dy11             = _mm256_sub_pd(iy1,jy1);
248             dz11             = _mm256_sub_pd(iz1,jz1);
249             dx12             = _mm256_sub_pd(ix1,jx2);
250             dy12             = _mm256_sub_pd(iy1,jy2);
251             dz12             = _mm256_sub_pd(iz1,jz2);
252             dx20             = _mm256_sub_pd(ix2,jx0);
253             dy20             = _mm256_sub_pd(iy2,jy0);
254             dz20             = _mm256_sub_pd(iz2,jz0);
255             dx21             = _mm256_sub_pd(ix2,jx1);
256             dy21             = _mm256_sub_pd(iy2,jy1);
257             dz21             = _mm256_sub_pd(iz2,jz1);
258             dx22             = _mm256_sub_pd(ix2,jx2);
259             dy22             = _mm256_sub_pd(iy2,jy2);
260             dz22             = _mm256_sub_pd(iz2,jz2);
261
262             /* Calculate squared distances and quantities derived from them */
263             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
264             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
265             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
266             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
267             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
268             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
269             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
270             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
271             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
272
273             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
274             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
275             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
276             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
277             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
278             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
279             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
280             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
281             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
282
283             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
284             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
285             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
286             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
287             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
288             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
289             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
290             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
291             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
292
293             fjx0             = _mm256_setzero_pd();
294             fjy0             = _mm256_setzero_pd();
295             fjz0             = _mm256_setzero_pd();
296             fjx1             = _mm256_setzero_pd();
297             fjy1             = _mm256_setzero_pd();
298             fjz1             = _mm256_setzero_pd();
299             fjx2             = _mm256_setzero_pd();
300             fjy2             = _mm256_setzero_pd();
301             fjz2             = _mm256_setzero_pd();
302
303             /**************************
304              * CALCULATE INTERACTIONS *
305              **************************/
306
307             r00              = _mm256_mul_pd(rsq00,rinv00);
308
309             /* Calculate table index by multiplying r with table scale and truncate to integer */
310             rt               = _mm256_mul_pd(r00,vftabscale);
311             vfitab           = _mm256_cvttpd_epi32(rt);
312             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
313             vfitab           = _mm_slli_epi32(vfitab,3);
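            /* Each table point stores four spline coefficients (Y,F,G,H), and the dispersion and
             * repulsion tables are interleaved, so the point index is scaled by 8 (<<3) here and
             * offset by 4 further down for the repulsion part. vfeps is the fractional position
             * within the table interval, i.e. vfeps = r*scale - floor(r*scale).
             */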
314
315             /* COULOMB ELECTROSTATICS */
316             velec            = _mm256_mul_pd(qq00,rinv00);
317             felec            = _mm256_mul_pd(velec,rinvsq00);
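            /* Plain Coulomb: velec = qq/r is the pair energy (epsfac is already folded into qq),
             * and felec = velec/r^2 = qq/r^3 is the scalar prefactor that turns the displacement
             * (dx,dy,dz) into the force vector further down.
             */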
318
319             /* CUBIC SPLINE TABLE DISPERSION */
320             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
321             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
322             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
323             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
324             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
325             Heps             = _mm256_mul_pd(vfeps,H);
326             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
327             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
328             vvdw6            = _mm256_mul_pd(c6_00,VV);
329             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
330             fvdw6            = _mm256_mul_pd(c6_00,FF);
331
332             /* CUBIC SPLINE TABLE REPULSION */
333             vfitab           = _mm_add_epi32(vfitab,ifour);
334             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
335             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
336             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
337             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
338             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
339             Heps             = _mm256_mul_pd(vfeps,H);
340             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
341             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
342             vvdw12           = _mm256_mul_pd(c12_00,VV);
343             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
344             fvdw12           = _mm256_mul_pd(c12_00,FF);
345             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
346             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
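            /* Cubic spline evaluation: with eps the fractional table position,
             *   VV = Y + eps*(F + eps*(G + eps*H))   and   FF = dVV/deps = F + eps*(2*G + 3*eps*H),
             * so the pair potential is c6*VV6 + c12*VV12, while the scalar factor applied to the
             * displacement vector is -(c6*FF6 + c12*FF12)*tabscale/r, i.e. the sign-flipped,
             * rescaled derivative produced on the line above.
             */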
347
348             /* Update potential sum for this i atom from the interaction with this j atom. */
349             velecsum         = _mm256_add_pd(velecsum,velec);
350             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
351
352             fscal            = _mm256_add_pd(felec,fvdw);
353
354             /* Calculate temporary vectorial force */
355             tx               = _mm256_mul_pd(fscal,dx00);
356             ty               = _mm256_mul_pd(fscal,dy00);
357             tz               = _mm256_mul_pd(fscal,dz00);
358
359             /* Update vectorial force */
360             fix0             = _mm256_add_pd(fix0,tx);
361             fiy0             = _mm256_add_pd(fiy0,ty);
362             fiz0             = _mm256_add_pd(fiz0,tz);
363
364             fjx0             = _mm256_add_pd(fjx0,tx);
365             fjy0             = _mm256_add_pd(fjy0,ty);
366             fjz0             = _mm256_add_pd(fjz0,tz);
367
368             /**************************
369              * CALCULATE INTERACTIONS *
370              **************************/
371
372             /* COULOMB ELECTROSTATICS */
373             velec            = _mm256_mul_pd(qq01,rinv01);
374             felec            = _mm256_mul_pd(velec,rinvsq01);
375
376             /* Update potential sum for this i atom from the interaction with this j atom. */
377             velecsum         = _mm256_add_pd(velecsum,velec);
378
379             fscal            = felec;
380
381             /* Calculate temporary vectorial force */
382             tx               = _mm256_mul_pd(fscal,dx01);
383             ty               = _mm256_mul_pd(fscal,dy01);
384             tz               = _mm256_mul_pd(fscal,dz01);
385
386             /* Update vectorial force */
387             fix0             = _mm256_add_pd(fix0,tx);
388             fiy0             = _mm256_add_pd(fiy0,ty);
389             fiz0             = _mm256_add_pd(fiz0,tz);
390
391             fjx1             = _mm256_add_pd(fjx1,tx);
392             fjy1             = _mm256_add_pd(fjy1,ty);
393             fjz1             = _mm256_add_pd(fjz1,tz);
394
395             /**************************
396              * CALCULATE INTERACTIONS *
397              **************************/
398
399             /* COULOMB ELECTROSTATICS */
400             velec            = _mm256_mul_pd(qq02,rinv02);
401             felec            = _mm256_mul_pd(velec,rinvsq02);
402
403             /* Update potential sum for this i atom from the interaction with this j atom. */
404             velecsum         = _mm256_add_pd(velecsum,velec);
405
406             fscal            = felec;
407
408             /* Calculate temporary vectorial force */
409             tx               = _mm256_mul_pd(fscal,dx02);
410             ty               = _mm256_mul_pd(fscal,dy02);
411             tz               = _mm256_mul_pd(fscal,dz02);
412
413             /* Update vectorial force */
414             fix0             = _mm256_add_pd(fix0,tx);
415             fiy0             = _mm256_add_pd(fiy0,ty);
416             fiz0             = _mm256_add_pd(fiz0,tz);
417
418             fjx2             = _mm256_add_pd(fjx2,tx);
419             fjy2             = _mm256_add_pd(fjy2,ty);
420             fjz2             = _mm256_add_pd(fjz2,tz);
421
422             /**************************
423              * CALCULATE INTERACTIONS *
424              **************************/
425
426             /* COULOMB ELECTROSTATICS */
427             velec            = _mm256_mul_pd(qq10,rinv10);
428             felec            = _mm256_mul_pd(velec,rinvsq10);
429
430             /* Update potential sum for this i atom from the interaction with this j atom. */
431             velecsum         = _mm256_add_pd(velecsum,velec);
432
433             fscal            = felec;
434
435             /* Calculate temporary vectorial force */
436             tx               = _mm256_mul_pd(fscal,dx10);
437             ty               = _mm256_mul_pd(fscal,dy10);
438             tz               = _mm256_mul_pd(fscal,dz10);
439
440             /* Update vectorial force */
441             fix1             = _mm256_add_pd(fix1,tx);
442             fiy1             = _mm256_add_pd(fiy1,ty);
443             fiz1             = _mm256_add_pd(fiz1,tz);
444
445             fjx0             = _mm256_add_pd(fjx0,tx);
446             fjy0             = _mm256_add_pd(fjy0,ty);
447             fjz0             = _mm256_add_pd(fjz0,tz);
448
449             /**************************
450              * CALCULATE INTERACTIONS *
451              **************************/
452
453             /* COULOMB ELECTROSTATICS */
454             velec            = _mm256_mul_pd(qq11,rinv11);
455             felec            = _mm256_mul_pd(velec,rinvsq11);
456
457             /* Update potential sum for this i atom from the interaction with this j atom. */
458             velecsum         = _mm256_add_pd(velecsum,velec);
459
460             fscal            = felec;
461
462             /* Calculate temporary vectorial force */
463             tx               = _mm256_mul_pd(fscal,dx11);
464             ty               = _mm256_mul_pd(fscal,dy11);
465             tz               = _mm256_mul_pd(fscal,dz11);
466
467             /* Update vectorial force */
468             fix1             = _mm256_add_pd(fix1,tx);
469             fiy1             = _mm256_add_pd(fiy1,ty);
470             fiz1             = _mm256_add_pd(fiz1,tz);
471
472             fjx1             = _mm256_add_pd(fjx1,tx);
473             fjy1             = _mm256_add_pd(fjy1,ty);
474             fjz1             = _mm256_add_pd(fjz1,tz);
475
476             /**************************
477              * CALCULATE INTERACTIONS *
478              **************************/
479
480             /* COULOMB ELECTROSTATICS */
481             velec            = _mm256_mul_pd(qq12,rinv12);
482             felec            = _mm256_mul_pd(velec,rinvsq12);
483
484             /* Update potential sum for this i atom from the interaction with this j atom. */
485             velecsum         = _mm256_add_pd(velecsum,velec);
486
487             fscal            = felec;
488
489             /* Calculate temporary vectorial force */
490             tx               = _mm256_mul_pd(fscal,dx12);
491             ty               = _mm256_mul_pd(fscal,dy12);
492             tz               = _mm256_mul_pd(fscal,dz12);
493
494             /* Update vectorial force */
495             fix1             = _mm256_add_pd(fix1,tx);
496             fiy1             = _mm256_add_pd(fiy1,ty);
497             fiz1             = _mm256_add_pd(fiz1,tz);
498
499             fjx2             = _mm256_add_pd(fjx2,tx);
500             fjy2             = _mm256_add_pd(fjy2,ty);
501             fjz2             = _mm256_add_pd(fjz2,tz);
502
503             /**************************
504              * CALCULATE INTERACTIONS *
505              **************************/
506
507             /* COULOMB ELECTROSTATICS */
508             velec            = _mm256_mul_pd(qq20,rinv20);
509             felec            = _mm256_mul_pd(velec,rinvsq20);
510
511             /* Update potential sum for this i atom from the interaction with this j atom. */
512             velecsum         = _mm256_add_pd(velecsum,velec);
513
514             fscal            = felec;
515
516             /* Calculate temporary vectorial force */
517             tx               = _mm256_mul_pd(fscal,dx20);
518             ty               = _mm256_mul_pd(fscal,dy20);
519             tz               = _mm256_mul_pd(fscal,dz20);
520
521             /* Update vectorial force */
522             fix2             = _mm256_add_pd(fix2,tx);
523             fiy2             = _mm256_add_pd(fiy2,ty);
524             fiz2             = _mm256_add_pd(fiz2,tz);
525
526             fjx0             = _mm256_add_pd(fjx0,tx);
527             fjy0             = _mm256_add_pd(fjy0,ty);
528             fjz0             = _mm256_add_pd(fjz0,tz);
529
530             /**************************
531              * CALCULATE INTERACTIONS *
532              **************************/
533
534             /* COULOMB ELECTROSTATICS */
535             velec            = _mm256_mul_pd(qq21,rinv21);
536             felec            = _mm256_mul_pd(velec,rinvsq21);
537
538             /* Update potential sum for this i atom from the interaction with this j atom. */
539             velecsum         = _mm256_add_pd(velecsum,velec);
540
541             fscal            = felec;
542
543             /* Calculate temporary vectorial force */
544             tx               = _mm256_mul_pd(fscal,dx21);
545             ty               = _mm256_mul_pd(fscal,dy21);
546             tz               = _mm256_mul_pd(fscal,dz21);
547
548             /* Update vectorial force */
549             fix2             = _mm256_add_pd(fix2,tx);
550             fiy2             = _mm256_add_pd(fiy2,ty);
551             fiz2             = _mm256_add_pd(fiz2,tz);
552
553             fjx1             = _mm256_add_pd(fjx1,tx);
554             fjy1             = _mm256_add_pd(fjy1,ty);
555             fjz1             = _mm256_add_pd(fjz1,tz);
556
557             /**************************
558              * CALCULATE INTERACTIONS *
559              **************************/
560
561             /* COULOMB ELECTROSTATICS */
562             velec            = _mm256_mul_pd(qq22,rinv22);
563             felec            = _mm256_mul_pd(velec,rinvsq22);
564
565             /* Update potential sum for this i atom from the interaction with this j atom. */
566             velecsum         = _mm256_add_pd(velecsum,velec);
567
568             fscal            = felec;
569
570             /* Calculate temporary vectorial force */
571             tx               = _mm256_mul_pd(fscal,dx22);
572             ty               = _mm256_mul_pd(fscal,dy22);
573             tz               = _mm256_mul_pd(fscal,dz22);
574
575             /* Update vectorial force */
576             fix2             = _mm256_add_pd(fix2,tx);
577             fiy2             = _mm256_add_pd(fiy2,ty);
578             fiz2             = _mm256_add_pd(fiz2,tz);
579
580             fjx2             = _mm256_add_pd(fjx2,tx);
581             fjy2             = _mm256_add_pd(fjy2,ty);
582             fjz2             = _mm256_add_pd(fjz2,tz);
583
584             fjptrA             = f+j_coord_offsetA;
585             fjptrB             = f+j_coord_offsetB;
586             fjptrC             = f+j_coord_offsetC;
587             fjptrD             = f+j_coord_offsetD;
588
589             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
590                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
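            /* The fj accumulators hold the force the i water exerts on each j water; by Newton's
             * third law the j atoms receive the opposite sign, which is why the accumulated values
             * are subtracted (decremented) from the four j force arrays in one swizzled store.
             */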
591
592             /* Inner loop uses 278 flops */
593         }
594
595         if(jidx<j_index_end)
596         {
597
598             /* Get j neighbor index, and coordinate index */
599             jnrlistA         = jjnr[jidx];
600             jnrlistB         = jjnr[jidx+1];
601             jnrlistC         = jjnr[jidx+2];
602             jnrlistD         = jjnr[jidx+3];
603             /* Sign of each element will be negative for non-real atoms.
604              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
605              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
606              */
607             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
608
609             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
610             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
611             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
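            /* Each 32-bit "index < 0" comparison result is duplicated to fill the corresponding
             * 64-bit lane, and the two halves are then stacked into a 256-bit mask: all ones for
             * a padded entry, all zeros for a real atom, ready for the andnot operations below.
             */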
612
613             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
614             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
615             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
616             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
617             j_coord_offsetA  = DIM*jnrA;
618             j_coord_offsetB  = DIM*jnrB;
619             j_coord_offsetC  = DIM*jnrC;
620             j_coord_offsetD  = DIM*jnrD;
621
622             /* load j atom coordinates */
623             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
624                                                  x+j_coord_offsetC,x+j_coord_offsetD,
625                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
626
627             /* Calculate displacement vector */
628             dx00             = _mm256_sub_pd(ix0,jx0);
629             dy00             = _mm256_sub_pd(iy0,jy0);
630             dz00             = _mm256_sub_pd(iz0,jz0);
631             dx01             = _mm256_sub_pd(ix0,jx1);
632             dy01             = _mm256_sub_pd(iy0,jy1);
633             dz01             = _mm256_sub_pd(iz0,jz1);
634             dx02             = _mm256_sub_pd(ix0,jx2);
635             dy02             = _mm256_sub_pd(iy0,jy2);
636             dz02             = _mm256_sub_pd(iz0,jz2);
637             dx10             = _mm256_sub_pd(ix1,jx0);
638             dy10             = _mm256_sub_pd(iy1,jy0);
639             dz10             = _mm256_sub_pd(iz1,jz0);
640             dx11             = _mm256_sub_pd(ix1,jx1);
641             dy11             = _mm256_sub_pd(iy1,jy1);
642             dz11             = _mm256_sub_pd(iz1,jz1);
643             dx12             = _mm256_sub_pd(ix1,jx2);
644             dy12             = _mm256_sub_pd(iy1,jy2);
645             dz12             = _mm256_sub_pd(iz1,jz2);
646             dx20             = _mm256_sub_pd(ix2,jx0);
647             dy20             = _mm256_sub_pd(iy2,jy0);
648             dz20             = _mm256_sub_pd(iz2,jz0);
649             dx21             = _mm256_sub_pd(ix2,jx1);
650             dy21             = _mm256_sub_pd(iy2,jy1);
651             dz21             = _mm256_sub_pd(iz2,jz1);
652             dx22             = _mm256_sub_pd(ix2,jx2);
653             dy22             = _mm256_sub_pd(iy2,jy2);
654             dz22             = _mm256_sub_pd(iz2,jz2);
655
656             /* Calculate squared distances and quantities derived from them */
657             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
658             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
659             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
660             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
661             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
662             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
663             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
664             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
665             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
666
667             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
668             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
669             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
670             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
671             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
672             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
673             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
674             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
675             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
676
677             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
678             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
679             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
680             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
681             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
682             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
683             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
684             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
685             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
686
687             fjx0             = _mm256_setzero_pd();
688             fjy0             = _mm256_setzero_pd();
689             fjz0             = _mm256_setzero_pd();
690             fjx1             = _mm256_setzero_pd();
691             fjy1             = _mm256_setzero_pd();
692             fjz1             = _mm256_setzero_pd();
693             fjx2             = _mm256_setzero_pd();
694             fjy2             = _mm256_setzero_pd();
695             fjz2             = _mm256_setzero_pd();
696
697             /**************************
698              * CALCULATE INTERACTIONS *
699              **************************/
700
701             r00              = _mm256_mul_pd(rsq00,rinv00);
702             r00              = _mm256_andnot_pd(dummy_mask,r00);
703
704             /* Calculate table index by multiplying r with table scale and truncate to integer */
705             rt               = _mm256_mul_pd(r00,vftabscale);
706             vfitab           = _mm256_cvttpd_epi32(rt);
707             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
708             vfitab           = _mm_slli_epi32(vfitab,3);
709
710             /* COULOMB ELECTROSTATICS */
711             velec            = _mm256_mul_pd(qq00,rinv00);
712             felec            = _mm256_mul_pd(velec,rinvsq00);
713
714             /* CUBIC SPLINE TABLE DISPERSION */
715             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
716             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
717             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
718             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
719             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
720             Heps             = _mm256_mul_pd(vfeps,H);
721             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
722             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
723             vvdw6            = _mm256_mul_pd(c6_00,VV);
724             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
725             fvdw6            = _mm256_mul_pd(c6_00,FF);
726
727             /* CUBIC SPLINE TABLE REPULSION */
728             vfitab           = _mm_add_epi32(vfitab,ifour);
729             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
730             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
731             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
732             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
733             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
734             Heps             = _mm256_mul_pd(vfeps,H);
735             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
736             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
737             vvdw12           = _mm256_mul_pd(c12_00,VV);
738             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
739             fvdw12           = _mm256_mul_pd(c12_00,FF);
740             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
741             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
742
743             /* Update potential sum for this i atom from the interaction with this j atom. */
744             velec            = _mm256_andnot_pd(dummy_mask,velec);
745             velecsum         = _mm256_add_pd(velecsum,velec);
746             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
747             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
748
749             fscal            = _mm256_add_pd(felec,fvdw);
750
751             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
752
753             /* Calculate temporary vectorial force */
754             tx               = _mm256_mul_pd(fscal,dx00);
755             ty               = _mm256_mul_pd(fscal,dy00);
756             tz               = _mm256_mul_pd(fscal,dz00);
757
758             /* Update vectorial force */
759             fix0             = _mm256_add_pd(fix0,tx);
760             fiy0             = _mm256_add_pd(fiy0,ty);
761             fiz0             = _mm256_add_pd(fiz0,tz);
762
763             fjx0             = _mm256_add_pd(fjx0,tx);
764             fjy0             = _mm256_add_pd(fjy0,ty);
765             fjz0             = _mm256_add_pd(fjz0,tz);
766
767             /**************************
768              * CALCULATE INTERACTIONS *
769              **************************/
770
771             /* COULOMB ELECTROSTATICS */
772             velec            = _mm256_mul_pd(qq01,rinv01);
773             felec            = _mm256_mul_pd(velec,rinvsq01);
774
775             /* Update potential sum for this i atom from the interaction with this j atom. */
776             velec            = _mm256_andnot_pd(dummy_mask,velec);
777             velecsum         = _mm256_add_pd(velecsum,velec);
778
779             fscal            = felec;
780
781             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
782
783             /* Calculate temporary vectorial force */
784             tx               = _mm256_mul_pd(fscal,dx01);
785             ty               = _mm256_mul_pd(fscal,dy01);
786             tz               = _mm256_mul_pd(fscal,dz01);
787
788             /* Update vectorial force */
789             fix0             = _mm256_add_pd(fix0,tx);
790             fiy0             = _mm256_add_pd(fiy0,ty);
791             fiz0             = _mm256_add_pd(fiz0,tz);
792
793             fjx1             = _mm256_add_pd(fjx1,tx);
794             fjy1             = _mm256_add_pd(fjy1,ty);
795             fjz1             = _mm256_add_pd(fjz1,tz);
796
797             /**************************
798              * CALCULATE INTERACTIONS *
799              **************************/
800
801             /* COULOMB ELECTROSTATICS */
802             velec            = _mm256_mul_pd(qq02,rinv02);
803             felec            = _mm256_mul_pd(velec,rinvsq02);
804
805             /* Update potential sum for this i atom from the interaction with this j atom. */
806             velec            = _mm256_andnot_pd(dummy_mask,velec);
807             velecsum         = _mm256_add_pd(velecsum,velec);
808
809             fscal            = felec;
810
811             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
812
813             /* Calculate temporary vectorial force */
814             tx               = _mm256_mul_pd(fscal,dx02);
815             ty               = _mm256_mul_pd(fscal,dy02);
816             tz               = _mm256_mul_pd(fscal,dz02);
817
818             /* Update vectorial force */
819             fix0             = _mm256_add_pd(fix0,tx);
820             fiy0             = _mm256_add_pd(fiy0,ty);
821             fiz0             = _mm256_add_pd(fiz0,tz);
822
823             fjx2             = _mm256_add_pd(fjx2,tx);
824             fjy2             = _mm256_add_pd(fjy2,ty);
825             fjz2             = _mm256_add_pd(fjz2,tz);
826
827             /**************************
828              * CALCULATE INTERACTIONS *
829              **************************/
830
831             /* COULOMB ELECTROSTATICS */
832             velec            = _mm256_mul_pd(qq10,rinv10);
833             felec            = _mm256_mul_pd(velec,rinvsq10);
834
835             /* Update potential sum for this i atom from the interaction with this j atom. */
836             velec            = _mm256_andnot_pd(dummy_mask,velec);
837             velecsum         = _mm256_add_pd(velecsum,velec);
838
839             fscal            = felec;
840
841             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
842
843             /* Calculate temporary vectorial force */
844             tx               = _mm256_mul_pd(fscal,dx10);
845             ty               = _mm256_mul_pd(fscal,dy10);
846             tz               = _mm256_mul_pd(fscal,dz10);
847
848             /* Update vectorial force */
849             fix1             = _mm256_add_pd(fix1,tx);
850             fiy1             = _mm256_add_pd(fiy1,ty);
851             fiz1             = _mm256_add_pd(fiz1,tz);
852
853             fjx0             = _mm256_add_pd(fjx0,tx);
854             fjy0             = _mm256_add_pd(fjy0,ty);
855             fjz0             = _mm256_add_pd(fjz0,tz);
856
857             /**************************
858              * CALCULATE INTERACTIONS *
859              **************************/
860
861             /* COULOMB ELECTROSTATICS */
862             velec            = _mm256_mul_pd(qq11,rinv11);
863             felec            = _mm256_mul_pd(velec,rinvsq11);
864
865             /* Update potential sum for this i atom from the interaction with this j atom. */
866             velec            = _mm256_andnot_pd(dummy_mask,velec);
867             velecsum         = _mm256_add_pd(velecsum,velec);
868
869             fscal            = felec;
870
871             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
872
873             /* Calculate temporary vectorial force */
874             tx               = _mm256_mul_pd(fscal,dx11);
875             ty               = _mm256_mul_pd(fscal,dy11);
876             tz               = _mm256_mul_pd(fscal,dz11);
877
878             /* Update vectorial force */
879             fix1             = _mm256_add_pd(fix1,tx);
880             fiy1             = _mm256_add_pd(fiy1,ty);
881             fiz1             = _mm256_add_pd(fiz1,tz);
882
883             fjx1             = _mm256_add_pd(fjx1,tx);
884             fjy1             = _mm256_add_pd(fjy1,ty);
885             fjz1             = _mm256_add_pd(fjz1,tz);
886
887             /**************************
888              * CALCULATE INTERACTIONS *
889              **************************/
890
891             /* COULOMB ELECTROSTATICS */
892             velec            = _mm256_mul_pd(qq12,rinv12);
893             felec            = _mm256_mul_pd(velec,rinvsq12);
894
895             /* Update potential sum for this i atom from the interaction with this j atom. */
896             velec            = _mm256_andnot_pd(dummy_mask,velec);
897             velecsum         = _mm256_add_pd(velecsum,velec);
898
899             fscal            = felec;
900
901             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
902
903             /* Calculate temporary vectorial force */
904             tx               = _mm256_mul_pd(fscal,dx12);
905             ty               = _mm256_mul_pd(fscal,dy12);
906             tz               = _mm256_mul_pd(fscal,dz12);
907
908             /* Update vectorial force */
909             fix1             = _mm256_add_pd(fix1,tx);
910             fiy1             = _mm256_add_pd(fiy1,ty);
911             fiz1             = _mm256_add_pd(fiz1,tz);
912
913             fjx2             = _mm256_add_pd(fjx2,tx);
914             fjy2             = _mm256_add_pd(fjy2,ty);
915             fjz2             = _mm256_add_pd(fjz2,tz);
916
917             /**************************
918              * CALCULATE INTERACTIONS *
919              **************************/
920
921             /* COULOMB ELECTROSTATICS */
922             velec            = _mm256_mul_pd(qq20,rinv20);
923             felec            = _mm256_mul_pd(velec,rinvsq20);
924
925             /* Update potential sum for this i atom from the interaction with this j atom. */
926             velec            = _mm256_andnot_pd(dummy_mask,velec);
927             velecsum         = _mm256_add_pd(velecsum,velec);
928
929             fscal            = felec;
930
931             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
932
933             /* Calculate temporary vectorial force */
934             tx               = _mm256_mul_pd(fscal,dx20);
935             ty               = _mm256_mul_pd(fscal,dy20);
936             tz               = _mm256_mul_pd(fscal,dz20);
937
938             /* Update vectorial force */
939             fix2             = _mm256_add_pd(fix2,tx);
940             fiy2             = _mm256_add_pd(fiy2,ty);
941             fiz2             = _mm256_add_pd(fiz2,tz);
942
943             fjx0             = _mm256_add_pd(fjx0,tx);
944             fjy0             = _mm256_add_pd(fjy0,ty);
945             fjz0             = _mm256_add_pd(fjz0,tz);
946
947             /**************************
948              * CALCULATE INTERACTIONS *
949              **************************/
950
951             /* COULOMB ELECTROSTATICS */
952             velec            = _mm256_mul_pd(qq21,rinv21);
953             felec            = _mm256_mul_pd(velec,rinvsq21);
954
955             /* Update potential sum for this i atom from the interaction with this j atom. */
956             velec            = _mm256_andnot_pd(dummy_mask,velec);
957             velecsum         = _mm256_add_pd(velecsum,velec);
958
959             fscal            = felec;
960
961             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
962
963             /* Calculate temporary vectorial force */
964             tx               = _mm256_mul_pd(fscal,dx21);
965             ty               = _mm256_mul_pd(fscal,dy21);
966             tz               = _mm256_mul_pd(fscal,dz21);
967
968             /* Update vectorial force */
969             fix2             = _mm256_add_pd(fix2,tx);
970             fiy2             = _mm256_add_pd(fiy2,ty);
971             fiz2             = _mm256_add_pd(fiz2,tz);
972
973             fjx1             = _mm256_add_pd(fjx1,tx);
974             fjy1             = _mm256_add_pd(fjy1,ty);
975             fjz1             = _mm256_add_pd(fjz1,tz);
976
977             /**************************
978              * CALCULATE INTERACTIONS *
979              **************************/
980
981             /* COULOMB ELECTROSTATICS */
982             velec            = _mm256_mul_pd(qq22,rinv22);
983             felec            = _mm256_mul_pd(velec,rinvsq22);
984
985             /* Update potential sum for this i atom from the interaction with this j atom. */
986             velec            = _mm256_andnot_pd(dummy_mask,velec);
987             velecsum         = _mm256_add_pd(velecsum,velec);
988
989             fscal            = felec;
990
991             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
992
993             /* Calculate temporary vectorial force */
994             tx               = _mm256_mul_pd(fscal,dx22);
995             ty               = _mm256_mul_pd(fscal,dy22);
996             tz               = _mm256_mul_pd(fscal,dz22);
997
998             /* Update vectorial force */
999             fix2             = _mm256_add_pd(fix2,tx);
1000             fiy2             = _mm256_add_pd(fiy2,ty);
1001             fiz2             = _mm256_add_pd(fiz2,tz);
1002
1003             fjx2             = _mm256_add_pd(fjx2,tx);
1004             fjy2             = _mm256_add_pd(fjy2,ty);
1005             fjz2             = _mm256_add_pd(fjz2,tz);
1006
1007             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1008             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1009             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1010             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
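            /* Padded entries point their force output at the scratch buffer rather than the real
             * force array, so the four-pointer store below never touches memory for atoms that
             * do not exist; their contributions were already zeroed by the dummy mask.
             */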
1011
1012             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1013                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1014
1015             /* Inner loop uses 279 flops */
1016         }
1017
1018         /* End of innermost loop */
1019
1020         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1021                                                  f+i_coord_offset,fshift+i_shift_offset);
1022
1023         ggid                        = gid[iidx];
1024         /* Update potential energies */
1025         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1026         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
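        /* gmx_mm256_update_1pot_pd reduces the four lanes of each accumulator to a single sum
         * and adds it to the energy-group entry selected by ggid for this neighborlist.
         */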
1027
1028         /* Increment number of inner iterations */
1029         inneriter                  += j_index_end - j_index_start;
1030
1031         /* Outer loop uses 20 flops */
1032     }
1033
1034     /* Increment number of outer iterations */
1035     outeriter        += nri;
1036
1037     /* Update outer/inner flops */
1038
1039     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*279);
1040 }
1041 /*
1042  * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_double
1043  * Electrostatics interaction: Coulomb
1044  * VdW interaction:            CubicSplineTable
1045  * Geometry:                   Water3-Water3
1046  * Calculate force/pot:        Force
1047  */
1048 void
1049 nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_double
1050                     (t_nblist                    * gmx_restrict       nlist,
1051                      rvec                        * gmx_restrict          xx,
1052                      rvec                        * gmx_restrict          ff,
1053                      t_forcerec                  * gmx_restrict          fr,
1054                      t_mdatoms                   * gmx_restrict     mdatoms,
1055                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1056                      t_nrnb                      * gmx_restrict        nrnb)
1057 {
1058     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1059      * just 0 for non-waters.
1060      * Suffixes A,B,C,D refer to the j-loop unrolling done with AVX, i.e. the four different
1061      * jnr indices whose data occupy the four positions in the SIMD register.
1062      */
1063     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1064     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1065     int              jnrA,jnrB,jnrC,jnrD;
1066     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1067     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1068     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1069     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1070     real             rcutoff_scalar;
1071     real             *shiftvec,*fshift,*x,*f;
1072     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1073     real             scratch[4*DIM];
1074     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
    __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
    __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
    int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
    __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
    __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
    __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
    __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
    __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
    __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
    __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
    __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
    __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
    __m128i          vfitab;
    __m128i          ifour       = _mm_set1_epi32(4);
    __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
    real             *vftab;
    __m256d          dummy_mask,cutoff_mask;
    __m128           tmpmask0,tmpmask1;
    __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
    __m256d          one     = _mm256_set1_pd(1.0);
    __m256d          two     = _mm256_set1_pd(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_pd(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;

    vftab            = kernel_data->table_vdw->data;
    vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
    iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
    iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];

    jq0              = _mm256_set1_pd(charge[inr+0]);
    jq1              = _mm256_set1_pd(charge[inr+1]);
    jq2              = _mm256_set1_pd(charge[inr+2]);
    vdwjidx0A        = 2*vdwtype[inr+0];
    qq00             = _mm256_mul_pd(iq0,jq0);
    c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
    c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
    qq01             = _mm256_mul_pd(iq0,jq1);
    qq02             = _mm256_mul_pd(iq0,jq2);
    qq10             = _mm256_mul_pd(iq1,jq0);
    qq11             = _mm256_mul_pd(iq1,jq1);
    qq12             = _mm256_mul_pd(iq1,jq2);
    qq20             = _mm256_mul_pd(iq2,jq0);
    qq21             = _mm256_mul_pd(iq2,jq1);
    qq22             = _mm256_mul_pd(iq2,jq2);
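    /* For this 3-site water geometry only atom 0 (typically the oxygen) carries
     * VdW parameters here, so the cubic spline table is evaluated only for the
     * 0-0 pair; all nine charge-pair products are precomputed outside the loops.
     */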

    /* Initialize to avoid spurious compiler warnings */
    jnrA = jnrB = jnrC = jnrD = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }
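    /* The scratch buffer acts as a dump target for the forces of padded (dummy)
     * j entries in the masked loop below, so no real atom is touched by them.
     */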

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);

        fix0             = _mm256_setzero_pd();
        fiy0             = _mm256_setzero_pd();
        fiz0             = _mm256_setzero_pd();
        fix1             = _mm256_setzero_pd();
        fiy1             = _mm256_setzero_pd();
        fiz1             = _mm256_setzero_pd();
        fix2             = _mm256_setzero_pd();
        fiy2             = _mm256_setzero_pd();
        fiz2             = _mm256_setzero_pd();

        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
        {
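            /* This unrolled loop processes four j waters per iteration and only runs
             * while all four neighbor indices are real; padded entries are negative
             * and are handled by the masked epilogue after this loop.
             */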

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);
            dx01             = _mm256_sub_pd(ix0,jx1);
            dy01             = _mm256_sub_pd(iy0,jy1);
            dz01             = _mm256_sub_pd(iz0,jz1);
            dx02             = _mm256_sub_pd(ix0,jx2);
            dy02             = _mm256_sub_pd(iy0,jy2);
            dz02             = _mm256_sub_pd(iz0,jz2);
            dx10             = _mm256_sub_pd(ix1,jx0);
            dy10             = _mm256_sub_pd(iy1,jy0);
            dz10             = _mm256_sub_pd(iz1,jz0);
            dx11             = _mm256_sub_pd(ix1,jx1);
            dy11             = _mm256_sub_pd(iy1,jy1);
            dz11             = _mm256_sub_pd(iz1,jz1);
            dx12             = _mm256_sub_pd(ix1,jx2);
            dy12             = _mm256_sub_pd(iy1,jy2);
            dz12             = _mm256_sub_pd(iz1,jz2);
            dx20             = _mm256_sub_pd(ix2,jx0);
            dy20             = _mm256_sub_pd(iy2,jy0);
            dz20             = _mm256_sub_pd(iz2,jz0);
            dx21             = _mm256_sub_pd(ix2,jx1);
            dy21             = _mm256_sub_pd(iy2,jy1);
            dz21             = _mm256_sub_pd(iz2,jz1);
            dx22             = _mm256_sub_pd(ix2,jx2);
            dy22             = _mm256_sub_pd(iy2,jy2);
            dz22             = _mm256_sub_pd(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
            rinv22           = gmx_mm256_invsqrt_pd(rsq22);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);

            fjx0             = _mm256_setzero_pd();
            fjy0             = _mm256_setzero_pd();
            fjz0             = _mm256_setzero_pd();
            fjx1             = _mm256_setzero_pd();
            fjy1             = _mm256_setzero_pd();
            fjz1             = _mm256_setzero_pd();
            fjx2             = _mm256_setzero_pd();
            fjy2             = _mm256_setzero_pd();
            fjz2             = _mm256_setzero_pd();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);

            /* Calculate table index by multiplying r with the table scale and truncating to an integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);
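            /* Each table point stores four spline coefficients (Y,F,G,H) for dispersion
             * followed by four for repulsion, hence the stride of 8 (the shift by 3);
             * vfeps is the fractional position of r within its table interval.
             */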

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(velec,rinvsq00);
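            /* For the plain 1/r Coulomb potential the scalar force over r is
             * qq/r^3 = velec*rinvsq, so multiplying by dx,dy,dz below gives the
             * Cartesian force components directly.
             */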

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
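            /* FF = F + eps*(2*G + 3*eps*H) is the derivative with respect to vfeps of
             * the spline V(eps) = Y + eps*(F + eps*(G + eps*H)). Scaling by vftabscale
             * converts it to dV/dr, the extra rinv00 divides by r, and the XOR with
             * signbit negates it, so fvdw is -(dV/dr)/r, ready for dx,dy,dz below.
             */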

            fscal            = _mm256_add_pd(felec,fvdw);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq01,rinv01);
            felec            = _mm256_mul_pd(velec,rinvsq01);

            fscal            = felec;

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx01);
            ty               = _mm256_mul_pd(fscal,dy01);
            tz               = _mm256_mul_pd(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjx1             = _mm256_add_pd(fjx1,tx);
            fjy1             = _mm256_add_pd(fjy1,ty);
            fjz1             = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq02,rinv02);
            felec            = _mm256_mul_pd(velec,rinvsq02);

            fscal            = felec;

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx02);
            ty               = _mm256_mul_pd(fscal,dy02);
            tz               = _mm256_mul_pd(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjx2             = _mm256_add_pd(fjx2,tx);
            fjy2             = _mm256_add_pd(fjy2,ty);
            fjz2             = _mm256_add_pd(fjz2,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq10,rinv10);
            felec            = _mm256_mul_pd(velec,rinvsq10);

            fscal            = felec;

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx10);
            ty               = _mm256_mul_pd(fscal,dy10);
            tz               = _mm256_mul_pd(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq11,rinv11);
            felec            = _mm256_mul_pd(velec,rinvsq11);

            fscal            = felec;

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx11);
            ty               = _mm256_mul_pd(fscal,dy11);
            tz               = _mm256_mul_pd(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx1             = _mm256_add_pd(fjx1,tx);
            fjy1             = _mm256_add_pd(fjy1,ty);
            fjz1             = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq12,rinv12);
            felec            = _mm256_mul_pd(velec,rinvsq12);

            fscal            = felec;

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx12);
            ty               = _mm256_mul_pd(fscal,dy12);
            tz               = _mm256_mul_pd(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx2             = _mm256_add_pd(fjx2,tx);
            fjy2             = _mm256_add_pd(fjy2,ty);
            fjz2             = _mm256_add_pd(fjz2,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq20,rinv20);
            felec            = _mm256_mul_pd(velec,rinvsq20);

            fscal            = felec;

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx20);
            ty               = _mm256_mul_pd(fscal,dy20);
            tz               = _mm256_mul_pd(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq21,rinv21);
            felec            = _mm256_mul_pd(velec,rinvsq21);

            fscal            = felec;

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx21);
            ty               = _mm256_mul_pd(fscal,dy21);
            tz               = _mm256_mul_pd(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx1             = _mm256_add_pd(fjx1,tx);
            fjy1             = _mm256_add_pd(fjy1,ty);
            fjz1             = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq22,rinv22);
            felec            = _mm256_mul_pd(velec,rinvsq22);

            fscal            = felec;

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx22);
            ty               = _mm256_mul_pd(fscal,dy22);
            tz               = _mm256_mul_pd(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx2             = _mm256_add_pd(fjx2,tx);
            fjy2             = _mm256_add_pd(fjy2,ty);
            fjz2             = _mm256_add_pd(fjz2,tz);

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
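            /* Scatter-subtract the accumulated j forces from the four neighbor atoms;
             * by Newton's third law they receive the opposite of what was added to
             * the i-atom accumulators above.
             */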

            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 261 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            /* The j indices of padded (non-real) entries are negative.
             * The resulting mask is 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so it is used as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
             */
            tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));

            tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
            tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
            dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
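            /* The permutes duplicate each 32-bit comparison result into a 64-bit lane,
             * and the two halves are combined into a 256-bit mask that matches the
             * double-precision SIMD width.
             */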

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;

            /* load j atom coordinates */
            gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                              &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_pd(ix0,jx0);
            dy00             = _mm256_sub_pd(iy0,jy0);
            dz00             = _mm256_sub_pd(iz0,jz0);
            dx01             = _mm256_sub_pd(ix0,jx1);
            dy01             = _mm256_sub_pd(iy0,jy1);
            dz01             = _mm256_sub_pd(iz0,jz1);
            dx02             = _mm256_sub_pd(ix0,jx2);
            dy02             = _mm256_sub_pd(iy0,jy2);
            dz02             = _mm256_sub_pd(iz0,jz2);
            dx10             = _mm256_sub_pd(ix1,jx0);
            dy10             = _mm256_sub_pd(iy1,jy0);
            dz10             = _mm256_sub_pd(iz1,jz0);
            dx11             = _mm256_sub_pd(ix1,jx1);
            dy11             = _mm256_sub_pd(iy1,jy1);
            dz11             = _mm256_sub_pd(iz1,jz1);
            dx12             = _mm256_sub_pd(ix1,jx2);
            dy12             = _mm256_sub_pd(iy1,jy2);
            dz12             = _mm256_sub_pd(iz1,jz2);
            dx20             = _mm256_sub_pd(ix2,jx0);
            dy20             = _mm256_sub_pd(iy2,jy0);
            dz20             = _mm256_sub_pd(iz2,jz0);
            dx21             = _mm256_sub_pd(ix2,jx1);
            dy21             = _mm256_sub_pd(iy2,jy1);
            dz21             = _mm256_sub_pd(iz2,jz1);
            dx22             = _mm256_sub_pd(ix2,jx2);
            dy22             = _mm256_sub_pd(iy2,jy2);
            dz22             = _mm256_sub_pd(iz2,jz2);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
            rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
            rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
            rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
            rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
            rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
            rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
            rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
            rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);

            rinv00           = gmx_mm256_invsqrt_pd(rsq00);
            rinv01           = gmx_mm256_invsqrt_pd(rsq01);
            rinv02           = gmx_mm256_invsqrt_pd(rsq02);
            rinv10           = gmx_mm256_invsqrt_pd(rsq10);
            rinv11           = gmx_mm256_invsqrt_pd(rsq11);
            rinv12           = gmx_mm256_invsqrt_pd(rsq12);
            rinv20           = gmx_mm256_invsqrt_pd(rsq20);
            rinv21           = gmx_mm256_invsqrt_pd(rsq21);
            rinv22           = gmx_mm256_invsqrt_pd(rsq22);

            rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
            rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
            rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
            rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
            rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
            rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
            rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
            rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
            rinvsq22         = _mm256_mul_pd(rinv22,rinv22);

            fjx0             = _mm256_setzero_pd();
            fjy0             = _mm256_setzero_pd();
            fjz0             = _mm256_setzero_pd();
            fjx1             = _mm256_setzero_pd();
            fjy1             = _mm256_setzero_pd();
            fjz1             = _mm256_setzero_pd();
            fjx2             = _mm256_setzero_pd();
            fjy2             = _mm256_setzero_pd();
            fjz2             = _mm256_setzero_pd();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            r00              = _mm256_mul_pd(rsq00,rinv00);
            r00              = _mm256_andnot_pd(dummy_mask,r00);
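            /* Clear r00 in the padded lanes so that the table index computed below is
             * zero for them instead of whatever the duplicated coordinates would give.
             */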

            /* Calculate table index by multiplying r with the table scale and truncating to an integer */
            rt               = _mm256_mul_pd(r00,vftabscale);
            vfitab           = _mm256_cvttpd_epi32(rt);
            vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
            vfitab           = _mm_slli_epi32(vfitab,3);

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq00,rinv00);
            felec            = _mm256_mul_pd(velec,rinvsq00);

            /* CUBIC SPLINE TABLE DISPERSION */
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw6            = _mm256_mul_pd(c6_00,FF);

            /* CUBIC SPLINE TABLE REPULSION */
            vfitab           = _mm_add_epi32(vfitab,ifour);
            Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
            F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
            G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
            H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
            GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
            Heps             = _mm256_mul_pd(vfeps,H);
            Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
            FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
            fvdw12           = _mm256_mul_pd(c12_00,FF);
            fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));

            fscal            = _mm256_add_pd(felec,fvdw);

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);
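            /* Zero the scalar force in the padded lanes so they contribute nothing to
             * the i- and j-force accumulators.
             */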

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx00);
            ty               = _mm256_mul_pd(fscal,dy00);
            tz               = _mm256_mul_pd(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq01,rinv01);
            felec            = _mm256_mul_pd(velec,rinvsq01);

            fscal            = felec;

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx01);
            ty               = _mm256_mul_pd(fscal,dy01);
            tz               = _mm256_mul_pd(fscal,dz01);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjx1             = _mm256_add_pd(fjx1,tx);
            fjy1             = _mm256_add_pd(fjy1,ty);
            fjz1             = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq02,rinv02);
            felec            = _mm256_mul_pd(velec,rinvsq02);

            fscal            = felec;

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx02);
            ty               = _mm256_mul_pd(fscal,dy02);
            tz               = _mm256_mul_pd(fscal,dz02);

            /* Update vectorial force */
            fix0             = _mm256_add_pd(fix0,tx);
            fiy0             = _mm256_add_pd(fiy0,ty);
            fiz0             = _mm256_add_pd(fiz0,tz);

            fjx2             = _mm256_add_pd(fjx2,tx);
            fjy2             = _mm256_add_pd(fjy2,ty);
            fjz2             = _mm256_add_pd(fjz2,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq10,rinv10);
            felec            = _mm256_mul_pd(velec,rinvsq10);

            fscal            = felec;

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx10);
            ty               = _mm256_mul_pd(fscal,dy10);
            tz               = _mm256_mul_pd(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq11,rinv11);
            felec            = _mm256_mul_pd(velec,rinvsq11);

            fscal            = felec;

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx11);
            ty               = _mm256_mul_pd(fscal,dy11);
            tz               = _mm256_mul_pd(fscal,dz11);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx1             = _mm256_add_pd(fjx1,tx);
            fjy1             = _mm256_add_pd(fjy1,ty);
            fjz1             = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq12,rinv12);
            felec            = _mm256_mul_pd(velec,rinvsq12);

            fscal            = felec;

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx12);
            ty               = _mm256_mul_pd(fscal,dy12);
            tz               = _mm256_mul_pd(fscal,dz12);

            /* Update vectorial force */
            fix1             = _mm256_add_pd(fix1,tx);
            fiy1             = _mm256_add_pd(fiy1,ty);
            fiz1             = _mm256_add_pd(fiz1,tz);

            fjx2             = _mm256_add_pd(fjx2,tx);
            fjy2             = _mm256_add_pd(fjy2,ty);
            fjz2             = _mm256_add_pd(fjz2,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq20,rinv20);
            felec            = _mm256_mul_pd(velec,rinvsq20);

            fscal            = felec;

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx20);
            ty               = _mm256_mul_pd(fscal,dy20);
            tz               = _mm256_mul_pd(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx0             = _mm256_add_pd(fjx0,tx);
            fjy0             = _mm256_add_pd(fjy0,ty);
            fjz0             = _mm256_add_pd(fjz0,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq21,rinv21);
            felec            = _mm256_mul_pd(velec,rinvsq21);

            fscal            = felec;

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx21);
            ty               = _mm256_mul_pd(fscal,dy21);
            tz               = _mm256_mul_pd(fscal,dz21);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx1             = _mm256_add_pd(fjx1,tx);
            fjy1             = _mm256_add_pd(fjy1,ty);
            fjz1             = _mm256_add_pd(fjz1,tz);

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            /* COULOMB ELECTROSTATICS */
            velec            = _mm256_mul_pd(qq22,rinv22);
            felec            = _mm256_mul_pd(velec,rinvsq22);

            fscal            = felec;

            fscal            = _mm256_andnot_pd(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_pd(fscal,dx22);
            ty               = _mm256_mul_pd(fscal,dy22);
            tz               = _mm256_mul_pd(fscal,dz22);

            /* Update vectorial force */
            fix2             = _mm256_add_pd(fix2,tx);
            fiy2             = _mm256_add_pd(fiy2,ty);
            fiz2             = _mm256_add_pd(fiz2,tz);

            fjx2             = _mm256_add_pd(fjx2,tx);
            fjy2             = _mm256_add_pd(fjy2,ty);
            fjz2             = _mm256_add_pd(fjz2,tz);

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
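            /* Padded lanes write their forces to the scratch buffer instead of a real atom. */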

            gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
                                                      fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);

            /* Inner loop uses 262 flops */
        }

        /* End of innermost loop */

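        /* Reduce the SIMD lanes of the i-atom force accumulators and add the result
         * to the force array and to the shift-force entry for this neighborlist.
         */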
        gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 18 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

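    /* Register the estimated operation count with the nonbonded accounting counters,
     * using the per-iteration flop estimates noted in the loop comments above.
     */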
    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*262);
}