Use full path for legacyheaders
alexxy/gromacs.git: src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_double/nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_avx_256_double.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_double kernel generator.
37  */
38 #include "config.h"
39
40 #include <math.h>
41
42 #include "../nb_kernel.h"
43 #include "gromacs/legacyheaders/types/simple.h"
44 #include "gromacs/math/vec.h"
45 #include "gromacs/legacyheaders/nrnb.h"
46
47 #include "gromacs/simd/math_x86_avx_256_double.h"
48 #include "kernelutil_x86_avx_256_double.h"
49
50 /*
51  * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_double
52  * Electrostatics interaction: Coulomb
53  * VdW interaction:            CubicSplineTable
54  * Geometry:                   Water3-Water3
55  * Calculate force/pot:        PotentialAndForce
56  */
57 void
58 nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_VF_avx_256_double
59                     (t_nblist                    * gmx_restrict       nlist,
60                      rvec                        * gmx_restrict          xx,
61                      rvec                        * gmx_restrict          ff,
62                      t_forcerec                  * gmx_restrict          fr,
63                      t_mdatoms                   * gmx_restrict     mdatoms,
64                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65                      t_nrnb                      * gmx_restrict        nrnb)
66 {
67     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
68      * just 0 for non-waters.
69      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
70      * jnr indices corresponding to data put in the four positions in the SIMD register.
71      */
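    /*
     * Expository sketch (added; not produced by the kernel generator): each
     * pass of the inner loop below pairs one i-water with four j-waters, so
     * all nine atom-atom pairs are evaluated with the four j entries spread
     * across the SIMD lanes. Per lane, the plain Coulomb contribution of a
     * pair is, in scalar form,
     *
     *     velec  = qq * rinv;          // epsfac*qi*qj/r
     *     felec  = velec * rinvsq;     // epsfac*qi*qj/r^3
     *     fx    += felec * dx;         // likewise for y,z; added to i, subtracted from j
     *
     * while the O-O pair additionally receives tabulated dispersion and
     * repulsion terms (see the cubic-spline blocks further down).
     */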
72     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
73     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74     int              jnrA,jnrB,jnrC,jnrD;
75     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
76     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
77     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
78     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
79     real             rcutoff_scalar;
80     real             *shiftvec,*fshift,*x,*f;
81     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
82     real             scratch[4*DIM];
83     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
84     real *           vdwioffsetptr0;
85     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
86     real *           vdwioffsetptr1;
87     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
88     real *           vdwioffsetptr2;
89     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
90     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
91     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
92     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
93     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
94     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
95     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
96     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
97     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
98     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
99     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
100     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
101     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
102     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
103     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
104     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
105     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
106     real             *charge;
107     int              nvdwtype;
108     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
109     int              *vdwtype;
110     real             *vdwparam;
111     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
112     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
113     __m128i          vfitab;
114     __m128i          ifour       = _mm_set1_epi32(4);
115     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
116     real             *vftab;
117     __m256d          dummy_mask,cutoff_mask;
118     __m128           tmpmask0,tmpmask1;
119     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
120     __m256d          one     = _mm256_set1_pd(1.0);
121     __m256d          two     = _mm256_set1_pd(2.0);
122     x                = xx[0];
123     f                = ff[0];
124
125     nri              = nlist->nri;
126     iinr             = nlist->iinr;
127     jindex           = nlist->jindex;
128     jjnr             = nlist->jjnr;
129     shiftidx         = nlist->shift;
130     gid              = nlist->gid;
131     shiftvec         = fr->shift_vec[0];
132     fshift           = fr->fshift[0];
133     facel            = _mm256_set1_pd(fr->epsfac);
134     charge           = mdatoms->chargeA;
135     nvdwtype         = fr->ntype;
136     vdwparam         = fr->nbfp;
137     vdwtype          = mdatoms->typeA;
138
139     vftab            = kernel_data->table_vdw->data;
140     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
141
142     /* Setup water-specific parameters */
143     inr              = nlist->iinr[0];
144     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
145     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
146     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
147     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
148
149     jq0              = _mm256_set1_pd(charge[inr+0]);
150     jq1              = _mm256_set1_pd(charge[inr+1]);
151     jq2              = _mm256_set1_pd(charge[inr+2]);
152     vdwjidx0A        = 2*vdwtype[inr+0];
153     qq00             = _mm256_mul_pd(iq0,jq0);
154     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
155     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
156     qq01             = _mm256_mul_pd(iq0,jq1);
157     qq02             = _mm256_mul_pd(iq0,jq2);
158     qq10             = _mm256_mul_pd(iq1,jq0);
159     qq11             = _mm256_mul_pd(iq1,jq1);
160     qq12             = _mm256_mul_pd(iq1,jq2);
161     qq20             = _mm256_mul_pd(iq2,jq0);
162     qq21             = _mm256_mul_pd(iq2,jq1);
163     qq22             = _mm256_mul_pd(iq2,jq2);
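    /*
     * Expository note (added, not generator output): because every water in the
     * list shares the same three atoms, the nine charge products and the single
     * O-O Lennard-Jones pair are hoisted out of both loops. In scalar form,
     * assuming atom 0 of the water is the oxygen:
     *
     *     qq[a][b] = epsfac * q[a] * q[b];                   // a,b in {0,1,2}
     *     c6_00    = nbfp[2*(ntype*type[O] + type[O])];
     *     c12_00   = nbfp[2*(ntype*type[O] + type[O]) + 1];
     */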
164
165     /* Silence compiler warnings about possibly uninitialized variables */
166     jnrA = jnrB = jnrC = jnrD = 0;
167     j_coord_offsetA = 0;
168     j_coord_offsetB = 0;
169     j_coord_offsetC = 0;
170     j_coord_offsetD = 0;
171
172     outeriter        = 0;
173     inneriter        = 0;
174
175     for(iidx=0;iidx<4*DIM;iidx++)
176     {
177         scratch[iidx] = 0.0;
178     }
179
180     /* Start outer loop over neighborlists */
181     for(iidx=0; iidx<nri; iidx++)
182     {
183         /* Load shift vector for this list */
184         i_shift_offset   = DIM*shiftidx[iidx];
185
186         /* Load limits for loop over neighbors */
187         j_index_start    = jindex[iidx];
188         j_index_end      = jindex[iidx+1];
189
190         /* Get outer coordinate index */
191         inr              = iinr[iidx];
192         i_coord_offset   = DIM*inr;
193
194         /* Load i particle coords and add shift vector */
195         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
196                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
197
198         fix0             = _mm256_setzero_pd();
199         fiy0             = _mm256_setzero_pd();
200         fiz0             = _mm256_setzero_pd();
201         fix1             = _mm256_setzero_pd();
202         fiy1             = _mm256_setzero_pd();
203         fiz1             = _mm256_setzero_pd();
204         fix2             = _mm256_setzero_pd();
205         fiy2             = _mm256_setzero_pd();
206         fiz2             = _mm256_setzero_pd();
207
208         /* Reset potential sums */
209         velecsum         = _mm256_setzero_pd();
210         vvdwsum          = _mm256_setzero_pd();
211
212         /* Start inner kernel loop */
213         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
214         {
215
216             /* Get j neighbor index, and coordinate index */
217             jnrA             = jjnr[jidx];
218             jnrB             = jjnr[jidx+1];
219             jnrC             = jjnr[jidx+2];
220             jnrD             = jjnr[jidx+3];
221             j_coord_offsetA  = DIM*jnrA;
222             j_coord_offsetB  = DIM*jnrB;
223             j_coord_offsetC  = DIM*jnrC;
224             j_coord_offsetD  = DIM*jnrD;
225
226             /* load j atom coordinates */
227             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
228                                                  x+j_coord_offsetC,x+j_coord_offsetD,
229                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
230
231             /* Calculate displacement vector */
232             dx00             = _mm256_sub_pd(ix0,jx0);
233             dy00             = _mm256_sub_pd(iy0,jy0);
234             dz00             = _mm256_sub_pd(iz0,jz0);
235             dx01             = _mm256_sub_pd(ix0,jx1);
236             dy01             = _mm256_sub_pd(iy0,jy1);
237             dz01             = _mm256_sub_pd(iz0,jz1);
238             dx02             = _mm256_sub_pd(ix0,jx2);
239             dy02             = _mm256_sub_pd(iy0,jy2);
240             dz02             = _mm256_sub_pd(iz0,jz2);
241             dx10             = _mm256_sub_pd(ix1,jx0);
242             dy10             = _mm256_sub_pd(iy1,jy0);
243             dz10             = _mm256_sub_pd(iz1,jz0);
244             dx11             = _mm256_sub_pd(ix1,jx1);
245             dy11             = _mm256_sub_pd(iy1,jy1);
246             dz11             = _mm256_sub_pd(iz1,jz1);
247             dx12             = _mm256_sub_pd(ix1,jx2);
248             dy12             = _mm256_sub_pd(iy1,jy2);
249             dz12             = _mm256_sub_pd(iz1,jz2);
250             dx20             = _mm256_sub_pd(ix2,jx0);
251             dy20             = _mm256_sub_pd(iy2,jy0);
252             dz20             = _mm256_sub_pd(iz2,jz0);
253             dx21             = _mm256_sub_pd(ix2,jx1);
254             dy21             = _mm256_sub_pd(iy2,jy1);
255             dz21             = _mm256_sub_pd(iz2,jz1);
256             dx22             = _mm256_sub_pd(ix2,jx2);
257             dy22             = _mm256_sub_pd(iy2,jy2);
258             dz22             = _mm256_sub_pd(iz2,jz2);
259
260             /* Calculate squared distance and things based on it */
261             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
262             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
263             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
264             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
265             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
266             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
267             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
268             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
269             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
270
271             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
272             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
273             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
274             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
275             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
276             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
277             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
278             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
279             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
280
281             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
282             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
283             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
284             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
285             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
286             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
287             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
288             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
289             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
290
291             fjx0             = _mm256_setzero_pd();
292             fjy0             = _mm256_setzero_pd();
293             fjz0             = _mm256_setzero_pd();
294             fjx1             = _mm256_setzero_pd();
295             fjy1             = _mm256_setzero_pd();
296             fjz1             = _mm256_setzero_pd();
297             fjx2             = _mm256_setzero_pd();
298             fjy2             = _mm256_setzero_pd();
299             fjz2             = _mm256_setzero_pd();
300
301             /**************************
302              * CALCULATE INTERACTIONS *
303              **************************/
304
305             r00              = _mm256_mul_pd(rsq00,rinv00);
306
307             /* Calculate table index by multiplying r with table scale and truncate to integer */
308             rt               = _mm256_mul_pd(r00,vftabscale);
309             vfitab           = _mm256_cvttpd_epi32(rt);
310             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
311             vfitab           = _mm_slli_epi32(vfitab,3);
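            /*
             * Scalar sketch of the index math above (exposition only):
             *
             *     rt    = r * tabscale;
             *     idx   = (int)rt;             // truncate toward zero
             *     eps   = rt - floor(rt);      // fractional position in the bin
             *     idx  *= 8;                   // each table point stores Y,F,G,H for
             *                                  // dispersion plus four values for repulsion
             */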
312
313             /* COULOMB ELECTROSTATICS */
314             velec            = _mm256_mul_pd(qq00,rinv00);
315             felec            = _mm256_mul_pd(velec,rinvsq00);
316
317             /* CUBIC SPLINE TABLE DISPERSION */
318             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
319             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
320             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
321             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
322             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
323             Heps             = _mm256_mul_pd(vfeps,H);
324             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
325             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
326             vvdw6            = _mm256_mul_pd(c6_00,VV);
327             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
328             fvdw6            = _mm256_mul_pd(c6_00,FF);
329
330             /* CUBIC SPLINE TABLE REPULSION */
331             vfitab           = _mm_add_epi32(vfitab,ifour);
332             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
333             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
334             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
335             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
336             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
337             Heps             = _mm256_mul_pd(vfeps,H);
338             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
339             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
340             vvdw12           = _mm256_mul_pd(c12_00,VV);
341             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
342             fvdw12           = _mm256_mul_pd(c12_00,FF);
343             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
344             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
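            /*
             * Scalar form of the cubic-spline evaluation above (illustrative,
             * not generated code):
             *
             *     Fp   = F + eps*(G + eps*H);
             *     VV   = Y + eps*Fp;                    // tabulated V at r
             *     FF   = Fp + eps*(G + 2*eps*H);        // dV/d(eps)
             *     vvdw = c6*VV_disp + c12*VV_rep;
             *     fvdw = -(c6*FF_disp + c12*FF_rep) * tabscale / r;
             *
             * The 1/r factor is folded in so that multiplying by dx,dy,dz
             * directly yields the Cartesian force components.
             */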
345
346             /* Update potential sum for this i atom from the interaction with this j atom. */
347             velecsum         = _mm256_add_pd(velecsum,velec);
348             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
349
350             fscal            = _mm256_add_pd(felec,fvdw);
351
352             /* Calculate temporary vectorial force */
353             tx               = _mm256_mul_pd(fscal,dx00);
354             ty               = _mm256_mul_pd(fscal,dy00);
355             tz               = _mm256_mul_pd(fscal,dz00);
356
357             /* Update vectorial force */
358             fix0             = _mm256_add_pd(fix0,tx);
359             fiy0             = _mm256_add_pd(fiy0,ty);
360             fiz0             = _mm256_add_pd(fiz0,tz);
361
362             fjx0             = _mm256_add_pd(fjx0,tx);
363             fjy0             = _mm256_add_pd(fjy0,ty);
364             fjz0             = _mm256_add_pd(fjz0,tz);
365
366             /**************************
367              * CALCULATE INTERACTIONS *
368              **************************/
369
370             /* COULOMB ELECTROSTATICS */
371             velec            = _mm256_mul_pd(qq01,rinv01);
372             felec            = _mm256_mul_pd(velec,rinvsq01);
373
374             /* Update potential sum for this i atom from the interaction with this j atom. */
375             velecsum         = _mm256_add_pd(velecsum,velec);
376
377             fscal            = felec;
378
379             /* Calculate temporary vectorial force */
380             tx               = _mm256_mul_pd(fscal,dx01);
381             ty               = _mm256_mul_pd(fscal,dy01);
382             tz               = _mm256_mul_pd(fscal,dz01);
383
384             /* Update vectorial force */
385             fix0             = _mm256_add_pd(fix0,tx);
386             fiy0             = _mm256_add_pd(fiy0,ty);
387             fiz0             = _mm256_add_pd(fiz0,tz);
388
389             fjx1             = _mm256_add_pd(fjx1,tx);
390             fjy1             = _mm256_add_pd(fjy1,ty);
391             fjz1             = _mm256_add_pd(fjz1,tz);
392
393             /**************************
394              * CALCULATE INTERACTIONS *
395              **************************/
396
397             /* COULOMB ELECTROSTATICS */
398             velec            = _mm256_mul_pd(qq02,rinv02);
399             felec            = _mm256_mul_pd(velec,rinvsq02);
400
401             /* Update potential sum for this i atom from the interaction with this j atom. */
402             velecsum         = _mm256_add_pd(velecsum,velec);
403
404             fscal            = felec;
405
406             /* Calculate temporary vectorial force */
407             tx               = _mm256_mul_pd(fscal,dx02);
408             ty               = _mm256_mul_pd(fscal,dy02);
409             tz               = _mm256_mul_pd(fscal,dz02);
410
411             /* Update vectorial force */
412             fix0             = _mm256_add_pd(fix0,tx);
413             fiy0             = _mm256_add_pd(fiy0,ty);
414             fiz0             = _mm256_add_pd(fiz0,tz);
415
416             fjx2             = _mm256_add_pd(fjx2,tx);
417             fjy2             = _mm256_add_pd(fjy2,ty);
418             fjz2             = _mm256_add_pd(fjz2,tz);
419
420             /**************************
421              * CALCULATE INTERACTIONS *
422              **************************/
423
424             /* COULOMB ELECTROSTATICS */
425             velec            = _mm256_mul_pd(qq10,rinv10);
426             felec            = _mm256_mul_pd(velec,rinvsq10);
427
428             /* Update potential sum for this i atom from the interaction with this j atom. */
429             velecsum         = _mm256_add_pd(velecsum,velec);
430
431             fscal            = felec;
432
433             /* Calculate temporary vectorial force */
434             tx               = _mm256_mul_pd(fscal,dx10);
435             ty               = _mm256_mul_pd(fscal,dy10);
436             tz               = _mm256_mul_pd(fscal,dz10);
437
438             /* Update vectorial force */
439             fix1             = _mm256_add_pd(fix1,tx);
440             fiy1             = _mm256_add_pd(fiy1,ty);
441             fiz1             = _mm256_add_pd(fiz1,tz);
442
443             fjx0             = _mm256_add_pd(fjx0,tx);
444             fjy0             = _mm256_add_pd(fjy0,ty);
445             fjz0             = _mm256_add_pd(fjz0,tz);
446
447             /**************************
448              * CALCULATE INTERACTIONS *
449              **************************/
450
451             /* COULOMB ELECTROSTATICS */
452             velec            = _mm256_mul_pd(qq11,rinv11);
453             felec            = _mm256_mul_pd(velec,rinvsq11);
454
455             /* Update potential sum for this i atom from the interaction with this j atom. */
456             velecsum         = _mm256_add_pd(velecsum,velec);
457
458             fscal            = felec;
459
460             /* Calculate temporary vectorial force */
461             tx               = _mm256_mul_pd(fscal,dx11);
462             ty               = _mm256_mul_pd(fscal,dy11);
463             tz               = _mm256_mul_pd(fscal,dz11);
464
465             /* Update vectorial force */
466             fix1             = _mm256_add_pd(fix1,tx);
467             fiy1             = _mm256_add_pd(fiy1,ty);
468             fiz1             = _mm256_add_pd(fiz1,tz);
469
470             fjx1             = _mm256_add_pd(fjx1,tx);
471             fjy1             = _mm256_add_pd(fjy1,ty);
472             fjz1             = _mm256_add_pd(fjz1,tz);
473
474             /**************************
475              * CALCULATE INTERACTIONS *
476              **************************/
477
478             /* COULOMB ELECTROSTATICS */
479             velec            = _mm256_mul_pd(qq12,rinv12);
480             felec            = _mm256_mul_pd(velec,rinvsq12);
481
482             /* Update potential sum for this i atom from the interaction with this j atom. */
483             velecsum         = _mm256_add_pd(velecsum,velec);
484
485             fscal            = felec;
486
487             /* Calculate temporary vectorial force */
488             tx               = _mm256_mul_pd(fscal,dx12);
489             ty               = _mm256_mul_pd(fscal,dy12);
490             tz               = _mm256_mul_pd(fscal,dz12);
491
492             /* Update vectorial force */
493             fix1             = _mm256_add_pd(fix1,tx);
494             fiy1             = _mm256_add_pd(fiy1,ty);
495             fiz1             = _mm256_add_pd(fiz1,tz);
496
497             fjx2             = _mm256_add_pd(fjx2,tx);
498             fjy2             = _mm256_add_pd(fjy2,ty);
499             fjz2             = _mm256_add_pd(fjz2,tz);
500
501             /**************************
502              * CALCULATE INTERACTIONS *
503              **************************/
504
505             /* COULOMB ELECTROSTATICS */
506             velec            = _mm256_mul_pd(qq20,rinv20);
507             felec            = _mm256_mul_pd(velec,rinvsq20);
508
509             /* Update potential sum for this i atom from the interaction with this j atom. */
510             velecsum         = _mm256_add_pd(velecsum,velec);
511
512             fscal            = felec;
513
514             /* Calculate temporary vectorial force */
515             tx               = _mm256_mul_pd(fscal,dx20);
516             ty               = _mm256_mul_pd(fscal,dy20);
517             tz               = _mm256_mul_pd(fscal,dz20);
518
519             /* Update vectorial force */
520             fix2             = _mm256_add_pd(fix2,tx);
521             fiy2             = _mm256_add_pd(fiy2,ty);
522             fiz2             = _mm256_add_pd(fiz2,tz);
523
524             fjx0             = _mm256_add_pd(fjx0,tx);
525             fjy0             = _mm256_add_pd(fjy0,ty);
526             fjz0             = _mm256_add_pd(fjz0,tz);
527
528             /**************************
529              * CALCULATE INTERACTIONS *
530              **************************/
531
532             /* COULOMB ELECTROSTATICS */
533             velec            = _mm256_mul_pd(qq21,rinv21);
534             felec            = _mm256_mul_pd(velec,rinvsq21);
535
536             /* Update potential sum for this i atom from the interaction with this j atom. */
537             velecsum         = _mm256_add_pd(velecsum,velec);
538
539             fscal            = felec;
540
541             /* Calculate temporary vectorial force */
542             tx               = _mm256_mul_pd(fscal,dx21);
543             ty               = _mm256_mul_pd(fscal,dy21);
544             tz               = _mm256_mul_pd(fscal,dz21);
545
546             /* Update vectorial force */
547             fix2             = _mm256_add_pd(fix2,tx);
548             fiy2             = _mm256_add_pd(fiy2,ty);
549             fiz2             = _mm256_add_pd(fiz2,tz);
550
551             fjx1             = _mm256_add_pd(fjx1,tx);
552             fjy1             = _mm256_add_pd(fjy1,ty);
553             fjz1             = _mm256_add_pd(fjz1,tz);
554
555             /**************************
556              * CALCULATE INTERACTIONS *
557              **************************/
558
559             /* COULOMB ELECTROSTATICS */
560             velec            = _mm256_mul_pd(qq22,rinv22);
561             felec            = _mm256_mul_pd(velec,rinvsq22);
562
563             /* Update potential sum for this i atom from the interaction with this j atom. */
564             velecsum         = _mm256_add_pd(velecsum,velec);
565
566             fscal            = felec;
567
568             /* Calculate temporary vectorial force */
569             tx               = _mm256_mul_pd(fscal,dx22);
570             ty               = _mm256_mul_pd(fscal,dy22);
571             tz               = _mm256_mul_pd(fscal,dz22);
572
573             /* Update vectorial force */
574             fix2             = _mm256_add_pd(fix2,tx);
575             fiy2             = _mm256_add_pd(fiy2,ty);
576             fiz2             = _mm256_add_pd(fiz2,tz);
577
578             fjx2             = _mm256_add_pd(fjx2,tx);
579             fjy2             = _mm256_add_pd(fjy2,ty);
580             fjz2             = _mm256_add_pd(fjz2,tz);
581
582             fjptrA             = f+j_coord_offsetA;
583             fjptrB             = f+j_coord_offsetB;
584             fjptrC             = f+j_coord_offsetC;
585             fjptrD             = f+j_coord_offsetD;
586
587             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
588                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
589
590             /* Inner loop uses 278 flops */
591         }
592
593         if(jidx<j_index_end)
594         {
595
596             /* Get j neighbor index, and coordinate index */
597             jnrlistA         = jjnr[jidx];
598             jnrlistB         = jjnr[jidx+1];
599             jnrlistC         = jjnr[jidx+2];
600             jnrlistD         = jjnr[jidx+3];
601             /* Sign of each element will be negative for non-real atoms.
602              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
603              * so use it as val = _mm256_andnot_pd(mask,val) to clear dummy entries.
604              */
605             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
606
607             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
608             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
609             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
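            /*
             * Scalar equivalent of the mask construction above (exposition
             * only): padded neighbour-list slots carry a negative index, so
             *
             *     dummy[k] = (jjnr[jidx+k] < 0) ? ~0 : 0;    // k = 0..3
             *     value    =  dummy[k] ? 0.0 : value;        // andnot in SIMD
             *
             * and the two permutes widen the four 32-bit comparison results to
             * the four 64-bit double lanes.
             */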
610
611             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
612             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
613             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
614             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
615             j_coord_offsetA  = DIM*jnrA;
616             j_coord_offsetB  = DIM*jnrB;
617             j_coord_offsetC  = DIM*jnrC;
618             j_coord_offsetD  = DIM*jnrD;
619
620             /* load j atom coordinates */
621             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
622                                                  x+j_coord_offsetC,x+j_coord_offsetD,
623                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
624
625             /* Calculate displacement vector */
626             dx00             = _mm256_sub_pd(ix0,jx0);
627             dy00             = _mm256_sub_pd(iy0,jy0);
628             dz00             = _mm256_sub_pd(iz0,jz0);
629             dx01             = _mm256_sub_pd(ix0,jx1);
630             dy01             = _mm256_sub_pd(iy0,jy1);
631             dz01             = _mm256_sub_pd(iz0,jz1);
632             dx02             = _mm256_sub_pd(ix0,jx2);
633             dy02             = _mm256_sub_pd(iy0,jy2);
634             dz02             = _mm256_sub_pd(iz0,jz2);
635             dx10             = _mm256_sub_pd(ix1,jx0);
636             dy10             = _mm256_sub_pd(iy1,jy0);
637             dz10             = _mm256_sub_pd(iz1,jz0);
638             dx11             = _mm256_sub_pd(ix1,jx1);
639             dy11             = _mm256_sub_pd(iy1,jy1);
640             dz11             = _mm256_sub_pd(iz1,jz1);
641             dx12             = _mm256_sub_pd(ix1,jx2);
642             dy12             = _mm256_sub_pd(iy1,jy2);
643             dz12             = _mm256_sub_pd(iz1,jz2);
644             dx20             = _mm256_sub_pd(ix2,jx0);
645             dy20             = _mm256_sub_pd(iy2,jy0);
646             dz20             = _mm256_sub_pd(iz2,jz0);
647             dx21             = _mm256_sub_pd(ix2,jx1);
648             dy21             = _mm256_sub_pd(iy2,jy1);
649             dz21             = _mm256_sub_pd(iz2,jz1);
650             dx22             = _mm256_sub_pd(ix2,jx2);
651             dy22             = _mm256_sub_pd(iy2,jy2);
652             dz22             = _mm256_sub_pd(iz2,jz2);
653
654             /* Calculate squared distance and things based on it */
655             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
656             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
657             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
658             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
659             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
660             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
661             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
662             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
663             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
664
665             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
666             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
667             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
668             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
669             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
670             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
671             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
672             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
673             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
674
675             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
676             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
677             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
678             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
679             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
680             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
681             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
682             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
683             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
684
685             fjx0             = _mm256_setzero_pd();
686             fjy0             = _mm256_setzero_pd();
687             fjz0             = _mm256_setzero_pd();
688             fjx1             = _mm256_setzero_pd();
689             fjy1             = _mm256_setzero_pd();
690             fjz1             = _mm256_setzero_pd();
691             fjx2             = _mm256_setzero_pd();
692             fjy2             = _mm256_setzero_pd();
693             fjz2             = _mm256_setzero_pd();
694
695             /**************************
696              * CALCULATE INTERACTIONS *
697              **************************/
698
699             r00              = _mm256_mul_pd(rsq00,rinv00);
700             r00              = _mm256_andnot_pd(dummy_mask,r00);
701
702             /* Calculate table index by multiplying r with table scale and truncate to integer */
703             rt               = _mm256_mul_pd(r00,vftabscale);
704             vfitab           = _mm256_cvttpd_epi32(rt);
705             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
706             vfitab           = _mm_slli_epi32(vfitab,3);
707
708             /* COULOMB ELECTROSTATICS */
709             velec            = _mm256_mul_pd(qq00,rinv00);
710             felec            = _mm256_mul_pd(velec,rinvsq00);
711
712             /* CUBIC SPLINE TABLE DISPERSION */
713             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
714             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
715             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
716             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
717             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
718             Heps             = _mm256_mul_pd(vfeps,H);
719             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
720             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
721             vvdw6            = _mm256_mul_pd(c6_00,VV);
722             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
723             fvdw6            = _mm256_mul_pd(c6_00,FF);
724
725             /* CUBIC SPLINE TABLE REPULSION */
726             vfitab           = _mm_add_epi32(vfitab,ifour);
727             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
728             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
729             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
730             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
731             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
732             Heps             = _mm256_mul_pd(vfeps,H);
733             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
734             VV               = _mm256_add_pd(Y,_mm256_mul_pd(vfeps,Fp));
735             vvdw12           = _mm256_mul_pd(c12_00,VV);
736             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
737             fvdw12           = _mm256_mul_pd(c12_00,FF);
738             vvdw             = _mm256_add_pd(vvdw12,vvdw6);
739             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
740
741             /* Update potential sum for this i atom from the interaction with this j atom. */
742             velec            = _mm256_andnot_pd(dummy_mask,velec);
743             velecsum         = _mm256_add_pd(velecsum,velec);
744             vvdw             = _mm256_andnot_pd(dummy_mask,vvdw);
745             vvdwsum          = _mm256_add_pd(vvdwsum,vvdw);
746
747             fscal            = _mm256_add_pd(felec,fvdw);
748
749             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
750
751             /* Calculate temporary vectorial force */
752             tx               = _mm256_mul_pd(fscal,dx00);
753             ty               = _mm256_mul_pd(fscal,dy00);
754             tz               = _mm256_mul_pd(fscal,dz00);
755
756             /* Update vectorial force */
757             fix0             = _mm256_add_pd(fix0,tx);
758             fiy0             = _mm256_add_pd(fiy0,ty);
759             fiz0             = _mm256_add_pd(fiz0,tz);
760
761             fjx0             = _mm256_add_pd(fjx0,tx);
762             fjy0             = _mm256_add_pd(fjy0,ty);
763             fjz0             = _mm256_add_pd(fjz0,tz);
764
765             /**************************
766              * CALCULATE INTERACTIONS *
767              **************************/
768
769             /* COULOMB ELECTROSTATICS */
770             velec            = _mm256_mul_pd(qq01,rinv01);
771             felec            = _mm256_mul_pd(velec,rinvsq01);
772
773             /* Update potential sum for this i atom from the interaction with this j atom. */
774             velec            = _mm256_andnot_pd(dummy_mask,velec);
775             velecsum         = _mm256_add_pd(velecsum,velec);
776
777             fscal            = felec;
778
779             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
780
781             /* Calculate temporary vectorial force */
782             tx               = _mm256_mul_pd(fscal,dx01);
783             ty               = _mm256_mul_pd(fscal,dy01);
784             tz               = _mm256_mul_pd(fscal,dz01);
785
786             /* Update vectorial force */
787             fix0             = _mm256_add_pd(fix0,tx);
788             fiy0             = _mm256_add_pd(fiy0,ty);
789             fiz0             = _mm256_add_pd(fiz0,tz);
790
791             fjx1             = _mm256_add_pd(fjx1,tx);
792             fjy1             = _mm256_add_pd(fjy1,ty);
793             fjz1             = _mm256_add_pd(fjz1,tz);
794
795             /**************************
796              * CALCULATE INTERACTIONS *
797              **************************/
798
799             /* COULOMB ELECTROSTATICS */
800             velec            = _mm256_mul_pd(qq02,rinv02);
801             felec            = _mm256_mul_pd(velec,rinvsq02);
802
803             /* Update potential sum for this i atom from the interaction with this j atom. */
804             velec            = _mm256_andnot_pd(dummy_mask,velec);
805             velecsum         = _mm256_add_pd(velecsum,velec);
806
807             fscal            = felec;
808
809             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
810
811             /* Calculate temporary vectorial force */
812             tx               = _mm256_mul_pd(fscal,dx02);
813             ty               = _mm256_mul_pd(fscal,dy02);
814             tz               = _mm256_mul_pd(fscal,dz02);
815
816             /* Update vectorial force */
817             fix0             = _mm256_add_pd(fix0,tx);
818             fiy0             = _mm256_add_pd(fiy0,ty);
819             fiz0             = _mm256_add_pd(fiz0,tz);
820
821             fjx2             = _mm256_add_pd(fjx2,tx);
822             fjy2             = _mm256_add_pd(fjy2,ty);
823             fjz2             = _mm256_add_pd(fjz2,tz);
824
825             /**************************
826              * CALCULATE INTERACTIONS *
827              **************************/
828
829             /* COULOMB ELECTROSTATICS */
830             velec            = _mm256_mul_pd(qq10,rinv10);
831             felec            = _mm256_mul_pd(velec,rinvsq10);
832
833             /* Update potential sum for this i atom from the interaction with this j atom. */
834             velec            = _mm256_andnot_pd(dummy_mask,velec);
835             velecsum         = _mm256_add_pd(velecsum,velec);
836
837             fscal            = felec;
838
839             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
840
841             /* Calculate temporary vectorial force */
842             tx               = _mm256_mul_pd(fscal,dx10);
843             ty               = _mm256_mul_pd(fscal,dy10);
844             tz               = _mm256_mul_pd(fscal,dz10);
845
846             /* Update vectorial force */
847             fix1             = _mm256_add_pd(fix1,tx);
848             fiy1             = _mm256_add_pd(fiy1,ty);
849             fiz1             = _mm256_add_pd(fiz1,tz);
850
851             fjx0             = _mm256_add_pd(fjx0,tx);
852             fjy0             = _mm256_add_pd(fjy0,ty);
853             fjz0             = _mm256_add_pd(fjz0,tz);
854
855             /**************************
856              * CALCULATE INTERACTIONS *
857              **************************/
858
859             /* COULOMB ELECTROSTATICS */
860             velec            = _mm256_mul_pd(qq11,rinv11);
861             felec            = _mm256_mul_pd(velec,rinvsq11);
862
863             /* Update potential sum for this i atom from the interaction with this j atom. */
864             velec            = _mm256_andnot_pd(dummy_mask,velec);
865             velecsum         = _mm256_add_pd(velecsum,velec);
866
867             fscal            = felec;
868
869             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
870
871             /* Calculate temporary vectorial force */
872             tx               = _mm256_mul_pd(fscal,dx11);
873             ty               = _mm256_mul_pd(fscal,dy11);
874             tz               = _mm256_mul_pd(fscal,dz11);
875
876             /* Update vectorial force */
877             fix1             = _mm256_add_pd(fix1,tx);
878             fiy1             = _mm256_add_pd(fiy1,ty);
879             fiz1             = _mm256_add_pd(fiz1,tz);
880
881             fjx1             = _mm256_add_pd(fjx1,tx);
882             fjy1             = _mm256_add_pd(fjy1,ty);
883             fjz1             = _mm256_add_pd(fjz1,tz);
884
885             /**************************
886              * CALCULATE INTERACTIONS *
887              **************************/
888
889             /* COULOMB ELECTROSTATICS */
890             velec            = _mm256_mul_pd(qq12,rinv12);
891             felec            = _mm256_mul_pd(velec,rinvsq12);
892
893             /* Update potential sum for this i atom from the interaction with this j atom. */
894             velec            = _mm256_andnot_pd(dummy_mask,velec);
895             velecsum         = _mm256_add_pd(velecsum,velec);
896
897             fscal            = felec;
898
899             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
900
901             /* Calculate temporary vectorial force */
902             tx               = _mm256_mul_pd(fscal,dx12);
903             ty               = _mm256_mul_pd(fscal,dy12);
904             tz               = _mm256_mul_pd(fscal,dz12);
905
906             /* Update vectorial force */
907             fix1             = _mm256_add_pd(fix1,tx);
908             fiy1             = _mm256_add_pd(fiy1,ty);
909             fiz1             = _mm256_add_pd(fiz1,tz);
910
911             fjx2             = _mm256_add_pd(fjx2,tx);
912             fjy2             = _mm256_add_pd(fjy2,ty);
913             fjz2             = _mm256_add_pd(fjz2,tz);
914
915             /**************************
916              * CALCULATE INTERACTIONS *
917              **************************/
918
919             /* COULOMB ELECTROSTATICS */
920             velec            = _mm256_mul_pd(qq20,rinv20);
921             felec            = _mm256_mul_pd(velec,rinvsq20);
922
923             /* Update potential sum for this i atom from the interaction with this j atom. */
924             velec            = _mm256_andnot_pd(dummy_mask,velec);
925             velecsum         = _mm256_add_pd(velecsum,velec);
926
927             fscal            = felec;
928
929             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
930
931             /* Calculate temporary vectorial force */
932             tx               = _mm256_mul_pd(fscal,dx20);
933             ty               = _mm256_mul_pd(fscal,dy20);
934             tz               = _mm256_mul_pd(fscal,dz20);
935
936             /* Update vectorial force */
937             fix2             = _mm256_add_pd(fix2,tx);
938             fiy2             = _mm256_add_pd(fiy2,ty);
939             fiz2             = _mm256_add_pd(fiz2,tz);
940
941             fjx0             = _mm256_add_pd(fjx0,tx);
942             fjy0             = _mm256_add_pd(fjy0,ty);
943             fjz0             = _mm256_add_pd(fjz0,tz);
944
945             /**************************
946              * CALCULATE INTERACTIONS *
947              **************************/
948
949             /* COULOMB ELECTROSTATICS */
950             velec            = _mm256_mul_pd(qq21,rinv21);
951             felec            = _mm256_mul_pd(velec,rinvsq21);
952
953             /* Update potential sum for this i atom from the interaction with this j atom. */
954             velec            = _mm256_andnot_pd(dummy_mask,velec);
955             velecsum         = _mm256_add_pd(velecsum,velec);
956
957             fscal            = felec;
958
959             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
960
961             /* Calculate temporary vectorial force */
962             tx               = _mm256_mul_pd(fscal,dx21);
963             ty               = _mm256_mul_pd(fscal,dy21);
964             tz               = _mm256_mul_pd(fscal,dz21);
965
966             /* Update vectorial force */
967             fix2             = _mm256_add_pd(fix2,tx);
968             fiy2             = _mm256_add_pd(fiy2,ty);
969             fiz2             = _mm256_add_pd(fiz2,tz);
970
971             fjx1             = _mm256_add_pd(fjx1,tx);
972             fjy1             = _mm256_add_pd(fjy1,ty);
973             fjz1             = _mm256_add_pd(fjz1,tz);
974
975             /**************************
976              * CALCULATE INTERACTIONS *
977              **************************/
978
979             /* COULOMB ELECTROSTATICS */
980             velec            = _mm256_mul_pd(qq22,rinv22);
981             felec            = _mm256_mul_pd(velec,rinvsq22);
982
983             /* Update potential sum for this i atom from the interaction with this j atom. */
984             velec            = _mm256_andnot_pd(dummy_mask,velec);
985             velecsum         = _mm256_add_pd(velecsum,velec);
986
987             fscal            = felec;
988
989             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
990
991             /* Calculate temporary vectorial force */
992             tx               = _mm256_mul_pd(fscal,dx22);
993             ty               = _mm256_mul_pd(fscal,dy22);
994             tz               = _mm256_mul_pd(fscal,dz22);
995
996             /* Update vectorial force */
997             fix2             = _mm256_add_pd(fix2,tx);
998             fiy2             = _mm256_add_pd(fiy2,ty);
999             fiz2             = _mm256_add_pd(fiz2,tz);
1000
1001             fjx2             = _mm256_add_pd(fjx2,tx);
1002             fjy2             = _mm256_add_pd(fjy2,ty);
1003             fjz2             = _mm256_add_pd(fjz2,tz);
1004
1005             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1006             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1007             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1008             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1009
1010             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1011                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
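            /*
             * Note (exposition only): j forces from padded lanes are subtracted
             * from the local scratch buffer rather than the real force array,
             *
             *     fjptr = (jnr >= 0) ? f + DIM*jnr : scratch;
             *
             * so contributions that were already zeroed by the mask never touch
             * a real atom.
             */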
1012
1013             /* Inner loop uses 279 flops */
1014         }
1015
1016         /* End of innermost loop */
1017
1018         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1019                                                  f+i_coord_offset,fshift+i_shift_offset);
1020
1021         ggid                        = gid[iidx];
1022         /* Update potential energies */
1023         gmx_mm256_update_1pot_pd(velecsum,kernel_data->energygrp_elec+ggid);
1024         gmx_mm256_update_1pot_pd(vvdwsum,kernel_data->energygrp_vdw+ggid);
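        /*
         * Scalar view of the energy reduction above (illustrative): the four
         * lanes of each accumulator are summed horizontally and added to the
         * bin of this list's energy group:
         *
         *     energygrp_elec[ggid] += velecsum[0] + velecsum[1] + velecsum[2] + velecsum[3];
         *     energygrp_vdw[ggid]  += vvdwsum[0]  + vvdwsum[1]  + vvdwsum[2]  + vvdwsum[3];
         */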
1025
1026         /* Increment number of inner iterations */
1027         inneriter                  += j_index_end - j_index_start;
1028
1029         /* Outer loop uses 20 flops */
1030     }
1031
1032     /* Increment number of outer iterations */
1033     outeriter        += nri;
1034
1035     /* Update outer/inner flops */
1036
1037     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*279);
1038 }
1039 /*
1040  * Gromacs nonbonded kernel:   nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_double
1041  * Electrostatics interaction: Coulomb
1042  * VdW interaction:            CubicSplineTable
1043  * Geometry:                   Water3-Water3
1044  * Calculate force/pot:        Force
1045  */
1046 void
1047 nb_kernel_ElecCoul_VdwCSTab_GeomW3W3_F_avx_256_double
1048                     (t_nblist                    * gmx_restrict       nlist,
1049                      rvec                        * gmx_restrict          xx,
1050                      rvec                        * gmx_restrict          ff,
1051                      t_forcerec                  * gmx_restrict          fr,
1052                      t_mdatoms                   * gmx_restrict     mdatoms,
1053                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1054                      t_nrnb                      * gmx_restrict        nrnb)
1055 {
1056     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1057      * just 0 for non-waters.
1058      * Suffixes A,B,C,D refer to j loop unrolling done with AVX, e.g. for the four different
1059      * jnr indices corresponding to data put in the four positions in the SIMD register.
1060      */
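    /*
     * Expository note (added): this force-only variant repeats the structure of
     * the VF kernel above, but the potential-sum reset and reduction present
     * there are absent here, saving the energy bookkeeping when only forces
     * are needed.
     */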
1061     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1062     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1063     int              jnrA,jnrB,jnrC,jnrD;
1064     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1065     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1066     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1067     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1068     real             rcutoff_scalar;
1069     real             *shiftvec,*fshift,*x,*f;
1070     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD;
1071     real             scratch[4*DIM];
1072     __m256d          tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1073     real *           vdwioffsetptr0;
1074     __m256d          ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1075     real *           vdwioffsetptr1;
1076     __m256d          ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1077     real *           vdwioffsetptr2;
1078     __m256d          ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1079     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D;
1080     __m256d          jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1081     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D;
1082     __m256d          jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1083     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D;
1084     __m256d          jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1085     __m256d          dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1086     __m256d          dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1087     __m256d          dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1088     __m256d          dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1089     __m256d          dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1090     __m256d          dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1091     __m256d          dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1092     __m256d          dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1093     __m256d          dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1094     __m256d          velec,felec,velecsum,facel,crf,krf,krf2;
1095     real             *charge;
1096     int              nvdwtype;
1097     __m256d          rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1098     int              *vdwtype;
1099     real             *vdwparam;
1100     __m256d          one_sixth   = _mm256_set1_pd(1.0/6.0);
1101     __m256d          one_twelfth = _mm256_set1_pd(1.0/12.0);
1102     __m128i          vfitab;
1103     __m128i          ifour       = _mm_set1_epi32(4);
1104     __m256d          rt,vfeps,vftabscale,Y,F,G,H,Heps,Fp,VV,FF;
1105     real             *vftab;
1106     __m256d          dummy_mask,cutoff_mask;
1107     __m128           tmpmask0,tmpmask1;
1108     __m256d          signbit = _mm256_castsi256_pd( _mm256_set1_epi32(0x80000000) );
1109     __m256d          one     = _mm256_set1_pd(1.0);
1110     __m256d          two     = _mm256_set1_pd(2.0);
1111     x                = xx[0];
1112     f                = ff[0];
1113
1114     nri              = nlist->nri;
1115     iinr             = nlist->iinr;
1116     jindex           = nlist->jindex;
1117     jjnr             = nlist->jjnr;
1118     shiftidx         = nlist->shift;
1119     gid              = nlist->gid;
1120     shiftvec         = fr->shift_vec[0];
1121     fshift           = fr->fshift[0];
1122     facel            = _mm256_set1_pd(fr->epsfac);
1123     charge           = mdatoms->chargeA;
1124     nvdwtype         = fr->ntype;
1125     vdwparam         = fr->nbfp;
1126     vdwtype          = mdatoms->typeA;
1127
1128     vftab            = kernel_data->table_vdw->data;
1129     vftabscale       = _mm256_set1_pd(kernel_data->table_vdw->scale);
1130
1131     /* Setup water-specific parameters */
1132     inr              = nlist->iinr[0];
1133     iq0              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+0]));
1134     iq1              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+1]));
1135     iq2              = _mm256_mul_pd(facel,_mm256_set1_pd(charge[inr+2]));
1136     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1137
1138     jq0              = _mm256_set1_pd(charge[inr+0]);
1139     jq1              = _mm256_set1_pd(charge[inr+1]);
1140     jq2              = _mm256_set1_pd(charge[inr+2]);
1141     vdwjidx0A        = 2*vdwtype[inr+0];
1142     qq00             = _mm256_mul_pd(iq0,jq0);
1143     c6_00            = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A]);
1144     c12_00           = _mm256_set1_pd(vdwioffsetptr0[vdwjidx0A+1]);
1145     qq01             = _mm256_mul_pd(iq0,jq1);
1146     qq02             = _mm256_mul_pd(iq0,jq2);
1147     qq10             = _mm256_mul_pd(iq1,jq0);
1148     qq11             = _mm256_mul_pd(iq1,jq1);
1149     qq12             = _mm256_mul_pd(iq1,jq2);
1150     qq20             = _mm256_mul_pd(iq2,jq0);
1151     qq21             = _mm256_mul_pd(iq2,jq1);
1152     qq22             = _mm256_mul_pd(iq2,jq2);
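         /* Both i and j molecules are 3-site waters with the parameters of the first i entry,
          * so all nine charge products and the single site0-site0 c6/c12 pair are precomputed
          * once outside the loops.
          */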
1153
1154     /* Avoid spurious compiler warnings about possibly uninitialized variables */
1155     jnrA = jnrB = jnrC = jnrD = 0;
1156     j_coord_offsetA = 0;
1157     j_coord_offsetB = 0;
1158     j_coord_offsetC = 0;
1159     j_coord_offsetD = 0;
1160
1161     outeriter        = 0;
1162     inneriter        = 0;
1163
1164     for(iidx=0;iidx<4*DIM;iidx++)
1165     {
1166         scratch[iidx] = 0.0;
1167     }
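         /* scratch serves as a dummy force buffer: contributions from padded j entries in the
          * masked epilogue below are written here instead of to real atoms.
          */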
1168
1169     /* Start outer loop over neighborlists */
1170     for(iidx=0; iidx<nri; iidx++)
1171     {
1172         /* Load shift vector for this list */
1173         i_shift_offset   = DIM*shiftidx[iidx];
1174
1175         /* Load limits for loop over neighbors */
1176         j_index_start    = jindex[iidx];
1177         j_index_end      = jindex[iidx+1];
1178
1179         /* Get outer coordinate index */
1180         inr              = iinr[iidx];
1181         i_coord_offset   = DIM*inr;
1182
1183         /* Load i particle coords and add shift vector */
1184         gmx_mm256_load_shift_and_3rvec_broadcast_pd(shiftvec+i_shift_offset,x+i_coord_offset,
1185                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1186
1187         fix0             = _mm256_setzero_pd();
1188         fiy0             = _mm256_setzero_pd();
1189         fiz0             = _mm256_setzero_pd();
1190         fix1             = _mm256_setzero_pd();
1191         fiy1             = _mm256_setzero_pd();
1192         fiz1             = _mm256_setzero_pd();
1193         fix2             = _mm256_setzero_pd();
1194         fiy2             = _mm256_setzero_pd();
1195         fiz2             = _mm256_setzero_pd();
1196
1197         /* Start inner kernel loop */
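             /* This loop handles full chunks of four j waters; the test jjnr[jidx+3]>=0 stops
              * before a partially filled chunk, which is processed with masking after the loop.
              */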
1198         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+3]>=0; jidx+=4)
1199         {
1200
1201             /* Get j neighbor index, and coordinate index */
1202             jnrA             = jjnr[jidx];
1203             jnrB             = jjnr[jidx+1];
1204             jnrC             = jjnr[jidx+2];
1205             jnrD             = jjnr[jidx+3];
1206             j_coord_offsetA  = DIM*jnrA;
1207             j_coord_offsetB  = DIM*jnrB;
1208             j_coord_offsetC  = DIM*jnrC;
1209             j_coord_offsetD  = DIM*jnrD;
1210
1211             /* load j atom coordinates */
1212             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1213                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1214                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1215
1216             /* Calculate displacement vector */
1217             dx00             = _mm256_sub_pd(ix0,jx0);
1218             dy00             = _mm256_sub_pd(iy0,jy0);
1219             dz00             = _mm256_sub_pd(iz0,jz0);
1220             dx01             = _mm256_sub_pd(ix0,jx1);
1221             dy01             = _mm256_sub_pd(iy0,jy1);
1222             dz01             = _mm256_sub_pd(iz0,jz1);
1223             dx02             = _mm256_sub_pd(ix0,jx2);
1224             dy02             = _mm256_sub_pd(iy0,jy2);
1225             dz02             = _mm256_sub_pd(iz0,jz2);
1226             dx10             = _mm256_sub_pd(ix1,jx0);
1227             dy10             = _mm256_sub_pd(iy1,jy0);
1228             dz10             = _mm256_sub_pd(iz1,jz0);
1229             dx11             = _mm256_sub_pd(ix1,jx1);
1230             dy11             = _mm256_sub_pd(iy1,jy1);
1231             dz11             = _mm256_sub_pd(iz1,jz1);
1232             dx12             = _mm256_sub_pd(ix1,jx2);
1233             dy12             = _mm256_sub_pd(iy1,jy2);
1234             dz12             = _mm256_sub_pd(iz1,jz2);
1235             dx20             = _mm256_sub_pd(ix2,jx0);
1236             dy20             = _mm256_sub_pd(iy2,jy0);
1237             dz20             = _mm256_sub_pd(iz2,jz0);
1238             dx21             = _mm256_sub_pd(ix2,jx1);
1239             dy21             = _mm256_sub_pd(iy2,jy1);
1240             dz21             = _mm256_sub_pd(iz2,jz1);
1241             dx22             = _mm256_sub_pd(ix2,jx2);
1242             dy22             = _mm256_sub_pd(iy2,jy2);
1243             dz22             = _mm256_sub_pd(iz2,jz2);
1244
1245             /* Calculate squared distance and things based on it */
1246             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1247             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1248             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1249             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1250             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1251             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1252             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1253             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1254             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1255
1256             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1257             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
1258             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
1259             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1260             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1261             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1262             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1263             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1264             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1265
1266             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1267             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1268             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1269             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1270             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1271             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1272             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1273             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1274             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1275
1276             fjx0             = _mm256_setzero_pd();
1277             fjy0             = _mm256_setzero_pd();
1278             fjz0             = _mm256_setzero_pd();
1279             fjx1             = _mm256_setzero_pd();
1280             fjy1             = _mm256_setzero_pd();
1281             fjz1             = _mm256_setzero_pd();
1282             fjx2             = _mm256_setzero_pd();
1283             fjy2             = _mm256_setzero_pd();
1284             fjz2             = _mm256_setzero_pd();
1285
1286             /**************************
1287              * CALCULATE INTERACTIONS *
1288              **************************/
1289
1290             r00              = _mm256_mul_pd(rsq00,rinv00);
1291
1292             /* Calculate the table index by multiplying r with the table scale and truncating to an integer */
1293             rt               = _mm256_mul_pd(r00,vftabscale);
1294             vfitab           = _mm256_cvttpd_epi32(rt);
1295             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1296             vfitab           = _mm_slli_epi32(vfitab,3);
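                 /* The shift above multiplies the integer part of r*scale by 8, since each table
                  * point stores Y,F,G,H for dispersion followed by Y,F,G,H for repulsion, while
                  * vfeps keeps the fractional part used as the spline parameter.
                  */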
1297
1298             /* COULOMB ELECTROSTATICS */
1299             velec            = _mm256_mul_pd(qq00,rinv00);
1300             felec            = _mm256_mul_pd(velec,rinvsq00);
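                 /* Plain Coulomb: velec = qq/r and felec = qq/r^3; multiplying felec by the
                  * displacement components below gives the Cartesian force for this site pair.
                  */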
1301
1302             /* CUBIC SPLINE TABLE DISPERSION */
1303             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1304             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1305             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1306             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1307             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1308             Heps             = _mm256_mul_pd(vfeps,H);
1309             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1310             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1311             fvdw6            = _mm256_mul_pd(c6_00,FF);
1312
1313             /* CUBIC SPLINE TABLE REPULSION */
1314             vfitab           = _mm_add_epi32(vfitab,ifour);
1315             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1316             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1317             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1318             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1319             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1320             Heps             = _mm256_mul_pd(vfeps,H);
1321             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1322             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1323             fvdw12           = _mm256_mul_pd(c12_00,FF);
1324             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
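                 /* Illustrative scalar sketch of the table force above (same math, one pair):
                  *   eps  = r*tabscale - floor(r*tabscale);
                  *   FF   = F + eps*(2.0*G + 3.0*H*eps);            derivative of Y+F*eps+G*eps^2+H*eps^3
                  *   fvdw = -(c6*FF_disp + c12*FF_rep)*tabscale/r;  the XOR with signbit supplies the sign
                  */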
1325
1326             fscal            = _mm256_add_pd(felec,fvdw);
1327
1328             /* Calculate temporary vectorial force */
1329             tx               = _mm256_mul_pd(fscal,dx00);
1330             ty               = _mm256_mul_pd(fscal,dy00);
1331             tz               = _mm256_mul_pd(fscal,dz00);
1332
1333             /* Update vectorial force */
1334             fix0             = _mm256_add_pd(fix0,tx);
1335             fiy0             = _mm256_add_pd(fiy0,ty);
1336             fiz0             = _mm256_add_pd(fiz0,tz);
1337
1338             fjx0             = _mm256_add_pd(fjx0,tx);
1339             fjy0             = _mm256_add_pd(fjy0,ty);
1340             fjz0             = _mm256_add_pd(fjz0,tz);
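                 /* Newton's third law: the same tx,ty,tz are accumulated for the i and j atoms;
                  * the j contributions are subtracted from the force array in one swizzled store
                  * after all nine site pairs have been processed.
                  */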
1341
1342             /**************************
1343              * CALCULATE INTERACTIONS *
1344              **************************/
1345
1346             /* COULOMB ELECTROSTATICS */
1347             velec            = _mm256_mul_pd(qq01,rinv01);
1348             felec            = _mm256_mul_pd(velec,rinvsq01);
1349
1350             fscal            = felec;
1351
1352             /* Calculate temporary vectorial force */
1353             tx               = _mm256_mul_pd(fscal,dx01);
1354             ty               = _mm256_mul_pd(fscal,dy01);
1355             tz               = _mm256_mul_pd(fscal,dz01);
1356
1357             /* Update vectorial force */
1358             fix0             = _mm256_add_pd(fix0,tx);
1359             fiy0             = _mm256_add_pd(fiy0,ty);
1360             fiz0             = _mm256_add_pd(fiz0,tz);
1361
1362             fjx1             = _mm256_add_pd(fjx1,tx);
1363             fjy1             = _mm256_add_pd(fjy1,ty);
1364             fjz1             = _mm256_add_pd(fjz1,tz);
1365
1366             /**************************
1367              * CALCULATE INTERACTIONS *
1368              **************************/
1369
1370             /* COULOMB ELECTROSTATICS */
1371             velec            = _mm256_mul_pd(qq02,rinv02);
1372             felec            = _mm256_mul_pd(velec,rinvsq02);
1373
1374             fscal            = felec;
1375
1376             /* Calculate temporary vectorial force */
1377             tx               = _mm256_mul_pd(fscal,dx02);
1378             ty               = _mm256_mul_pd(fscal,dy02);
1379             tz               = _mm256_mul_pd(fscal,dz02);
1380
1381             /* Update vectorial force */
1382             fix0             = _mm256_add_pd(fix0,tx);
1383             fiy0             = _mm256_add_pd(fiy0,ty);
1384             fiz0             = _mm256_add_pd(fiz0,tz);
1385
1386             fjx2             = _mm256_add_pd(fjx2,tx);
1387             fjy2             = _mm256_add_pd(fjy2,ty);
1388             fjz2             = _mm256_add_pd(fjz2,tz);
1389
1390             /**************************
1391              * CALCULATE INTERACTIONS *
1392              **************************/
1393
1394             /* COULOMB ELECTROSTATICS */
1395             velec            = _mm256_mul_pd(qq10,rinv10);
1396             felec            = _mm256_mul_pd(velec,rinvsq10);
1397
1398             fscal            = felec;
1399
1400             /* Calculate temporary vectorial force */
1401             tx               = _mm256_mul_pd(fscal,dx10);
1402             ty               = _mm256_mul_pd(fscal,dy10);
1403             tz               = _mm256_mul_pd(fscal,dz10);
1404
1405             /* Update vectorial force */
1406             fix1             = _mm256_add_pd(fix1,tx);
1407             fiy1             = _mm256_add_pd(fiy1,ty);
1408             fiz1             = _mm256_add_pd(fiz1,tz);
1409
1410             fjx0             = _mm256_add_pd(fjx0,tx);
1411             fjy0             = _mm256_add_pd(fjy0,ty);
1412             fjz0             = _mm256_add_pd(fjz0,tz);
1413
1414             /**************************
1415              * CALCULATE INTERACTIONS *
1416              **************************/
1417
1418             /* COULOMB ELECTROSTATICS */
1419             velec            = _mm256_mul_pd(qq11,rinv11);
1420             felec            = _mm256_mul_pd(velec,rinvsq11);
1421
1422             fscal            = felec;
1423
1424             /* Calculate temporary vectorial force */
1425             tx               = _mm256_mul_pd(fscal,dx11);
1426             ty               = _mm256_mul_pd(fscal,dy11);
1427             tz               = _mm256_mul_pd(fscal,dz11);
1428
1429             /* Update vectorial force */
1430             fix1             = _mm256_add_pd(fix1,tx);
1431             fiy1             = _mm256_add_pd(fiy1,ty);
1432             fiz1             = _mm256_add_pd(fiz1,tz);
1433
1434             fjx1             = _mm256_add_pd(fjx1,tx);
1435             fjy1             = _mm256_add_pd(fjy1,ty);
1436             fjz1             = _mm256_add_pd(fjz1,tz);
1437
1438             /**************************
1439              * CALCULATE INTERACTIONS *
1440              **************************/
1441
1442             /* COULOMB ELECTROSTATICS */
1443             velec            = _mm256_mul_pd(qq12,rinv12);
1444             felec            = _mm256_mul_pd(velec,rinvsq12);
1445
1446             fscal            = felec;
1447
1448             /* Calculate temporary vectorial force */
1449             tx               = _mm256_mul_pd(fscal,dx12);
1450             ty               = _mm256_mul_pd(fscal,dy12);
1451             tz               = _mm256_mul_pd(fscal,dz12);
1452
1453             /* Update vectorial force */
1454             fix1             = _mm256_add_pd(fix1,tx);
1455             fiy1             = _mm256_add_pd(fiy1,ty);
1456             fiz1             = _mm256_add_pd(fiz1,tz);
1457
1458             fjx2             = _mm256_add_pd(fjx2,tx);
1459             fjy2             = _mm256_add_pd(fjy2,ty);
1460             fjz2             = _mm256_add_pd(fjz2,tz);
1461
1462             /**************************
1463              * CALCULATE INTERACTIONS *
1464              **************************/
1465
1466             /* COULOMB ELECTROSTATICS */
1467             velec            = _mm256_mul_pd(qq20,rinv20);
1468             felec            = _mm256_mul_pd(velec,rinvsq20);
1469
1470             fscal            = felec;
1471
1472             /* Calculate temporary vectorial force */
1473             tx               = _mm256_mul_pd(fscal,dx20);
1474             ty               = _mm256_mul_pd(fscal,dy20);
1475             tz               = _mm256_mul_pd(fscal,dz20);
1476
1477             /* Update vectorial force */
1478             fix2             = _mm256_add_pd(fix2,tx);
1479             fiy2             = _mm256_add_pd(fiy2,ty);
1480             fiz2             = _mm256_add_pd(fiz2,tz);
1481
1482             fjx0             = _mm256_add_pd(fjx0,tx);
1483             fjy0             = _mm256_add_pd(fjy0,ty);
1484             fjz0             = _mm256_add_pd(fjz0,tz);
1485
1486             /**************************
1487              * CALCULATE INTERACTIONS *
1488              **************************/
1489
1490             /* COULOMB ELECTROSTATICS */
1491             velec            = _mm256_mul_pd(qq21,rinv21);
1492             felec            = _mm256_mul_pd(velec,rinvsq21);
1493
1494             fscal            = felec;
1495
1496             /* Calculate temporary vectorial force */
1497             tx               = _mm256_mul_pd(fscal,dx21);
1498             ty               = _mm256_mul_pd(fscal,dy21);
1499             tz               = _mm256_mul_pd(fscal,dz21);
1500
1501             /* Update vectorial force */
1502             fix2             = _mm256_add_pd(fix2,tx);
1503             fiy2             = _mm256_add_pd(fiy2,ty);
1504             fiz2             = _mm256_add_pd(fiz2,tz);
1505
1506             fjx1             = _mm256_add_pd(fjx1,tx);
1507             fjy1             = _mm256_add_pd(fjy1,ty);
1508             fjz1             = _mm256_add_pd(fjz1,tz);
1509
1510             /**************************
1511              * CALCULATE INTERACTIONS *
1512              **************************/
1513
1514             /* COULOMB ELECTROSTATICS */
1515             velec            = _mm256_mul_pd(qq22,rinv22);
1516             felec            = _mm256_mul_pd(velec,rinvsq22);
1517
1518             fscal            = felec;
1519
1520             /* Calculate temporary vectorial force */
1521             tx               = _mm256_mul_pd(fscal,dx22);
1522             ty               = _mm256_mul_pd(fscal,dy22);
1523             tz               = _mm256_mul_pd(fscal,dz22);
1524
1525             /* Update vectorial force */
1526             fix2             = _mm256_add_pd(fix2,tx);
1527             fiy2             = _mm256_add_pd(fiy2,ty);
1528             fiz2             = _mm256_add_pd(fiz2,tz);
1529
1530             fjx2             = _mm256_add_pd(fjx2,tx);
1531             fjy2             = _mm256_add_pd(fjy2,ty);
1532             fjz2             = _mm256_add_pd(fjz2,tz);
1533
1534             fjptrA             = f+j_coord_offsetA;
1535             fjptrB             = f+j_coord_offsetB;
1536             fjptrC             = f+j_coord_offsetC;
1537             fjptrD             = f+j_coord_offsetD;
1538
1539             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1540                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1541
1542             /* Inner loop uses 261 flops */
1543         }
1544
1545         if(jidx<j_index_end)
1546         {
1547
1548             /* Get j neighbor index, and coordinate index */
1549             jnrlistA         = jjnr[jidx];
1550             jnrlistB         = jjnr[jidx+1];
1551             jnrlistC         = jjnr[jidx+2];
1552             jnrlistD         = jjnr[jidx+3];
1553             /* The sign of each jjnr element is negative for non-real (padding) atoms.
1554              * After the permutes below the mask is all ones for dummy entries and all zeros for
1555              * real ones, so it is applied as val = _mm256_andnot_pd(mask,val) to clear them.
1556              */
1557             tmpmask0 = gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128()));
1558
1559             tmpmask1 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(3,3,2,2));
1560             tmpmask0 = _mm_permute_ps(tmpmask0,_GMX_MM_PERMUTE(1,1,0,0));
1561             dummy_mask = _mm256_castps_pd(gmx_mm256_set_m128(tmpmask1,tmpmask0));
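                 /* The permutes duplicate each 32-bit comparison result into both halves of its
                  * 64-bit lane, giving an all-ones/all-zeros mask per double for _mm256_andnot_pd.
                  */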
1562
1563             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1564             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1565             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1566             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1567             j_coord_offsetA  = DIM*jnrA;
1568             j_coord_offsetB  = DIM*jnrB;
1569             j_coord_offsetC  = DIM*jnrC;
1570             j_coord_offsetD  = DIM*jnrD;
1571
1572             /* load j atom coordinates */
1573             gmx_mm256_load_3rvec_4ptr_swizzle_pd(x+j_coord_offsetA,x+j_coord_offsetB,
1574                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1575                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1576
1577             /* Calculate displacement vector */
1578             dx00             = _mm256_sub_pd(ix0,jx0);
1579             dy00             = _mm256_sub_pd(iy0,jy0);
1580             dz00             = _mm256_sub_pd(iz0,jz0);
1581             dx01             = _mm256_sub_pd(ix0,jx1);
1582             dy01             = _mm256_sub_pd(iy0,jy1);
1583             dz01             = _mm256_sub_pd(iz0,jz1);
1584             dx02             = _mm256_sub_pd(ix0,jx2);
1585             dy02             = _mm256_sub_pd(iy0,jy2);
1586             dz02             = _mm256_sub_pd(iz0,jz2);
1587             dx10             = _mm256_sub_pd(ix1,jx0);
1588             dy10             = _mm256_sub_pd(iy1,jy0);
1589             dz10             = _mm256_sub_pd(iz1,jz0);
1590             dx11             = _mm256_sub_pd(ix1,jx1);
1591             dy11             = _mm256_sub_pd(iy1,jy1);
1592             dz11             = _mm256_sub_pd(iz1,jz1);
1593             dx12             = _mm256_sub_pd(ix1,jx2);
1594             dy12             = _mm256_sub_pd(iy1,jy2);
1595             dz12             = _mm256_sub_pd(iz1,jz2);
1596             dx20             = _mm256_sub_pd(ix2,jx0);
1597             dy20             = _mm256_sub_pd(iy2,jy0);
1598             dz20             = _mm256_sub_pd(iz2,jz0);
1599             dx21             = _mm256_sub_pd(ix2,jx1);
1600             dy21             = _mm256_sub_pd(iy2,jy1);
1601             dz21             = _mm256_sub_pd(iz2,jz1);
1602             dx22             = _mm256_sub_pd(ix2,jx2);
1603             dy22             = _mm256_sub_pd(iy2,jy2);
1604             dz22             = _mm256_sub_pd(iz2,jz2);
1605
1606             /* Calculate squared distance and things based on it */
1607             rsq00            = gmx_mm256_calc_rsq_pd(dx00,dy00,dz00);
1608             rsq01            = gmx_mm256_calc_rsq_pd(dx01,dy01,dz01);
1609             rsq02            = gmx_mm256_calc_rsq_pd(dx02,dy02,dz02);
1610             rsq10            = gmx_mm256_calc_rsq_pd(dx10,dy10,dz10);
1611             rsq11            = gmx_mm256_calc_rsq_pd(dx11,dy11,dz11);
1612             rsq12            = gmx_mm256_calc_rsq_pd(dx12,dy12,dz12);
1613             rsq20            = gmx_mm256_calc_rsq_pd(dx20,dy20,dz20);
1614             rsq21            = gmx_mm256_calc_rsq_pd(dx21,dy21,dz21);
1615             rsq22            = gmx_mm256_calc_rsq_pd(dx22,dy22,dz22);
1616
1617             rinv00           = gmx_mm256_invsqrt_pd(rsq00);
1618             rinv01           = gmx_mm256_invsqrt_pd(rsq01);
1619             rinv02           = gmx_mm256_invsqrt_pd(rsq02);
1620             rinv10           = gmx_mm256_invsqrt_pd(rsq10);
1621             rinv11           = gmx_mm256_invsqrt_pd(rsq11);
1622             rinv12           = gmx_mm256_invsqrt_pd(rsq12);
1623             rinv20           = gmx_mm256_invsqrt_pd(rsq20);
1624             rinv21           = gmx_mm256_invsqrt_pd(rsq21);
1625             rinv22           = gmx_mm256_invsqrt_pd(rsq22);
1626
1627             rinvsq00         = _mm256_mul_pd(rinv00,rinv00);
1628             rinvsq01         = _mm256_mul_pd(rinv01,rinv01);
1629             rinvsq02         = _mm256_mul_pd(rinv02,rinv02);
1630             rinvsq10         = _mm256_mul_pd(rinv10,rinv10);
1631             rinvsq11         = _mm256_mul_pd(rinv11,rinv11);
1632             rinvsq12         = _mm256_mul_pd(rinv12,rinv12);
1633             rinvsq20         = _mm256_mul_pd(rinv20,rinv20);
1634             rinvsq21         = _mm256_mul_pd(rinv21,rinv21);
1635             rinvsq22         = _mm256_mul_pd(rinv22,rinv22);
1636
1637             fjx0             = _mm256_setzero_pd();
1638             fjy0             = _mm256_setzero_pd();
1639             fjz0             = _mm256_setzero_pd();
1640             fjx1             = _mm256_setzero_pd();
1641             fjy1             = _mm256_setzero_pd();
1642             fjz1             = _mm256_setzero_pd();
1643             fjx2             = _mm256_setzero_pd();
1644             fjy2             = _mm256_setzero_pd();
1645             fjz2             = _mm256_setzero_pd();
1646
1647             /**************************
1648              * CALCULATE INTERACTIONS *
1649              **************************/
1650
1651             r00              = _mm256_mul_pd(rsq00,rinv00);
1652             r00              = _mm256_andnot_pd(dummy_mask,r00);
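                 /* Zeroing r for the padded lanes gives them table index 0, so the lookups below
                  * stay inside the table even when those lanes reference an unrelated atom far
                  * from the i water.
                  */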
1653
1654             /* Calculate the table index by multiplying r with the table scale and truncating to an integer */
1655             rt               = _mm256_mul_pd(r00,vftabscale);
1656             vfitab           = _mm256_cvttpd_epi32(rt);
1657             vfeps            = _mm256_sub_pd(rt,_mm256_round_pd(rt, _MM_FROUND_FLOOR));
1658             vfitab           = _mm_slli_epi32(vfitab,3);
1659
1660             /* COULOMB ELECTROSTATICS */
1661             velec            = _mm256_mul_pd(qq00,rinv00);
1662             felec            = _mm256_mul_pd(velec,rinvsq00);
1663
1664             /* CUBIC SPLINE TABLE DISPERSION */
1665             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1666             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1667             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1668             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1669             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1670             Heps             = _mm256_mul_pd(vfeps,H);
1671             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1672             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1673             fvdw6            = _mm256_mul_pd(c6_00,FF);
1674
1675             /* CUBIC SPLINE TABLE REPULSION */
1676             vfitab           = _mm_add_epi32(vfitab,ifour);
1677             Y                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,0) );
1678             F                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,1) );
1679             G                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,2) );
1680             H                = _mm256_load_pd( vftab + _mm_extract_epi32(vfitab,3) );
1681             GMX_MM256_FULLTRANSPOSE4_PD(Y,F,G,H);
1682             Heps             = _mm256_mul_pd(vfeps,H);
1683             Fp               = _mm256_add_pd(F,_mm256_mul_pd(vfeps,_mm256_add_pd(G,Heps)));
1684             FF               = _mm256_add_pd(Fp,_mm256_mul_pd(vfeps,_mm256_add_pd(G,_mm256_add_pd(Heps,Heps))));
1685             fvdw12           = _mm256_mul_pd(c12_00,FF);
1686             fvdw             = _mm256_xor_pd(signbit,_mm256_mul_pd(_mm256_add_pd(fvdw6,fvdw12),_mm256_mul_pd(vftabscale,rinv00)));
1687
1688             fscal            = _mm256_add_pd(felec,fvdw);
1689
1690             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
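                 /* Clear the force for padded lanes so dummy entries contribute nothing; the same
                  * masking is applied to every interaction in this epilogue.
                  */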
1691
1692             /* Calculate temporary vectorial force */
1693             tx               = _mm256_mul_pd(fscal,dx00);
1694             ty               = _mm256_mul_pd(fscal,dy00);
1695             tz               = _mm256_mul_pd(fscal,dz00);
1696
1697             /* Update vectorial force */
1698             fix0             = _mm256_add_pd(fix0,tx);
1699             fiy0             = _mm256_add_pd(fiy0,ty);
1700             fiz0             = _mm256_add_pd(fiz0,tz);
1701
1702             fjx0             = _mm256_add_pd(fjx0,tx);
1703             fjy0             = _mm256_add_pd(fjy0,ty);
1704             fjz0             = _mm256_add_pd(fjz0,tz);
1705
1706             /**************************
1707              * CALCULATE INTERACTIONS *
1708              **************************/
1709
1710             /* COULOMB ELECTROSTATICS */
1711             velec            = _mm256_mul_pd(qq01,rinv01);
1712             felec            = _mm256_mul_pd(velec,rinvsq01);
1713
1714             fscal            = felec;
1715
1716             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1717
1718             /* Calculate temporary vectorial force */
1719             tx               = _mm256_mul_pd(fscal,dx01);
1720             ty               = _mm256_mul_pd(fscal,dy01);
1721             tz               = _mm256_mul_pd(fscal,dz01);
1722
1723             /* Update vectorial force */
1724             fix0             = _mm256_add_pd(fix0,tx);
1725             fiy0             = _mm256_add_pd(fiy0,ty);
1726             fiz0             = _mm256_add_pd(fiz0,tz);
1727
1728             fjx1             = _mm256_add_pd(fjx1,tx);
1729             fjy1             = _mm256_add_pd(fjy1,ty);
1730             fjz1             = _mm256_add_pd(fjz1,tz);
1731
1732             /**************************
1733              * CALCULATE INTERACTIONS *
1734              **************************/
1735
1736             /* COULOMB ELECTROSTATICS */
1737             velec            = _mm256_mul_pd(qq02,rinv02);
1738             felec            = _mm256_mul_pd(velec,rinvsq02);
1739
1740             fscal            = felec;
1741
1742             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1743
1744             /* Calculate temporary vectorial force */
1745             tx               = _mm256_mul_pd(fscal,dx02);
1746             ty               = _mm256_mul_pd(fscal,dy02);
1747             tz               = _mm256_mul_pd(fscal,dz02);
1748
1749             /* Update vectorial force */
1750             fix0             = _mm256_add_pd(fix0,tx);
1751             fiy0             = _mm256_add_pd(fiy0,ty);
1752             fiz0             = _mm256_add_pd(fiz0,tz);
1753
1754             fjx2             = _mm256_add_pd(fjx2,tx);
1755             fjy2             = _mm256_add_pd(fjy2,ty);
1756             fjz2             = _mm256_add_pd(fjz2,tz);
1757
1758             /**************************
1759              * CALCULATE INTERACTIONS *
1760              **************************/
1761
1762             /* COULOMB ELECTROSTATICS */
1763             velec            = _mm256_mul_pd(qq10,rinv10);
1764             felec            = _mm256_mul_pd(velec,rinvsq10);
1765
1766             fscal            = felec;
1767
1768             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1769
1770             /* Calculate temporary vectorial force */
1771             tx               = _mm256_mul_pd(fscal,dx10);
1772             ty               = _mm256_mul_pd(fscal,dy10);
1773             tz               = _mm256_mul_pd(fscal,dz10);
1774
1775             /* Update vectorial force */
1776             fix1             = _mm256_add_pd(fix1,tx);
1777             fiy1             = _mm256_add_pd(fiy1,ty);
1778             fiz1             = _mm256_add_pd(fiz1,tz);
1779
1780             fjx0             = _mm256_add_pd(fjx0,tx);
1781             fjy0             = _mm256_add_pd(fjy0,ty);
1782             fjz0             = _mm256_add_pd(fjz0,tz);
1783
1784             /**************************
1785              * CALCULATE INTERACTIONS *
1786              **************************/
1787
1788             /* COULOMB ELECTROSTATICS */
1789             velec            = _mm256_mul_pd(qq11,rinv11);
1790             felec            = _mm256_mul_pd(velec,rinvsq11);
1791
1792             fscal            = felec;
1793
1794             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1795
1796             /* Calculate temporary vectorial force */
1797             tx               = _mm256_mul_pd(fscal,dx11);
1798             ty               = _mm256_mul_pd(fscal,dy11);
1799             tz               = _mm256_mul_pd(fscal,dz11);
1800
1801             /* Update vectorial force */
1802             fix1             = _mm256_add_pd(fix1,tx);
1803             fiy1             = _mm256_add_pd(fiy1,ty);
1804             fiz1             = _mm256_add_pd(fiz1,tz);
1805
1806             fjx1             = _mm256_add_pd(fjx1,tx);
1807             fjy1             = _mm256_add_pd(fjy1,ty);
1808             fjz1             = _mm256_add_pd(fjz1,tz);
1809
1810             /**************************
1811              * CALCULATE INTERACTIONS *
1812              **************************/
1813
1814             /* COULOMB ELECTROSTATICS */
1815             velec            = _mm256_mul_pd(qq12,rinv12);
1816             felec            = _mm256_mul_pd(velec,rinvsq12);
1817
1818             fscal            = felec;
1819
1820             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1821
1822             /* Calculate temporary vectorial force */
1823             tx               = _mm256_mul_pd(fscal,dx12);
1824             ty               = _mm256_mul_pd(fscal,dy12);
1825             tz               = _mm256_mul_pd(fscal,dz12);
1826
1827             /* Update vectorial force */
1828             fix1             = _mm256_add_pd(fix1,tx);
1829             fiy1             = _mm256_add_pd(fiy1,ty);
1830             fiz1             = _mm256_add_pd(fiz1,tz);
1831
1832             fjx2             = _mm256_add_pd(fjx2,tx);
1833             fjy2             = _mm256_add_pd(fjy2,ty);
1834             fjz2             = _mm256_add_pd(fjz2,tz);
1835
1836             /**************************
1837              * CALCULATE INTERACTIONS *
1838              **************************/
1839
1840             /* COULOMB ELECTROSTATICS */
1841             velec            = _mm256_mul_pd(qq20,rinv20);
1842             felec            = _mm256_mul_pd(velec,rinvsq20);
1843
1844             fscal            = felec;
1845
1846             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1847
1848             /* Calculate temporary vectorial force */
1849             tx               = _mm256_mul_pd(fscal,dx20);
1850             ty               = _mm256_mul_pd(fscal,dy20);
1851             tz               = _mm256_mul_pd(fscal,dz20);
1852
1853             /* Update vectorial force */
1854             fix2             = _mm256_add_pd(fix2,tx);
1855             fiy2             = _mm256_add_pd(fiy2,ty);
1856             fiz2             = _mm256_add_pd(fiz2,tz);
1857
1858             fjx0             = _mm256_add_pd(fjx0,tx);
1859             fjy0             = _mm256_add_pd(fjy0,ty);
1860             fjz0             = _mm256_add_pd(fjz0,tz);
1861
1862             /**************************
1863              * CALCULATE INTERACTIONS *
1864              **************************/
1865
1866             /* COULOMB ELECTROSTATICS */
1867             velec            = _mm256_mul_pd(qq21,rinv21);
1868             felec            = _mm256_mul_pd(velec,rinvsq21);
1869
1870             fscal            = felec;
1871
1872             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1873
1874             /* Calculate temporary vectorial force */
1875             tx               = _mm256_mul_pd(fscal,dx21);
1876             ty               = _mm256_mul_pd(fscal,dy21);
1877             tz               = _mm256_mul_pd(fscal,dz21);
1878
1879             /* Update vectorial force */
1880             fix2             = _mm256_add_pd(fix2,tx);
1881             fiy2             = _mm256_add_pd(fiy2,ty);
1882             fiz2             = _mm256_add_pd(fiz2,tz);
1883
1884             fjx1             = _mm256_add_pd(fjx1,tx);
1885             fjy1             = _mm256_add_pd(fjy1,ty);
1886             fjz1             = _mm256_add_pd(fjz1,tz);
1887
1888             /**************************
1889              * CALCULATE INTERACTIONS *
1890              **************************/
1891
1892             /* COULOMB ELECTROSTATICS */
1893             velec            = _mm256_mul_pd(qq22,rinv22);
1894             felec            = _mm256_mul_pd(velec,rinvsq22);
1895
1896             fscal            = felec;
1897
1898             fscal            = _mm256_andnot_pd(dummy_mask,fscal);
1899
1900             /* Calculate temporary vectorial force */
1901             tx               = _mm256_mul_pd(fscal,dx22);
1902             ty               = _mm256_mul_pd(fscal,dy22);
1903             tz               = _mm256_mul_pd(fscal,dz22);
1904
1905             /* Update vectorial force */
1906             fix2             = _mm256_add_pd(fix2,tx);
1907             fiy2             = _mm256_add_pd(fiy2,ty);
1908             fiz2             = _mm256_add_pd(fiz2,tz);
1909
1910             fjx2             = _mm256_add_pd(fjx2,tx);
1911             fjy2             = _mm256_add_pd(fjy2,ty);
1912             fjz2             = _mm256_add_pd(fjz2,tz);
1913
1914             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1915             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1916             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1917             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
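                 /* Padded lanes point at the scratch buffer, so the swizzled decrement below never
                  * touches a real atom for dummy entries.
                  */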
1918
1919             gmx_mm256_decrement_3rvec_4ptr_swizzle_pd(fjptrA,fjptrB,fjptrC,fjptrD,
1920                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1921
1922             /* Inner loop uses 262 flops */
1923         }
1924
1925         /* End of innermost loop */
1926
1927         gmx_mm256_update_iforce_3atom_swizzle_pd(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1928                                                  f+i_coord_offset,fshift+i_shift_offset);
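             /* The call above reduces the four SIMD lanes of the accumulated i forces, adds them
              * to the per-atom force array and accumulates the corresponding shift force used for
              * the virial.
              */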
1929
1930         /* Increment number of inner iterations */
1931         inneriter                  += j_index_end - j_index_start;
1932
1933         /* Outer loop uses 18 flops */
1934     }
1935
1936     /* Increment number of outer iterations */
1937     outeriter        += nri;
1938
1939     /* Update outer/inner flops */
1940
1941     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*262);
1942 }