1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_single kernel generator.
37  */
38 #include "gmxpre.h"
39
40 #include "config.h"
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "gromacs/legacyheaders/types/simple.h"
46 #include "gromacs/math/vec.h"
47 #include "gromacs/legacyheaders/nrnb.h"
48
49 #include "gromacs/simd/math_x86_avx_256_single.h"
50 #include "kernelutil_x86_avx_256_single.h"
51
52 /*
53  * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_single
54  * Electrostatics interaction: Ewald
55  * VdW interaction:            LennardJones
56  * Geometry:                   Water3-Water3
57  * Calculate force/pot:        PotentialAndForce
58  */
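/*
 * Note: the kernel evaluates all nine site pairs between a 3-site i water and a
 * 3-site j water. Ewald (PME real-space) electrostatics is computed for every
 * pair, while Lennard-Jones is computed only for the 0-0 site pair, the only
 * one for which c6/c12 parameters are loaded below.
 */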
59 void
60 nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_single
61                     (t_nblist                    * gmx_restrict       nlist,
62                      rvec                        * gmx_restrict          xx,
63                      rvec                        * gmx_restrict          ff,
64                      t_forcerec                  * gmx_restrict          fr,
65                      t_mdatoms                   * gmx_restrict     mdatoms,
66                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67                      t_nrnb                      * gmx_restrict        nrnb)
68 {
69     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
70      * just 0 for non-waters.
71      * Suffixes A,B,C,D,E,F,G,H refer to the j-loop unrolling done with AVX, i.e. the eight
72      * different jnr indices whose data occupy the eight positions of each 256-bit SIMD register.
73      */
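    /* Illustration: rsq12 holds the eight squared distances between site 1 of the
     * current i water and site 2 of j waters A..H, one per lane of a __m256 register.
     */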
74     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
75     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76     int              jnrA,jnrB,jnrC,jnrD;
77     int              jnrE,jnrF,jnrG,jnrH;
78     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
79     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
80     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
81     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
82     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
83     real             rcutoff_scalar;
84     real             *shiftvec,*fshift,*x,*f;
85     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
86     real             scratch[4*DIM];
87     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
88     real *           vdwioffsetptr0;
89     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
90     real *           vdwioffsetptr1;
91     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
92     real *           vdwioffsetptr2;
93     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
94     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
95     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
96     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
97     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
98     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
99     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
100     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
101     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
102     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
103     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
104     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
105     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
106     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
107     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
108     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
109     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
110     real             *charge;
111     int              nvdwtype;
112     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
113     int              *vdwtype;
114     real             *vdwparam;
115     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
116     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
117     __m256i          ewitab;
118     __m128i          ewitab_lo,ewitab_hi;
119     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
120     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
121     real             *ewtab;
122     __m256           dummy_mask,cutoff_mask;
123     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
124     __m256           one     = _mm256_set1_ps(1.0);
125     __m256           two     = _mm256_set1_ps(2.0);
126     x                = xx[0];
127     f                = ff[0];
128
129     nri              = nlist->nri;
130     iinr             = nlist->iinr;
131     jindex           = nlist->jindex;
132     jjnr             = nlist->jjnr;
133     shiftidx         = nlist->shift;
134     gid              = nlist->gid;
135     shiftvec         = fr->shift_vec[0];
136     fshift           = fr->fshift[0];
137     facel            = _mm256_set1_ps(fr->epsfac);
138     charge           = mdatoms->chargeA;
139     nvdwtype         = fr->ntype;
140     vdwparam         = fr->nbfp;
141     vdwtype          = mdatoms->typeA;
142
143     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
144     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
145     beta2            = _mm256_mul_ps(beta,beta);
146     beta3            = _mm256_mul_ps(beta,beta2);
147
148     ewtab            = fr->ic->tabq_coul_FDV0;
149     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
150     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
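    /* The table pointer and scales above belong to the tabulated-Ewald path of the
     * kernel generator; this kernel uses the analytical PME correction in the
     * interaction blocks, so they are not referenced there.
     */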
151
152     /* Setup water-specific parameters */
153     inr              = nlist->iinr[0];
154     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
155     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
156     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
157     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
158
159     jq0              = _mm256_set1_ps(charge[inr+0]);
160     jq1              = _mm256_set1_ps(charge[inr+1]);
161     jq2              = _mm256_set1_ps(charge[inr+2]);
162     vdwjidx0A        = 2*vdwtype[inr+0];
163     qq00             = _mm256_mul_ps(iq0,jq0);
164     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
165     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
166     qq01             = _mm256_mul_ps(iq0,jq1);
167     qq02             = _mm256_mul_ps(iq0,jq2);
168     qq10             = _mm256_mul_ps(iq1,jq0);
169     qq11             = _mm256_mul_ps(iq1,jq1);
170     qq12             = _mm256_mul_ps(iq1,jq2);
171     qq20             = _mm256_mul_ps(iq2,jq0);
172     qq21             = _mm256_mul_ps(iq2,jq1);
173     qq22             = _mm256_mul_ps(iq2,jq2);
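    /* Every molecule in a Water3-Water3 list is the same 3-site water, so all nine
     * charge products and the single 0-0 Lennard-Jones pair are precomputed once
     * from the first i entry and reused for the whole neighborlist.
     */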
174
175     /* Initialize to avoid compiler warnings about possibly uninitialized variables */
176     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
177     j_coord_offsetA = 0;
178     j_coord_offsetB = 0;
179     j_coord_offsetC = 0;
180     j_coord_offsetD = 0;
181     j_coord_offsetE = 0;
182     j_coord_offsetF = 0;
183     j_coord_offsetG = 0;
184     j_coord_offsetH = 0;
185
186     outeriter        = 0;
187     inneriter        = 0;
188
189     for(iidx=0;iidx<4*DIM;iidx++)
190     {
191         scratch[iidx] = 0.0;
192     }
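    /* scratch[] is a dump target: forces computed for padded (dummy) j entries in
     * the masked loop below are written here instead of into the real force array.
     */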
193
194     /* Start outer loop over neighborlists */
195     for(iidx=0; iidx<nri; iidx++)
196     {
197         /* Load shift vector for this list */
198         i_shift_offset   = DIM*shiftidx[iidx];
199
200         /* Load limits for loop over neighbors */
201         j_index_start    = jindex[iidx];
202         j_index_end      = jindex[iidx+1];
203
204         /* Get outer coordinate index */
205         inr              = iinr[iidx];
206         i_coord_offset   = DIM*inr;
207
208         /* Load i particle coords and add shift vector */
209         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
210                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
211
212         fix0             = _mm256_setzero_ps();
213         fiy0             = _mm256_setzero_ps();
214         fiz0             = _mm256_setzero_ps();
215         fix1             = _mm256_setzero_ps();
216         fiy1             = _mm256_setzero_ps();
217         fiz1             = _mm256_setzero_ps();
218         fix2             = _mm256_setzero_ps();
219         fiy2             = _mm256_setzero_ps();
220         fiz2             = _mm256_setzero_ps();
221
222         /* Reset potential sums */
223         velecsum         = _mm256_setzero_ps();
224         vvdwsum          = _mm256_setzero_ps();
225
226         /* Start inner kernel loop */
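        /* This loop processes complete groups of eight real j entries; a remainder,
         * padded with negative indices, is handled once more below with dummy_mask.
         */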
227         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
228         {
229
230             /* Get j neighbor index, and coordinate index */
231             jnrA             = jjnr[jidx];
232             jnrB             = jjnr[jidx+1];
233             jnrC             = jjnr[jidx+2];
234             jnrD             = jjnr[jidx+3];
235             jnrE             = jjnr[jidx+4];
236             jnrF             = jjnr[jidx+5];
237             jnrG             = jjnr[jidx+6];
238             jnrH             = jjnr[jidx+7];
239             j_coord_offsetA  = DIM*jnrA;
240             j_coord_offsetB  = DIM*jnrB;
241             j_coord_offsetC  = DIM*jnrC;
242             j_coord_offsetD  = DIM*jnrD;
243             j_coord_offsetE  = DIM*jnrE;
244             j_coord_offsetF  = DIM*jnrF;
245             j_coord_offsetG  = DIM*jnrG;
246             j_coord_offsetH  = DIM*jnrH;
247
248             /* load j atom coordinates */
249             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
250                                                  x+j_coord_offsetC,x+j_coord_offsetD,
251                                                  x+j_coord_offsetE,x+j_coord_offsetF,
252                                                  x+j_coord_offsetG,x+j_coord_offsetH,
253                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
254
255             /* Calculate displacement vector */
256             dx00             = _mm256_sub_ps(ix0,jx0);
257             dy00             = _mm256_sub_ps(iy0,jy0);
258             dz00             = _mm256_sub_ps(iz0,jz0);
259             dx01             = _mm256_sub_ps(ix0,jx1);
260             dy01             = _mm256_sub_ps(iy0,jy1);
261             dz01             = _mm256_sub_ps(iz0,jz1);
262             dx02             = _mm256_sub_ps(ix0,jx2);
263             dy02             = _mm256_sub_ps(iy0,jy2);
264             dz02             = _mm256_sub_ps(iz0,jz2);
265             dx10             = _mm256_sub_ps(ix1,jx0);
266             dy10             = _mm256_sub_ps(iy1,jy0);
267             dz10             = _mm256_sub_ps(iz1,jz0);
268             dx11             = _mm256_sub_ps(ix1,jx1);
269             dy11             = _mm256_sub_ps(iy1,jy1);
270             dz11             = _mm256_sub_ps(iz1,jz1);
271             dx12             = _mm256_sub_ps(ix1,jx2);
272             dy12             = _mm256_sub_ps(iy1,jy2);
273             dz12             = _mm256_sub_ps(iz1,jz2);
274             dx20             = _mm256_sub_ps(ix2,jx0);
275             dy20             = _mm256_sub_ps(iy2,jy0);
276             dz20             = _mm256_sub_ps(iz2,jz0);
277             dx21             = _mm256_sub_ps(ix2,jx1);
278             dy21             = _mm256_sub_ps(iy2,jy1);
279             dz21             = _mm256_sub_ps(iz2,jz1);
280             dx22             = _mm256_sub_ps(ix2,jx2);
281             dy22             = _mm256_sub_ps(iy2,jy2);
282             dz22             = _mm256_sub_ps(iz2,jz2);
283
284             /* Calculate squared distance and things based on it */
285             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
286             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
287             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
288             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
289             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
290             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
291             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
292             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
293             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
294
295             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
296             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
297             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
298             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
299             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
300             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
301             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
302             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
303             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
304
305             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
306             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
307             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
308             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
309             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
310             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
311             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
312             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
313             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
314
315             fjx0             = _mm256_setzero_ps();
316             fjy0             = _mm256_setzero_ps();
317             fjz0             = _mm256_setzero_ps();
318             fjx1             = _mm256_setzero_ps();
319             fjy1             = _mm256_setzero_ps();
320             fjz1             = _mm256_setzero_ps();
321             fjx2             = _mm256_setzero_ps();
322             fjy2             = _mm256_setzero_ps();
323             fjz2             = _mm256_setzero_ps();
324
325             /**************************
326              * CALCULATE INTERACTIONS *
327              **************************/
328
329             r00              = _mm256_mul_ps(rsq00,rinv00);
330
331             /* EWALD ELECTROSTATICS */
332             
333             /* Analytical PME correction */
334             zeta2            = _mm256_mul_ps(beta2,rsq00);
335             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
336             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
337             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
338             felec            = _mm256_mul_ps(qq00,felec);
339             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
340             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
341             velec            = _mm256_sub_ps(rinv00,pmecorrV);
342             velec            = _mm256_mul_ps(qq00,velec);
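            /* The correction terms subtract the part of the 1/r interaction that PME
             * handles in reciprocal space: velec is the short-range (erfc-screened)
             * Coulomb energy for this site pair and felec the matching force over r.
             */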
343             
344             /* LENNARD-JONES DISPERSION/REPULSION */
345
346             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
347             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
348             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
349             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
350             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
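            /* The 1/6 and 1/12 factors imply that c6_00/c12_00 are stored premultiplied
             * by 6 and 12; with that convention vvdw is the Lennard-Jones potential and
             * fvdw the corresponding force divided by r.
             */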
351
352             /* Update potential sum for this i atom from the interaction with this j atom. */
353             velecsum         = _mm256_add_ps(velecsum,velec);
354             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
355
356             fscal            = _mm256_add_ps(felec,fvdw);
357
358             /* Calculate temporary vectorial force */
359             tx               = _mm256_mul_ps(fscal,dx00);
360             ty               = _mm256_mul_ps(fscal,dy00);
361             tz               = _mm256_mul_ps(fscal,dz00);
362
363             /* Update vectorial force */
364             fix0             = _mm256_add_ps(fix0,tx);
365             fiy0             = _mm256_add_ps(fiy0,ty);
366             fiz0             = _mm256_add_ps(fiz0,tz);
367
368             fjx0             = _mm256_add_ps(fjx0,tx);
369             fjy0             = _mm256_add_ps(fjy0,ty);
370             fjz0             = _mm256_add_ps(fjz0,tz);
371
372             /**************************
373              * CALCULATE INTERACTIONS *
374              **************************/
375
376             r01              = _mm256_mul_ps(rsq01,rinv01);
377
378             /* EWALD ELECTROSTATICS */
379             
380             /* Analytical PME correction */
381             zeta2            = _mm256_mul_ps(beta2,rsq01);
382             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
383             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
384             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
385             felec            = _mm256_mul_ps(qq01,felec);
386             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
387             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
388             velec            = _mm256_sub_ps(rinv01,pmecorrV);
389             velec            = _mm256_mul_ps(qq01,velec);
390             
391             /* Update potential sum for this i atom from the interaction with this j atom. */
392             velecsum         = _mm256_add_ps(velecsum,velec);
393
394             fscal            = felec;
395
396             /* Calculate temporary vectorial force */
397             tx               = _mm256_mul_ps(fscal,dx01);
398             ty               = _mm256_mul_ps(fscal,dy01);
399             tz               = _mm256_mul_ps(fscal,dz01);
400
401             /* Update vectorial force */
402             fix0             = _mm256_add_ps(fix0,tx);
403             fiy0             = _mm256_add_ps(fiy0,ty);
404             fiz0             = _mm256_add_ps(fiz0,tz);
405
406             fjx1             = _mm256_add_ps(fjx1,tx);
407             fjy1             = _mm256_add_ps(fjy1,ty);
408             fjz1             = _mm256_add_ps(fjz1,tz);
409
410             /**************************
411              * CALCULATE INTERACTIONS *
412              **************************/
413
414             r02              = _mm256_mul_ps(rsq02,rinv02);
415
416             /* EWALD ELECTROSTATICS */
417             
418             /* Analytical PME correction */
419             zeta2            = _mm256_mul_ps(beta2,rsq02);
420             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
421             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
422             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
423             felec            = _mm256_mul_ps(qq02,felec);
424             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
425             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
426             velec            = _mm256_sub_ps(rinv02,pmecorrV);
427             velec            = _mm256_mul_ps(qq02,velec);
428             
429             /* Update potential sum for this i atom from the interaction with this j atom. */
430             velecsum         = _mm256_add_ps(velecsum,velec);
431
432             fscal            = felec;
433
434             /* Calculate temporary vectorial force */
435             tx               = _mm256_mul_ps(fscal,dx02);
436             ty               = _mm256_mul_ps(fscal,dy02);
437             tz               = _mm256_mul_ps(fscal,dz02);
438
439             /* Update vectorial force */
440             fix0             = _mm256_add_ps(fix0,tx);
441             fiy0             = _mm256_add_ps(fiy0,ty);
442             fiz0             = _mm256_add_ps(fiz0,tz);
443
444             fjx2             = _mm256_add_ps(fjx2,tx);
445             fjy2             = _mm256_add_ps(fjy2,ty);
446             fjz2             = _mm256_add_ps(fjz2,tz);
447
448             /**************************
449              * CALCULATE INTERACTIONS *
450              **************************/
451
452             r10              = _mm256_mul_ps(rsq10,rinv10);
453
454             /* EWALD ELECTROSTATICS */
455             
456             /* Analytical PME correction */
457             zeta2            = _mm256_mul_ps(beta2,rsq10);
458             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
459             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
460             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
461             felec            = _mm256_mul_ps(qq10,felec);
462             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
463             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
464             velec            = _mm256_sub_ps(rinv10,pmecorrV);
465             velec            = _mm256_mul_ps(qq10,velec);
466             
467             /* Update potential sum for this i atom from the interaction with this j atom. */
468             velecsum         = _mm256_add_ps(velecsum,velec);
469
470             fscal            = felec;
471
472             /* Calculate temporary vectorial force */
473             tx               = _mm256_mul_ps(fscal,dx10);
474             ty               = _mm256_mul_ps(fscal,dy10);
475             tz               = _mm256_mul_ps(fscal,dz10);
476
477             /* Update vectorial force */
478             fix1             = _mm256_add_ps(fix1,tx);
479             fiy1             = _mm256_add_ps(fiy1,ty);
480             fiz1             = _mm256_add_ps(fiz1,tz);
481
482             fjx0             = _mm256_add_ps(fjx0,tx);
483             fjy0             = _mm256_add_ps(fjy0,ty);
484             fjz0             = _mm256_add_ps(fjz0,tz);
485
486             /**************************
487              * CALCULATE INTERACTIONS *
488              **************************/
489
490             r11              = _mm256_mul_ps(rsq11,rinv11);
491
492             /* EWALD ELECTROSTATICS */
493             
494             /* Analytical PME correction */
495             zeta2            = _mm256_mul_ps(beta2,rsq11);
496             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
497             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
498             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
499             felec            = _mm256_mul_ps(qq11,felec);
500             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
501             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
502             velec            = _mm256_sub_ps(rinv11,pmecorrV);
503             velec            = _mm256_mul_ps(qq11,velec);
504             
505             /* Update potential sum for this i atom from the interaction with this j atom. */
506             velecsum         = _mm256_add_ps(velecsum,velec);
507
508             fscal            = felec;
509
510             /* Calculate temporary vectorial force */
511             tx               = _mm256_mul_ps(fscal,dx11);
512             ty               = _mm256_mul_ps(fscal,dy11);
513             tz               = _mm256_mul_ps(fscal,dz11);
514
515             /* Update vectorial force */
516             fix1             = _mm256_add_ps(fix1,tx);
517             fiy1             = _mm256_add_ps(fiy1,ty);
518             fiz1             = _mm256_add_ps(fiz1,tz);
519
520             fjx1             = _mm256_add_ps(fjx1,tx);
521             fjy1             = _mm256_add_ps(fjy1,ty);
522             fjz1             = _mm256_add_ps(fjz1,tz);
523
524             /**************************
525              * CALCULATE INTERACTIONS *
526              **************************/
527
528             r12              = _mm256_mul_ps(rsq12,rinv12);
529
530             /* EWALD ELECTROSTATICS */
531             
532             /* Analytical PME correction */
533             zeta2            = _mm256_mul_ps(beta2,rsq12);
534             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
535             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
536             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
537             felec            = _mm256_mul_ps(qq12,felec);
538             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
539             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
540             velec            = _mm256_sub_ps(rinv12,pmecorrV);
541             velec            = _mm256_mul_ps(qq12,velec);
542             
543             /* Update potential sum for this i atom from the interaction with this j atom. */
544             velecsum         = _mm256_add_ps(velecsum,velec);
545
546             fscal            = felec;
547
548             /* Calculate temporary vectorial force */
549             tx               = _mm256_mul_ps(fscal,dx12);
550             ty               = _mm256_mul_ps(fscal,dy12);
551             tz               = _mm256_mul_ps(fscal,dz12);
552
553             /* Update vectorial force */
554             fix1             = _mm256_add_ps(fix1,tx);
555             fiy1             = _mm256_add_ps(fiy1,ty);
556             fiz1             = _mm256_add_ps(fiz1,tz);
557
558             fjx2             = _mm256_add_ps(fjx2,tx);
559             fjy2             = _mm256_add_ps(fjy2,ty);
560             fjz2             = _mm256_add_ps(fjz2,tz);
561
562             /**************************
563              * CALCULATE INTERACTIONS *
564              **************************/
565
566             r20              = _mm256_mul_ps(rsq20,rinv20);
567
568             /* EWALD ELECTROSTATICS */
569             
570             /* Analytical PME correction */
571             zeta2            = _mm256_mul_ps(beta2,rsq20);
572             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
573             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
574             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
575             felec            = _mm256_mul_ps(qq20,felec);
576             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
577             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
578             velec            = _mm256_sub_ps(rinv20,pmecorrV);
579             velec            = _mm256_mul_ps(qq20,velec);
580             
581             /* Update potential sum for this i atom from the interaction with this j atom. */
582             velecsum         = _mm256_add_ps(velecsum,velec);
583
584             fscal            = felec;
585
586             /* Calculate temporary vectorial force */
587             tx               = _mm256_mul_ps(fscal,dx20);
588             ty               = _mm256_mul_ps(fscal,dy20);
589             tz               = _mm256_mul_ps(fscal,dz20);
590
591             /* Update vectorial force */
592             fix2             = _mm256_add_ps(fix2,tx);
593             fiy2             = _mm256_add_ps(fiy2,ty);
594             fiz2             = _mm256_add_ps(fiz2,tz);
595
596             fjx0             = _mm256_add_ps(fjx0,tx);
597             fjy0             = _mm256_add_ps(fjy0,ty);
598             fjz0             = _mm256_add_ps(fjz0,tz);
599
600             /**************************
601              * CALCULATE INTERACTIONS *
602              **************************/
603
604             r21              = _mm256_mul_ps(rsq21,rinv21);
605
606             /* EWALD ELECTROSTATICS */
607             
608             /* Analytical PME correction */
609             zeta2            = _mm256_mul_ps(beta2,rsq21);
610             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
611             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
612             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
613             felec            = _mm256_mul_ps(qq21,felec);
614             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
615             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
616             velec            = _mm256_sub_ps(rinv21,pmecorrV);
617             velec            = _mm256_mul_ps(qq21,velec);
618             
619             /* Update potential sum for this i atom from the interaction with this j atom. */
620             velecsum         = _mm256_add_ps(velecsum,velec);
621
622             fscal            = felec;
623
624             /* Calculate temporary vectorial force */
625             tx               = _mm256_mul_ps(fscal,dx21);
626             ty               = _mm256_mul_ps(fscal,dy21);
627             tz               = _mm256_mul_ps(fscal,dz21);
628
629             /* Update vectorial force */
630             fix2             = _mm256_add_ps(fix2,tx);
631             fiy2             = _mm256_add_ps(fiy2,ty);
632             fiz2             = _mm256_add_ps(fiz2,tz);
633
634             fjx1             = _mm256_add_ps(fjx1,tx);
635             fjy1             = _mm256_add_ps(fjy1,ty);
636             fjz1             = _mm256_add_ps(fjz1,tz);
637
638             /**************************
639              * CALCULATE INTERACTIONS *
640              **************************/
641
642             r22              = _mm256_mul_ps(rsq22,rinv22);
643
644             /* EWALD ELECTROSTATICS */
645             
646             /* Analytical PME correction */
647             zeta2            = _mm256_mul_ps(beta2,rsq22);
648             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
649             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
650             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
651             felec            = _mm256_mul_ps(qq22,felec);
652             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
653             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
654             velec            = _mm256_sub_ps(rinv22,pmecorrV);
655             velec            = _mm256_mul_ps(qq22,velec);
656             
657             /* Update potential sum for this i atom from the interaction with this j atom. */
658             velecsum         = _mm256_add_ps(velecsum,velec);
659
660             fscal            = felec;
661
662             /* Calculate temporary vectorial force */
663             tx               = _mm256_mul_ps(fscal,dx22);
664             ty               = _mm256_mul_ps(fscal,dy22);
665             tz               = _mm256_mul_ps(fscal,dz22);
666
667             /* Update vectorial force */
668             fix2             = _mm256_add_ps(fix2,tx);
669             fiy2             = _mm256_add_ps(fiy2,ty);
670             fiz2             = _mm256_add_ps(fiz2,tz);
671
672             fjx2             = _mm256_add_ps(fjx2,tx);
673             fjy2             = _mm256_add_ps(fjy2,ty);
674             fjz2             = _mm256_add_ps(fjz2,tz);
675
676             fjptrA             = f+j_coord_offsetA;
677             fjptrB             = f+j_coord_offsetB;
678             fjptrC             = f+j_coord_offsetC;
679             fjptrD             = f+j_coord_offsetD;
680             fjptrE             = f+j_coord_offsetE;
681             fjptrF             = f+j_coord_offsetF;
682             fjptrG             = f+j_coord_offsetG;
683             fjptrH             = f+j_coord_offsetH;
684
685             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
686                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
687
688             /* Inner loop uses 768 flops */
689         }
690
691         if(jidx<j_index_end)
692         {
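            /* Remainder iteration: fewer than eight real j entries are left, so the
             * list is padded with negative indices and every contribution from a
             * padded lane is cleared with dummy_mask before accumulation.
             */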
693
694             /* Get j neighbor index, and coordinate index */
695             jnrlistA         = jjnr[jidx];
696             jnrlistB         = jjnr[jidx+1];
697             jnrlistC         = jjnr[jidx+2];
698             jnrlistD         = jjnr[jidx+3];
699             jnrlistE         = jjnr[jidx+4];
700             jnrlistF         = jjnr[jidx+5];
701             jnrlistG         = jjnr[jidx+6];
702             jnrlistH         = jjnr[jidx+7];
703             /* The jjnr entries are negative for padded (non-real) atoms.
704              * The resulting mask is 0xFFFFFFFF for dummy entries and 0x0 for real ones,
705              * so it is used as val = _mm256_andnot_ps(dummy_mask,val) to clear dummy entries.
706              */
707             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
708                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
709                                             
710             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
711             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
712             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
713             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
714             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
715             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
716             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
717             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
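            /* Padded (negative) indices are clamped to 0 so the coordinate loads below
             * stay within bounds; the loaded values are discarded via dummy_mask.
             */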
718             j_coord_offsetA  = DIM*jnrA;
719             j_coord_offsetB  = DIM*jnrB;
720             j_coord_offsetC  = DIM*jnrC;
721             j_coord_offsetD  = DIM*jnrD;
722             j_coord_offsetE  = DIM*jnrE;
723             j_coord_offsetF  = DIM*jnrF;
724             j_coord_offsetG  = DIM*jnrG;
725             j_coord_offsetH  = DIM*jnrH;
726
727             /* load j atom coordinates */
728             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
729                                                  x+j_coord_offsetC,x+j_coord_offsetD,
730                                                  x+j_coord_offsetE,x+j_coord_offsetF,
731                                                  x+j_coord_offsetG,x+j_coord_offsetH,
732                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
733
734             /* Calculate displacement vector */
735             dx00             = _mm256_sub_ps(ix0,jx0);
736             dy00             = _mm256_sub_ps(iy0,jy0);
737             dz00             = _mm256_sub_ps(iz0,jz0);
738             dx01             = _mm256_sub_ps(ix0,jx1);
739             dy01             = _mm256_sub_ps(iy0,jy1);
740             dz01             = _mm256_sub_ps(iz0,jz1);
741             dx02             = _mm256_sub_ps(ix0,jx2);
742             dy02             = _mm256_sub_ps(iy0,jy2);
743             dz02             = _mm256_sub_ps(iz0,jz2);
744             dx10             = _mm256_sub_ps(ix1,jx0);
745             dy10             = _mm256_sub_ps(iy1,jy0);
746             dz10             = _mm256_sub_ps(iz1,jz0);
747             dx11             = _mm256_sub_ps(ix1,jx1);
748             dy11             = _mm256_sub_ps(iy1,jy1);
749             dz11             = _mm256_sub_ps(iz1,jz1);
750             dx12             = _mm256_sub_ps(ix1,jx2);
751             dy12             = _mm256_sub_ps(iy1,jy2);
752             dz12             = _mm256_sub_ps(iz1,jz2);
753             dx20             = _mm256_sub_ps(ix2,jx0);
754             dy20             = _mm256_sub_ps(iy2,jy0);
755             dz20             = _mm256_sub_ps(iz2,jz0);
756             dx21             = _mm256_sub_ps(ix2,jx1);
757             dy21             = _mm256_sub_ps(iy2,jy1);
758             dz21             = _mm256_sub_ps(iz2,jz1);
759             dx22             = _mm256_sub_ps(ix2,jx2);
760             dy22             = _mm256_sub_ps(iy2,jy2);
761             dz22             = _mm256_sub_ps(iz2,jz2);
762
763             /* Calculate squared distance and things based on it */
764             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
765             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
766             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
767             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
768             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
769             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
770             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
771             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
772             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
773
774             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
775             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
776             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
777             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
778             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
779             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
780             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
781             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
782             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
783
784             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
785             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
786             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
787             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
788             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
789             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
790             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
791             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
792             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
793
794             fjx0             = _mm256_setzero_ps();
795             fjy0             = _mm256_setzero_ps();
796             fjz0             = _mm256_setzero_ps();
797             fjx1             = _mm256_setzero_ps();
798             fjy1             = _mm256_setzero_ps();
799             fjz1             = _mm256_setzero_ps();
800             fjx2             = _mm256_setzero_ps();
801             fjy2             = _mm256_setzero_ps();
802             fjz2             = _mm256_setzero_ps();
803
804             /**************************
805              * CALCULATE INTERACTIONS *
806              **************************/
807
808             r00              = _mm256_mul_ps(rsq00,rinv00);
809             r00              = _mm256_andnot_ps(dummy_mask,r00);
810
811             /* EWALD ELECTROSTATICS */
812             
813             /* Analytical PME correction */
814             zeta2            = _mm256_mul_ps(beta2,rsq00);
815             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
816             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
817             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
818             felec            = _mm256_mul_ps(qq00,felec);
819             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
820             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
821             velec            = _mm256_sub_ps(rinv00,pmecorrV);
822             velec            = _mm256_mul_ps(qq00,velec);
823             
824             /* LENNARD-JONES DISPERSION/REPULSION */
825
826             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
827             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
828             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
829             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
830             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
831
832             /* Update potential sum for this i atom from the interaction with this j atom. */
833             velec            = _mm256_andnot_ps(dummy_mask,velec);
834             velecsum         = _mm256_add_ps(velecsum,velec);
835             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
836             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
837
838             fscal            = _mm256_add_ps(felec,fvdw);
839
840             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
841
842             /* Calculate temporary vectorial force */
843             tx               = _mm256_mul_ps(fscal,dx00);
844             ty               = _mm256_mul_ps(fscal,dy00);
845             tz               = _mm256_mul_ps(fscal,dz00);
846
847             /* Update vectorial force */
848             fix0             = _mm256_add_ps(fix0,tx);
849             fiy0             = _mm256_add_ps(fiy0,ty);
850             fiz0             = _mm256_add_ps(fiz0,tz);
851
852             fjx0             = _mm256_add_ps(fjx0,tx);
853             fjy0             = _mm256_add_ps(fjy0,ty);
854             fjz0             = _mm256_add_ps(fjz0,tz);
855
856             /**************************
857              * CALCULATE INTERACTIONS *
858              **************************/
859
860             r01              = _mm256_mul_ps(rsq01,rinv01);
861             r01              = _mm256_andnot_ps(dummy_mask,r01);
862
863             /* EWALD ELECTROSTATICS */
864             
865             /* Analytical PME correction */
866             zeta2            = _mm256_mul_ps(beta2,rsq01);
867             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
868             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
869             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
870             felec            = _mm256_mul_ps(qq01,felec);
871             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
872             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
873             velec            = _mm256_sub_ps(rinv01,pmecorrV);
874             velec            = _mm256_mul_ps(qq01,velec);
875             
876             /* Update potential sum for this i atom from the interaction with this j atom. */
877             velec            = _mm256_andnot_ps(dummy_mask,velec);
878             velecsum         = _mm256_add_ps(velecsum,velec);
879
880             fscal            = felec;
881
882             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
883
884             /* Calculate temporary vectorial force */
885             tx               = _mm256_mul_ps(fscal,dx01);
886             ty               = _mm256_mul_ps(fscal,dy01);
887             tz               = _mm256_mul_ps(fscal,dz01);
888
889             /* Update vectorial force */
890             fix0             = _mm256_add_ps(fix0,tx);
891             fiy0             = _mm256_add_ps(fiy0,ty);
892             fiz0             = _mm256_add_ps(fiz0,tz);
893
894             fjx1             = _mm256_add_ps(fjx1,tx);
895             fjy1             = _mm256_add_ps(fjy1,ty);
896             fjz1             = _mm256_add_ps(fjz1,tz);
897
898             /**************************
899              * CALCULATE INTERACTIONS *
900              **************************/
901
902             r02              = _mm256_mul_ps(rsq02,rinv02);
903             r02              = _mm256_andnot_ps(dummy_mask,r02);
904
905             /* EWALD ELECTROSTATICS */
906             
907             /* Analytical PME correction */
908             zeta2            = _mm256_mul_ps(beta2,rsq02);
909             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
910             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
911             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
912             felec            = _mm256_mul_ps(qq02,felec);
913             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
914             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
915             velec            = _mm256_sub_ps(rinv02,pmecorrV);
916             velec            = _mm256_mul_ps(qq02,velec);
917             
918             /* Update potential sum for this i atom from the interaction with this j atom. */
919             velec            = _mm256_andnot_ps(dummy_mask,velec);
920             velecsum         = _mm256_add_ps(velecsum,velec);
921
922             fscal            = felec;
923
924             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
925
926             /* Calculate temporary vectorial force */
927             tx               = _mm256_mul_ps(fscal,dx02);
928             ty               = _mm256_mul_ps(fscal,dy02);
929             tz               = _mm256_mul_ps(fscal,dz02);
930
931             /* Update vectorial force */
932             fix0             = _mm256_add_ps(fix0,tx);
933             fiy0             = _mm256_add_ps(fiy0,ty);
934             fiz0             = _mm256_add_ps(fiz0,tz);
935
936             fjx2             = _mm256_add_ps(fjx2,tx);
937             fjy2             = _mm256_add_ps(fjy2,ty);
938             fjz2             = _mm256_add_ps(fjz2,tz);
939
940             /**************************
941              * CALCULATE INTERACTIONS *
942              **************************/
943
944             r10              = _mm256_mul_ps(rsq10,rinv10);
945             r10              = _mm256_andnot_ps(dummy_mask,r10);
946
947             /* EWALD ELECTROSTATICS */
948             
949             /* Analytical PME correction */
950             zeta2            = _mm256_mul_ps(beta2,rsq10);
951             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
952             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
953             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
954             felec            = _mm256_mul_ps(qq10,felec);
955             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
956             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
957             velec            = _mm256_sub_ps(rinv10,pmecorrV);
958             velec            = _mm256_mul_ps(qq10,velec);
959             
960             /* Update potential sum for this i atom from the interaction with this j atom. */
961             velec            = _mm256_andnot_ps(dummy_mask,velec);
962             velecsum         = _mm256_add_ps(velecsum,velec);
963
964             fscal            = felec;
965
966             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
967
968             /* Calculate temporary vectorial force */
969             tx               = _mm256_mul_ps(fscal,dx10);
970             ty               = _mm256_mul_ps(fscal,dy10);
971             tz               = _mm256_mul_ps(fscal,dz10);
972
973             /* Update vectorial force */
974             fix1             = _mm256_add_ps(fix1,tx);
975             fiy1             = _mm256_add_ps(fiy1,ty);
976             fiz1             = _mm256_add_ps(fiz1,tz);
977
978             fjx0             = _mm256_add_ps(fjx0,tx);
979             fjy0             = _mm256_add_ps(fjy0,ty);
980             fjz0             = _mm256_add_ps(fjz0,tz);
981
982             /**************************
983              * CALCULATE INTERACTIONS *
984              **************************/
985
986             r11              = _mm256_mul_ps(rsq11,rinv11);
987             r11              = _mm256_andnot_ps(dummy_mask,r11);
988
989             /* EWALD ELECTROSTATICS */
990             
991             /* Analytical PME correction */
992             zeta2            = _mm256_mul_ps(beta2,rsq11);
993             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
994             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
995             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
996             felec            = _mm256_mul_ps(qq11,felec);
997             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
998             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
999             velec            = _mm256_sub_ps(rinv11,pmecorrV);
1000             velec            = _mm256_mul_ps(qq11,velec);
1001             
1002             /* Update potential sum for this i atom from the interaction with this j atom. */
1003             velec            = _mm256_andnot_ps(dummy_mask,velec);
1004             velecsum         = _mm256_add_ps(velecsum,velec);
1005
1006             fscal            = felec;
1007
1008             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1009
1010             /* Calculate temporary vectorial force */
1011             tx               = _mm256_mul_ps(fscal,dx11);
1012             ty               = _mm256_mul_ps(fscal,dy11);
1013             tz               = _mm256_mul_ps(fscal,dz11);
1014
1015             /* Update vectorial force */
1016             fix1             = _mm256_add_ps(fix1,tx);
1017             fiy1             = _mm256_add_ps(fiy1,ty);
1018             fiz1             = _mm256_add_ps(fiz1,tz);
1019
1020             fjx1             = _mm256_add_ps(fjx1,tx);
1021             fjy1             = _mm256_add_ps(fjy1,ty);
1022             fjz1             = _mm256_add_ps(fjz1,tz);
1023
1024             /**************************
1025              * CALCULATE INTERACTIONS *
1026              **************************/
1027
1028             r12              = _mm256_mul_ps(rsq12,rinv12);
1029             r12              = _mm256_andnot_ps(dummy_mask,r12);
1030
1031             /* EWALD ELECTROSTATICS */
1032             
1033             /* Analytical PME correction */
1034             zeta2            = _mm256_mul_ps(beta2,rsq12);
1035             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1036             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1037             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1038             felec            = _mm256_mul_ps(qq12,felec);
1039             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1040             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1041             velec            = _mm256_sub_ps(rinv12,pmecorrV);
1042             velec            = _mm256_mul_ps(qq12,velec);
1043             
1044             /* Update potential sum for this i atom from the interaction with this j atom. */
1045             velec            = _mm256_andnot_ps(dummy_mask,velec);
1046             velecsum         = _mm256_add_ps(velecsum,velec);
1047
1048             fscal            = felec;
1049
1050             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1051
1052             /* Calculate temporary vectorial force */
1053             tx               = _mm256_mul_ps(fscal,dx12);
1054             ty               = _mm256_mul_ps(fscal,dy12);
1055             tz               = _mm256_mul_ps(fscal,dz12);
1056
1057             /* Update vectorial force */
1058             fix1             = _mm256_add_ps(fix1,tx);
1059             fiy1             = _mm256_add_ps(fiy1,ty);
1060             fiz1             = _mm256_add_ps(fiz1,tz);
1061
1062             fjx2             = _mm256_add_ps(fjx2,tx);
1063             fjy2             = _mm256_add_ps(fjy2,ty);
1064             fjz2             = _mm256_add_ps(fjz2,tz);
1065
1066             /**************************
1067              * CALCULATE INTERACTIONS *
1068              **************************/
1069
1070             r20              = _mm256_mul_ps(rsq20,rinv20);
1071             r20              = _mm256_andnot_ps(dummy_mask,r20);
1072
1073             /* EWALD ELECTROSTATICS */
1074             
1075             /* Analytical PME correction */
1076             zeta2            = _mm256_mul_ps(beta2,rsq20);
1077             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1078             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1079             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1080             felec            = _mm256_mul_ps(qq20,felec);
1081             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1082             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1083             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1084             velec            = _mm256_mul_ps(qq20,velec);
1085             
1086             /* Update potential sum for this i atom from the interaction with this j atom. */
1087             velec            = _mm256_andnot_ps(dummy_mask,velec);
1088             velecsum         = _mm256_add_ps(velecsum,velec);
1089
1090             fscal            = felec;
1091
1092             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1093
1094             /* Calculate temporary vectorial force */
1095             tx               = _mm256_mul_ps(fscal,dx20);
1096             ty               = _mm256_mul_ps(fscal,dy20);
1097             tz               = _mm256_mul_ps(fscal,dz20);
1098
1099             /* Update vectorial force */
1100             fix2             = _mm256_add_ps(fix2,tx);
1101             fiy2             = _mm256_add_ps(fiy2,ty);
1102             fiz2             = _mm256_add_ps(fiz2,tz);
1103
1104             fjx0             = _mm256_add_ps(fjx0,tx);
1105             fjy0             = _mm256_add_ps(fjy0,ty);
1106             fjz0             = _mm256_add_ps(fjz0,tz);
1107
1108             /**************************
1109              * CALCULATE INTERACTIONS *
1110              **************************/
1111
1112             r21              = _mm256_mul_ps(rsq21,rinv21);
1113             r21              = _mm256_andnot_ps(dummy_mask,r21);
1114
1115             /* EWALD ELECTROSTATICS */
1116             
1117             /* Analytical PME correction */
1118             zeta2            = _mm256_mul_ps(beta2,rsq21);
1119             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1120             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1121             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1122             felec            = _mm256_mul_ps(qq21,felec);
1123             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1124             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1125             velec            = _mm256_sub_ps(rinv21,pmecorrV);
1126             velec            = _mm256_mul_ps(qq21,velec);
1127             
1128             /* Update potential sum for this i atom from the interaction with this j atom. */
1129             velec            = _mm256_andnot_ps(dummy_mask,velec);
1130             velecsum         = _mm256_add_ps(velecsum,velec);
1131
1132             fscal            = felec;
1133
1134             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1135
1136             /* Calculate temporary vectorial force */
1137             tx               = _mm256_mul_ps(fscal,dx21);
1138             ty               = _mm256_mul_ps(fscal,dy21);
1139             tz               = _mm256_mul_ps(fscal,dz21);
1140
1141             /* Update vectorial force */
1142             fix2             = _mm256_add_ps(fix2,tx);
1143             fiy2             = _mm256_add_ps(fiy2,ty);
1144             fiz2             = _mm256_add_ps(fiz2,tz);
1145
1146             fjx1             = _mm256_add_ps(fjx1,tx);
1147             fjy1             = _mm256_add_ps(fjy1,ty);
1148             fjz1             = _mm256_add_ps(fjz1,tz);
1149
1150             /**************************
1151              * CALCULATE INTERACTIONS *
1152              **************************/
1153
1154             r22              = _mm256_mul_ps(rsq22,rinv22);
1155             r22              = _mm256_andnot_ps(dummy_mask,r22);
1156
1157             /* EWALD ELECTROSTATICS */
1158             
1159             /* Analytical PME correction */
1160             zeta2            = _mm256_mul_ps(beta2,rsq22);
1161             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1162             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1163             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1164             felec            = _mm256_mul_ps(qq22,felec);
1165             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1166             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1167             velec            = _mm256_sub_ps(rinv22,pmecorrV);
1168             velec            = _mm256_mul_ps(qq22,velec);
1169             
1170             /* Update potential sum for this i atom from the interaction with this j atom. */
1171             velec            = _mm256_andnot_ps(dummy_mask,velec);
1172             velecsum         = _mm256_add_ps(velecsum,velec);
1173
1174             fscal            = felec;
1175
1176             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1177
1178             /* Calculate temporary vectorial force */
1179             tx               = _mm256_mul_ps(fscal,dx22);
1180             ty               = _mm256_mul_ps(fscal,dy22);
1181             tz               = _mm256_mul_ps(fscal,dz22);
1182
1183             /* Update vectorial force */
1184             fix2             = _mm256_add_ps(fix2,tx);
1185             fiy2             = _mm256_add_ps(fiy2,ty);
1186             fiz2             = _mm256_add_ps(fiz2,tz);
1187
1188             fjx2             = _mm256_add_ps(fjx2,tx);
1189             fjy2             = _mm256_add_ps(fjy2,ty);
1190             fjz2             = _mm256_add_ps(fjz2,tz);
1191
1192             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1193             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1194             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1195             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1196             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1197             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1198             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1199             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1200
1201             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1202                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1203
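            /* The j-force pointers above redirect masked-out lanes: real atoms write into
             * the force array f, while padded entries with negative jnrlist values point at
             * the local scratch buffer, so the swizzled decrement can store unconditionally
             * for all eight lanes. A scalar sketch of the same idea:
             *
             *   float *dst = (jnr >= 0) ? f + DIM*jnr : scratch;
             *   dst[XX] -= fjx;  dst[YY] -= fjy;  dst[ZZ] -= fjz;
             */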
1204             /* Inner loop uses 777 flops */
1205         }
1206
1207         /* End of innermost loop */
1208
1209         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1210                                                  f+i_coord_offset,fshift+i_shift_offset);
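        /* The call above reduces the eight SIMD lanes of the three i-site force
         * accumulators into f[] for this water and adds the same net force to fshift[]
         * for the shift vector of this list, which the later virial calculation uses.
         */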
1211
1212         ggid                        = gid[iidx];
1213         /* Update potential energies */
1214         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1215         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
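        /* gmx_mm256_update_1pot_ps (as the name suggests) horizontally sums the eight
         * lanes of the electrostatic and VdW potential accumulators and adds each result
         * to the energy-group entry selected by ggid.
         */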
1216
1217         /* Increment number of inner iterations */
1218         inneriter                  += j_index_end - j_index_start;
1219
1220         /* Outer loop uses 20 flops */
1221     }
1222
1223     /* Increment number of outer iterations */
1224     outeriter        += nri;
1225
1226     /* Update outer/inner flops */
1227
1228     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*777);
1229 }
1230 /*
1231  * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_single
1232  * Electrostatics interaction: Ewald
1233  * VdW interaction:            LennardJones
1234  * Geometry:                   Water3-Water3
1235  * Calculate force/pot:        Force
1236  */
1237 void
1238 nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_single
1239                     (t_nblist                    * gmx_restrict       nlist,
1240                      rvec                        * gmx_restrict          xx,
1241                      rvec                        * gmx_restrict          ff,
1242                      t_forcerec                  * gmx_restrict          fr,
1243                      t_mdatoms                   * gmx_restrict     mdatoms,
1244                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1245                      t_nrnb                      * gmx_restrict        nrnb)
1246 {
1247     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
1248      * just 0 for non-waters.
1249      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
1250      * jnr indices corresponding to data put in the eight positions in the SIMD register.
1251      */
1252     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1253     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1254     int              jnrA,jnrB,jnrC,jnrD;
1255     int              jnrE,jnrF,jnrG,jnrH;
1256     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1257     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1258     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1259     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1260     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1261     real             rcutoff_scalar;
1262     real             *shiftvec,*fshift,*x,*f;
1263     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1264     real             scratch[4*DIM];
1265     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1266     real *           vdwioffsetptr0;
1267     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1268     real *           vdwioffsetptr1;
1269     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1270     real *           vdwioffsetptr2;
1271     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1272     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1273     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1274     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1275     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1276     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1277     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1278     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1279     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1280     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1281     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1282     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1283     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1284     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1285     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1286     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1287     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1288     real             *charge;
1289     int              nvdwtype;
1290     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1291     int              *vdwtype;
1292     real             *vdwparam;
1293     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
1294     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
1295     __m256i          ewitab;
1296     __m128i          ewitab_lo,ewitab_hi;
1297     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1298     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1299     real             *ewtab;
1300     __m256           dummy_mask,cutoff_mask;
1301     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1302     __m256           one     = _mm256_set1_ps(1.0);
1303     __m256           two     = _mm256_set1_ps(2.0);
1304     x                = xx[0];
1305     f                = ff[0];
1306
1307     nri              = nlist->nri;
1308     iinr             = nlist->iinr;
1309     jindex           = nlist->jindex;
1310     jjnr             = nlist->jjnr;
1311     shiftidx         = nlist->shift;
1312     gid              = nlist->gid;
1313     shiftvec         = fr->shift_vec[0];
1314     fshift           = fr->fshift[0];
1315     facel            = _mm256_set1_ps(fr->epsfac);
1316     charge           = mdatoms->chargeA;
1317     nvdwtype         = fr->ntype;
1318     vdwparam         = fr->nbfp;
1319     vdwtype          = mdatoms->typeA;
1320
1321     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1322     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1323     beta2            = _mm256_mul_ps(beta,beta);
1324     beta3            = _mm256_mul_ps(beta,beta2);
1325
1326     ewtab            = fr->ic->tabq_coul_F;
1327     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1328     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
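    /* The Ewald table parameters above are initialized by the kernel generator, but this
     * kernel variant evaluates the correction analytically in the inner loops below, so
     * the table itself does not appear to be read in this function.
     */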
1329
1330     /* Setup water-specific parameters */
1331     inr              = nlist->iinr[0];
1332     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1333     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1334     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1335     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1336
1337     jq0              = _mm256_set1_ps(charge[inr+0]);
1338     jq1              = _mm256_set1_ps(charge[inr+1]);
1339     jq2              = _mm256_set1_ps(charge[inr+2]);
1340     vdwjidx0A        = 2*vdwtype[inr+0];
1341     qq00             = _mm256_mul_ps(iq0,jq0);
1342     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1343     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1344     qq01             = _mm256_mul_ps(iq0,jq1);
1345     qq02             = _mm256_mul_ps(iq0,jq2);
1346     qq10             = _mm256_mul_ps(iq1,jq0);
1347     qq11             = _mm256_mul_ps(iq1,jq1);
1348     qq12             = _mm256_mul_ps(iq1,jq2);
1349     qq20             = _mm256_mul_ps(iq2,jq0);
1350     qq21             = _mm256_mul_ps(iq2,jq1);
1351     qq22             = _mm256_mul_ps(iq2,jq2);
1352
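    /* Because every i molecule and every j molecule in a Water3-Water3 list is the same
     * 3-site water, all nine charge products and the single 0-0 (typically O-O)
     * Lennard-Jones pair can be broadcast once here instead of being reloaded per pair in
     * the inner loops. Conceptually, with q0,q1,q2 the three site charges,
     *
     *   qqXY = epsfac * qX * qY          for X,Y in {0,1,2}
     *
     * and the (c6,c12) pair is read from vdwparam at offset 2*nvdwtype*type0 + 2*type0,
     * which is the layout implied by the vdwioffsetptr0/vdwjidx0A indexing above.
     */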
1353     /* Avoid spurious compiler warnings about possibly-uninitialized variables */
1354     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1355     j_coord_offsetA = 0;
1356     j_coord_offsetB = 0;
1357     j_coord_offsetC = 0;
1358     j_coord_offsetD = 0;
1359     j_coord_offsetE = 0;
1360     j_coord_offsetF = 0;
1361     j_coord_offsetG = 0;
1362     j_coord_offsetH = 0;
1363
1364     outeriter        = 0;
1365     inneriter        = 0;
1366
1367     for(iidx=0;iidx<4*DIM;iidx++)
1368     {
1369         scratch[iidx] = 0.0;
1370     }
1371
1372     /* Start outer loop over neighborlists */
1373     for(iidx=0; iidx<nri; iidx++)
1374     {
1375         /* Load shift vector for this list */
1376         i_shift_offset   = DIM*shiftidx[iidx];
1377
1378         /* Load limits for loop over neighbors */
1379         j_index_start    = jindex[iidx];
1380         j_index_end      = jindex[iidx+1];
1381
1382         /* Get outer coordinate index */
1383         inr              = iinr[iidx];
1384         i_coord_offset   = DIM*inr;
1385
1386         /* Load i particle coords and add shift vector */
1387         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1388                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1389
1390         fix0             = _mm256_setzero_ps();
1391         fiy0             = _mm256_setzero_ps();
1392         fiz0             = _mm256_setzero_ps();
1393         fix1             = _mm256_setzero_ps();
1394         fiy1             = _mm256_setzero_ps();
1395         fiz1             = _mm256_setzero_ps();
1396         fix2             = _mm256_setzero_ps();
1397         fiy2             = _mm256_setzero_ps();
1398         fiz2             = _mm256_setzero_ps();
1399
1400         /* Start inner kernel loop */
1401         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1402         {
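            /* Full 8-wide iterations run only while all eight jjnr entries are valid
             * (>= 0); the neighbor list is padded with negative indices, and the leftover
             * partial group is handled by the masked epilogue after this loop.
             */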
1403
1404             /* Get j neighbor index, and coordinate index */
1405             jnrA             = jjnr[jidx];
1406             jnrB             = jjnr[jidx+1];
1407             jnrC             = jjnr[jidx+2];
1408             jnrD             = jjnr[jidx+3];
1409             jnrE             = jjnr[jidx+4];
1410             jnrF             = jjnr[jidx+5];
1411             jnrG             = jjnr[jidx+6];
1412             jnrH             = jjnr[jidx+7];
1413             j_coord_offsetA  = DIM*jnrA;
1414             j_coord_offsetB  = DIM*jnrB;
1415             j_coord_offsetC  = DIM*jnrC;
1416             j_coord_offsetD  = DIM*jnrD;
1417             j_coord_offsetE  = DIM*jnrE;
1418             j_coord_offsetF  = DIM*jnrF;
1419             j_coord_offsetG  = DIM*jnrG;
1420             j_coord_offsetH  = DIM*jnrH;
1421
1422             /* load j atom coordinates */
1423             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1424                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1425                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1426                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1427                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
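            /* gmx_mm256_load_3rvec_8ptr_swizzle_ps gathers the three site coordinates of
             * eight j waters and transposes them from the per-atom layout of x[] into one
             * SIMD register per coordinate component (jx0..jz2), ready for 8-wide math.
             */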
1428
1429             /* Calculate displacement vector */
1430             dx00             = _mm256_sub_ps(ix0,jx0);
1431             dy00             = _mm256_sub_ps(iy0,jy0);
1432             dz00             = _mm256_sub_ps(iz0,jz0);
1433             dx01             = _mm256_sub_ps(ix0,jx1);
1434             dy01             = _mm256_sub_ps(iy0,jy1);
1435             dz01             = _mm256_sub_ps(iz0,jz1);
1436             dx02             = _mm256_sub_ps(ix0,jx2);
1437             dy02             = _mm256_sub_ps(iy0,jy2);
1438             dz02             = _mm256_sub_ps(iz0,jz2);
1439             dx10             = _mm256_sub_ps(ix1,jx0);
1440             dy10             = _mm256_sub_ps(iy1,jy0);
1441             dz10             = _mm256_sub_ps(iz1,jz0);
1442             dx11             = _mm256_sub_ps(ix1,jx1);
1443             dy11             = _mm256_sub_ps(iy1,jy1);
1444             dz11             = _mm256_sub_ps(iz1,jz1);
1445             dx12             = _mm256_sub_ps(ix1,jx2);
1446             dy12             = _mm256_sub_ps(iy1,jy2);
1447             dz12             = _mm256_sub_ps(iz1,jz2);
1448             dx20             = _mm256_sub_ps(ix2,jx0);
1449             dy20             = _mm256_sub_ps(iy2,jy0);
1450             dz20             = _mm256_sub_ps(iz2,jz0);
1451             dx21             = _mm256_sub_ps(ix2,jx1);
1452             dy21             = _mm256_sub_ps(iy2,jy1);
1453             dz21             = _mm256_sub_ps(iz2,jz1);
1454             dx22             = _mm256_sub_ps(ix2,jx2);
1455             dy22             = _mm256_sub_ps(iy2,jy2);
1456             dz22             = _mm256_sub_ps(iz2,jz2);
1457
1458             /* Calculate squared distance and things based on it */
1459             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1460             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1461             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1462             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1463             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1464             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1465             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1466             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1467             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1468
1469             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1470             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
1471             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
1472             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1473             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1474             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1475             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1476             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1477             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1478
1479             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1480             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
1481             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
1482             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1483             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1484             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1485             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1486             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1487             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1488
1489             fjx0             = _mm256_setzero_ps();
1490             fjy0             = _mm256_setzero_ps();
1491             fjz0             = _mm256_setzero_ps();
1492             fjx1             = _mm256_setzero_ps();
1493             fjy1             = _mm256_setzero_ps();
1494             fjz1             = _mm256_setzero_ps();
1495             fjx2             = _mm256_setzero_ps();
1496             fjy2             = _mm256_setzero_ps();
1497             fjz2             = _mm256_setzero_ps();
1498
1499             /**************************
1500              * CALCULATE INTERACTIONS *
1501              **************************/
1502
1503             r00              = _mm256_mul_ps(rsq00,rinv00);
1504
1505             /* EWALD ELECTROSTATICS */
1506             
1507             /* Analytical PME correction */
1508             zeta2            = _mm256_mul_ps(beta2,rsq00);
1509             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1510             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1511             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1512             felec            = _mm256_mul_ps(qq00,felec);
1513             
1514             /* LENNARD-JONES DISPERSION/REPULSION */
1515
1516             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1517             fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
1518
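            /* Lennard-Jones acts only on this 0-0 site pair; the remaining eight site pairs
             * in the water-water interaction are charge-charge only. In scalar form the
             * line above computes
             *
             *   fvdw = (c12*r^-12 - c6*r^-6) * r^-2
             *
             * i.e. the LJ force divided by r, assuming the usual GROMACS convention that the
             * c6/c12 values stored in fr->nbfp already carry the factors 6 and 12 (which is
             * why one_sixth and one_twelfth are declared above for the potential terms).
             */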
1519             fscal            = _mm256_add_ps(felec,fvdw);
1520
1521             /* Calculate temporary vectorial force */
1522             tx               = _mm256_mul_ps(fscal,dx00);
1523             ty               = _mm256_mul_ps(fscal,dy00);
1524             tz               = _mm256_mul_ps(fscal,dz00);
1525
1526             /* Update vectorial force */
1527             fix0             = _mm256_add_ps(fix0,tx);
1528             fiy0             = _mm256_add_ps(fiy0,ty);
1529             fiz0             = _mm256_add_ps(fiz0,tz);
1530
1531             fjx0             = _mm256_add_ps(fjx0,tx);
1532             fjy0             = _mm256_add_ps(fjy0,ty);
1533             fjz0             = _mm256_add_ps(fjz0,tz);
1534
1535             /**************************
1536              * CALCULATE INTERACTIONS *
1537              **************************/
1538
1539             r01              = _mm256_mul_ps(rsq01,rinv01);
1540
1541             /* EWALD ELECTROSTATICS */
1542             
1543             /* Analytical PME correction */
1544             zeta2            = _mm256_mul_ps(beta2,rsq01);
1545             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1546             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1547             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1548             felec            = _mm256_mul_ps(qq01,felec);
1549             
1550             fscal            = felec;
1551
1552             /* Calculate temporary vectorial force */
1553             tx               = _mm256_mul_ps(fscal,dx01);
1554             ty               = _mm256_mul_ps(fscal,dy01);
1555             tz               = _mm256_mul_ps(fscal,dz01);
1556
1557             /* Update vectorial force */
1558             fix0             = _mm256_add_ps(fix0,tx);
1559             fiy0             = _mm256_add_ps(fiy0,ty);
1560             fiz0             = _mm256_add_ps(fiz0,tz);
1561
1562             fjx1             = _mm256_add_ps(fjx1,tx);
1563             fjy1             = _mm256_add_ps(fjy1,ty);
1564             fjz1             = _mm256_add_ps(fjz1,tz);
1565
1566             /**************************
1567              * CALCULATE INTERACTIONS *
1568              **************************/
1569
1570             r02              = _mm256_mul_ps(rsq02,rinv02);
1571
1572             /* EWALD ELECTROSTATICS */
1573             
1574             /* Analytical PME correction */
1575             zeta2            = _mm256_mul_ps(beta2,rsq02);
1576             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1577             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1578             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1579             felec            = _mm256_mul_ps(qq02,felec);
1580             
1581             fscal            = felec;
1582
1583             /* Calculate temporary vectorial force */
1584             tx               = _mm256_mul_ps(fscal,dx02);
1585             ty               = _mm256_mul_ps(fscal,dy02);
1586             tz               = _mm256_mul_ps(fscal,dz02);
1587
1588             /* Update vectorial force */
1589             fix0             = _mm256_add_ps(fix0,tx);
1590             fiy0             = _mm256_add_ps(fiy0,ty);
1591             fiz0             = _mm256_add_ps(fiz0,tz);
1592
1593             fjx2             = _mm256_add_ps(fjx2,tx);
1594             fjy2             = _mm256_add_ps(fjy2,ty);
1595             fjz2             = _mm256_add_ps(fjz2,tz);
1596
1597             /**************************
1598              * CALCULATE INTERACTIONS *
1599              **************************/
1600
1601             r10              = _mm256_mul_ps(rsq10,rinv10);
1602
1603             /* EWALD ELECTROSTATICS */
1604             
1605             /* Analytical PME correction */
1606             zeta2            = _mm256_mul_ps(beta2,rsq10);
1607             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1608             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1609             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1610             felec            = _mm256_mul_ps(qq10,felec);
1611             
1612             fscal            = felec;
1613
1614             /* Calculate temporary vectorial force */
1615             tx               = _mm256_mul_ps(fscal,dx10);
1616             ty               = _mm256_mul_ps(fscal,dy10);
1617             tz               = _mm256_mul_ps(fscal,dz10);
1618
1619             /* Update vectorial force */
1620             fix1             = _mm256_add_ps(fix1,tx);
1621             fiy1             = _mm256_add_ps(fiy1,ty);
1622             fiz1             = _mm256_add_ps(fiz1,tz);
1623
1624             fjx0             = _mm256_add_ps(fjx0,tx);
1625             fjy0             = _mm256_add_ps(fjy0,ty);
1626             fjz0             = _mm256_add_ps(fjz0,tz);
1627
1628             /**************************
1629              * CALCULATE INTERACTIONS *
1630              **************************/
1631
1632             r11              = _mm256_mul_ps(rsq11,rinv11);
1633
1634             /* EWALD ELECTROSTATICS */
1635             
1636             /* Analytical PME correction */
1637             zeta2            = _mm256_mul_ps(beta2,rsq11);
1638             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1639             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1640             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1641             felec            = _mm256_mul_ps(qq11,felec);
1642             
1643             fscal            = felec;
1644
1645             /* Calculate temporary vectorial force */
1646             tx               = _mm256_mul_ps(fscal,dx11);
1647             ty               = _mm256_mul_ps(fscal,dy11);
1648             tz               = _mm256_mul_ps(fscal,dz11);
1649
1650             /* Update vectorial force */
1651             fix1             = _mm256_add_ps(fix1,tx);
1652             fiy1             = _mm256_add_ps(fiy1,ty);
1653             fiz1             = _mm256_add_ps(fiz1,tz);
1654
1655             fjx1             = _mm256_add_ps(fjx1,tx);
1656             fjy1             = _mm256_add_ps(fjy1,ty);
1657             fjz1             = _mm256_add_ps(fjz1,tz);
1658
1659             /**************************
1660              * CALCULATE INTERACTIONS *
1661              **************************/
1662
1663             r12              = _mm256_mul_ps(rsq12,rinv12);
1664
1665             /* EWALD ELECTROSTATICS */
1666             
1667             /* Analytical PME correction */
1668             zeta2            = _mm256_mul_ps(beta2,rsq12);
1669             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1670             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1671             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1672             felec            = _mm256_mul_ps(qq12,felec);
1673             
1674             fscal            = felec;
1675
1676             /* Calculate temporary vectorial force */
1677             tx               = _mm256_mul_ps(fscal,dx12);
1678             ty               = _mm256_mul_ps(fscal,dy12);
1679             tz               = _mm256_mul_ps(fscal,dz12);
1680
1681             /* Update vectorial force */
1682             fix1             = _mm256_add_ps(fix1,tx);
1683             fiy1             = _mm256_add_ps(fiy1,ty);
1684             fiz1             = _mm256_add_ps(fiz1,tz);
1685
1686             fjx2             = _mm256_add_ps(fjx2,tx);
1687             fjy2             = _mm256_add_ps(fjy2,ty);
1688             fjz2             = _mm256_add_ps(fjz2,tz);
1689
1690             /**************************
1691              * CALCULATE INTERACTIONS *
1692              **************************/
1693
1694             r20              = _mm256_mul_ps(rsq20,rinv20);
1695
1696             /* EWALD ELECTROSTATICS */
1697             
1698             /* Analytical PME correction */
1699             zeta2            = _mm256_mul_ps(beta2,rsq20);
1700             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1701             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1702             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1703             felec            = _mm256_mul_ps(qq20,felec);
1704             
1705             fscal            = felec;
1706
1707             /* Calculate temporary vectorial force */
1708             tx               = _mm256_mul_ps(fscal,dx20);
1709             ty               = _mm256_mul_ps(fscal,dy20);
1710             tz               = _mm256_mul_ps(fscal,dz20);
1711
1712             /* Update vectorial force */
1713             fix2             = _mm256_add_ps(fix2,tx);
1714             fiy2             = _mm256_add_ps(fiy2,ty);
1715             fiz2             = _mm256_add_ps(fiz2,tz);
1716
1717             fjx0             = _mm256_add_ps(fjx0,tx);
1718             fjy0             = _mm256_add_ps(fjy0,ty);
1719             fjz0             = _mm256_add_ps(fjz0,tz);
1720
1721             /**************************
1722              * CALCULATE INTERACTIONS *
1723              **************************/
1724
1725             r21              = _mm256_mul_ps(rsq21,rinv21);
1726
1727             /* EWALD ELECTROSTATICS */
1728             
1729             /* Analytical PME correction */
1730             zeta2            = _mm256_mul_ps(beta2,rsq21);
1731             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1732             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1733             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1734             felec            = _mm256_mul_ps(qq21,felec);
1735             
1736             fscal            = felec;
1737
1738             /* Calculate temporary vectorial force */
1739             tx               = _mm256_mul_ps(fscal,dx21);
1740             ty               = _mm256_mul_ps(fscal,dy21);
1741             tz               = _mm256_mul_ps(fscal,dz21);
1742
1743             /* Update vectorial force */
1744             fix2             = _mm256_add_ps(fix2,tx);
1745             fiy2             = _mm256_add_ps(fiy2,ty);
1746             fiz2             = _mm256_add_ps(fiz2,tz);
1747
1748             fjx1             = _mm256_add_ps(fjx1,tx);
1749             fjy1             = _mm256_add_ps(fjy1,ty);
1750             fjz1             = _mm256_add_ps(fjz1,tz);
1751
1752             /**************************
1753              * CALCULATE INTERACTIONS *
1754              **************************/
1755
1756             r22              = _mm256_mul_ps(rsq22,rinv22);
1757
1758             /* EWALD ELECTROSTATICS */
1759             
1760             /* Analytical PME correction */
1761             zeta2            = _mm256_mul_ps(beta2,rsq22);
1762             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1763             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1764             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1765             felec            = _mm256_mul_ps(qq22,felec);
1766             
1767             fscal            = felec;
1768
1769             /* Calculate temporary vectorial force */
1770             tx               = _mm256_mul_ps(fscal,dx22);
1771             ty               = _mm256_mul_ps(fscal,dy22);
1772             tz               = _mm256_mul_ps(fscal,dz22);
1773
1774             /* Update vectorial force */
1775             fix2             = _mm256_add_ps(fix2,tx);
1776             fiy2             = _mm256_add_ps(fiy2,ty);
1777             fiz2             = _mm256_add_ps(fiz2,tz);
1778
1779             fjx2             = _mm256_add_ps(fjx2,tx);
1780             fjy2             = _mm256_add_ps(fjy2,ty);
1781             fjz2             = _mm256_add_ps(fjz2,tz);
1782
1783             fjptrA             = f+j_coord_offsetA;
1784             fjptrB             = f+j_coord_offsetB;
1785             fjptrC             = f+j_coord_offsetC;
1786             fjptrD             = f+j_coord_offsetD;
1787             fjptrE             = f+j_coord_offsetE;
1788             fjptrF             = f+j_coord_offsetF;
1789             fjptrG             = f+j_coord_offsetG;
1790             fjptrH             = f+j_coord_offsetH;
1791
1792             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1793                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1794
1795             /* Inner loop uses 511 flops */
1796         }
1797
1798         if(jidx<j_index_end)
1799         {
1800
1801             /* Get j neighbor index, and coordinate index */
1802             jnrlistA         = jjnr[jidx];
1803             jnrlistB         = jjnr[jidx+1];
1804             jnrlistC         = jjnr[jidx+2];
1805             jnrlistD         = jjnr[jidx+3];
1806             jnrlistE         = jjnr[jidx+4];
1807             jnrlistF         = jjnr[jidx+5];
1808             jnrlistG         = jjnr[jidx+6];
1809             jnrlistH         = jjnr[jidx+7];
1810             /* The padded jjnr entries for non-real (dummy) atoms are negative, so the
1811              * comparison below yields a mask that is 0xFFFFFFFF for dummy entries and 0x0
1812              * for real ones; use it as val = _mm256_andnot_ps(dummy_mask,val) to clear them.
1813              */
1814             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1815                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
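            /* A scalar sketch of the per-lane mask logic above: each 32-bit lane is
             *
             *   mask_lane = (jjnr[jidx + lane] < 0) ? 0xFFFFFFFF : 0x0;
             *
             * built from two 128-bit comparisons, one covering jjnr[jidx..jidx+3] and the
             * other jjnr[jidx+4..jidx+7].
             */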
1816                                             
1817             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1818             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1819             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1820             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1821             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
1822             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
1823             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
1824             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
1825             j_coord_offsetA  = DIM*jnrA;
1826             j_coord_offsetB  = DIM*jnrB;
1827             j_coord_offsetC  = DIM*jnrC;
1828             j_coord_offsetD  = DIM*jnrD;
1829             j_coord_offsetE  = DIM*jnrE;
1830             j_coord_offsetF  = DIM*jnrF;
1831             j_coord_offsetG  = DIM*jnrG;
1832             j_coord_offsetH  = DIM*jnrH;
1833
1834             /* load j atom coordinates */
1835             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1836                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1837                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1838                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1839                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1840
1841             /* Calculate displacement vector */
1842             dx00             = _mm256_sub_ps(ix0,jx0);
1843             dy00             = _mm256_sub_ps(iy0,jy0);
1844             dz00             = _mm256_sub_ps(iz0,jz0);
1845             dx01             = _mm256_sub_ps(ix0,jx1);
1846             dy01             = _mm256_sub_ps(iy0,jy1);
1847             dz01             = _mm256_sub_ps(iz0,jz1);
1848             dx02             = _mm256_sub_ps(ix0,jx2);
1849             dy02             = _mm256_sub_ps(iy0,jy2);
1850             dz02             = _mm256_sub_ps(iz0,jz2);
1851             dx10             = _mm256_sub_ps(ix1,jx0);
1852             dy10             = _mm256_sub_ps(iy1,jy0);
1853             dz10             = _mm256_sub_ps(iz1,jz0);
1854             dx11             = _mm256_sub_ps(ix1,jx1);
1855             dy11             = _mm256_sub_ps(iy1,jy1);
1856             dz11             = _mm256_sub_ps(iz1,jz1);
1857             dx12             = _mm256_sub_ps(ix1,jx2);
1858             dy12             = _mm256_sub_ps(iy1,jy2);
1859             dz12             = _mm256_sub_ps(iz1,jz2);
1860             dx20             = _mm256_sub_ps(ix2,jx0);
1861             dy20             = _mm256_sub_ps(iy2,jy0);
1862             dz20             = _mm256_sub_ps(iz2,jz0);
1863             dx21             = _mm256_sub_ps(ix2,jx1);
1864             dy21             = _mm256_sub_ps(iy2,jy1);
1865             dz21             = _mm256_sub_ps(iz2,jz1);
1866             dx22             = _mm256_sub_ps(ix2,jx2);
1867             dy22             = _mm256_sub_ps(iy2,jy2);
1868             dz22             = _mm256_sub_ps(iz2,jz2);
1869
1870             /* Calculate squared distance and things based on it */
1871             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1872             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1873             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1874             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1875             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1876             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1877             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1878             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1879             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1880
1881             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1882             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
1883             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
1884             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1885             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1886             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1887             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1888             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1889             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1890
1891             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1892             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
1893             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
1894             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1895             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1896             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1897             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1898             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1899             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1900
1901             fjx0             = _mm256_setzero_ps();
1902             fjy0             = _mm256_setzero_ps();
1903             fjz0             = _mm256_setzero_ps();
1904             fjx1             = _mm256_setzero_ps();
1905             fjy1             = _mm256_setzero_ps();
1906             fjz1             = _mm256_setzero_ps();
1907             fjx2             = _mm256_setzero_ps();
1908             fjy2             = _mm256_setzero_ps();
1909             fjz2             = _mm256_setzero_ps();
1910
1911             /**************************
1912              * CALCULATE INTERACTIONS *
1913              **************************/
1914
1915             r00              = _mm256_mul_ps(rsq00,rinv00);
1916             r00              = _mm256_andnot_ps(dummy_mask,r00);
1917
1918             /* EWALD ELECTROSTATICS */
1919             
1920             /* Analytical PME correction */
1921             zeta2            = _mm256_mul_ps(beta2,rsq00);
1922             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1923             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1924             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1925             felec            = _mm256_mul_ps(qq00,felec);
1926             
1927             /* LENNARD-JONES DISPERSION/REPULSION */
1928
1929             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1930             fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
1931
1932             fscal            = _mm256_add_ps(felec,fvdw);
1933
1934             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1935
1936             /* Calculate temporary vectorial force */
1937             tx               = _mm256_mul_ps(fscal,dx00);
1938             ty               = _mm256_mul_ps(fscal,dy00);
1939             tz               = _mm256_mul_ps(fscal,dz00);
1940
1941             /* Update vectorial force */
1942             fix0             = _mm256_add_ps(fix0,tx);
1943             fiy0             = _mm256_add_ps(fiy0,ty);
1944             fiz0             = _mm256_add_ps(fiz0,tz);
1945
1946             fjx0             = _mm256_add_ps(fjx0,tx);
1947             fjy0             = _mm256_add_ps(fjy0,ty);
1948             fjz0             = _mm256_add_ps(fjz0,tz);
1949
1950             /**************************
1951              * CALCULATE INTERACTIONS *
1952              **************************/
1953
1954             r01              = _mm256_mul_ps(rsq01,rinv01);
1955             r01              = _mm256_andnot_ps(dummy_mask,r01);
1956
1957             /* EWALD ELECTROSTATICS */
1958             
1959             /* Analytical PME correction */
1960             zeta2            = _mm256_mul_ps(beta2,rsq01);
1961             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1962             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1963             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1964             felec            = _mm256_mul_ps(qq01,felec);
1965             
1966             fscal            = felec;
1967
1968             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1969
1970             /* Calculate temporary vectorial force */
1971             tx               = _mm256_mul_ps(fscal,dx01);
1972             ty               = _mm256_mul_ps(fscal,dy01);
1973             tz               = _mm256_mul_ps(fscal,dz01);
1974
1975             /* Update vectorial force */
1976             fix0             = _mm256_add_ps(fix0,tx);
1977             fiy0             = _mm256_add_ps(fiy0,ty);
1978             fiz0             = _mm256_add_ps(fiz0,tz);
1979
1980             fjx1             = _mm256_add_ps(fjx1,tx);
1981             fjy1             = _mm256_add_ps(fjy1,ty);
1982             fjz1             = _mm256_add_ps(fjz1,tz);
1983
1984             /**************************
1985              * CALCULATE INTERACTIONS *
1986              **************************/
1987
1988             r02              = _mm256_mul_ps(rsq02,rinv02);
1989             r02              = _mm256_andnot_ps(dummy_mask,r02);
1990
1991             /* EWALD ELECTROSTATICS */
1992             
1993             /* Analytical PME correction */
1994             zeta2            = _mm256_mul_ps(beta2,rsq02);
1995             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1996             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1997             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1998             felec            = _mm256_mul_ps(qq02,felec);
1999             
2000             fscal            = felec;
2001
2002             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2003
2004             /* Calculate temporary vectorial force */
2005             tx               = _mm256_mul_ps(fscal,dx02);
2006             ty               = _mm256_mul_ps(fscal,dy02);
2007             tz               = _mm256_mul_ps(fscal,dz02);
2008
2009             /* Update vectorial force */
2010             fix0             = _mm256_add_ps(fix0,tx);
2011             fiy0             = _mm256_add_ps(fiy0,ty);
2012             fiz0             = _mm256_add_ps(fiz0,tz);
2013
2014             fjx2             = _mm256_add_ps(fjx2,tx);
2015             fjy2             = _mm256_add_ps(fjy2,ty);
2016             fjz2             = _mm256_add_ps(fjz2,tz);
2017
2018             /**************************
2019              * CALCULATE INTERACTIONS *
2020              **************************/
2021
2022             r10              = _mm256_mul_ps(rsq10,rinv10);
2023             r10              = _mm256_andnot_ps(dummy_mask,r10);
2024
2025             /* EWALD ELECTROSTATICS */
2026             
2027             /* Analytical PME correction */
2028             zeta2            = _mm256_mul_ps(beta2,rsq10);
2029             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
2030             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2031             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2032             felec            = _mm256_mul_ps(qq10,felec);
2033             
2034             fscal            = felec;
2035
2036             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2037
2038             /* Calculate temporary vectorial force */
2039             tx               = _mm256_mul_ps(fscal,dx10);
2040             ty               = _mm256_mul_ps(fscal,dy10);
2041             tz               = _mm256_mul_ps(fscal,dz10);
2042
2043             /* Update vectorial force */
2044             fix1             = _mm256_add_ps(fix1,tx);
2045             fiy1             = _mm256_add_ps(fiy1,ty);
2046             fiz1             = _mm256_add_ps(fiz1,tz);
2047
2048             fjx0             = _mm256_add_ps(fjx0,tx);
2049             fjy0             = _mm256_add_ps(fjy0,ty);
2050             fjz0             = _mm256_add_ps(fjz0,tz);
2051
2052             /**************************
2053              * CALCULATE INTERACTIONS *
2054              **************************/
2055
2056             r11              = _mm256_mul_ps(rsq11,rinv11);
2057             r11              = _mm256_andnot_ps(dummy_mask,r11);
2058
2059             /* EWALD ELECTROSTATICS */
2060             
2061             /* Analytical PME correction */
2062             zeta2            = _mm256_mul_ps(beta2,rsq11);
2063             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2064             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2065             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2066             felec            = _mm256_mul_ps(qq11,felec);
2067             
2068             fscal            = felec;
2069
2070             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2071
2072             /* Calculate temporary vectorial force */
2073             tx               = _mm256_mul_ps(fscal,dx11);
2074             ty               = _mm256_mul_ps(fscal,dy11);
2075             tz               = _mm256_mul_ps(fscal,dz11);
2076
2077             /* Update vectorial force */
2078             fix1             = _mm256_add_ps(fix1,tx);
2079             fiy1             = _mm256_add_ps(fiy1,ty);
2080             fiz1             = _mm256_add_ps(fiz1,tz);
2081
2082             fjx1             = _mm256_add_ps(fjx1,tx);
2083             fjy1             = _mm256_add_ps(fjy1,ty);
2084             fjz1             = _mm256_add_ps(fjz1,tz);
2085
2086             /**************************
2087              * CALCULATE INTERACTIONS *
2088              **************************/
2089
2090             r12              = _mm256_mul_ps(rsq12,rinv12);
2091             r12              = _mm256_andnot_ps(dummy_mask,r12);
2092
2093             /* EWALD ELECTROSTATICS */
2094             
2095             /* Analytical PME correction */
2096             zeta2            = _mm256_mul_ps(beta2,rsq12);
2097             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2098             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2099             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2100             felec            = _mm256_mul_ps(qq12,felec);
2101             
2102             fscal            = felec;
2103
2104             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2105
2106             /* Calculate temporary vectorial force */
2107             tx               = _mm256_mul_ps(fscal,dx12);
2108             ty               = _mm256_mul_ps(fscal,dy12);
2109             tz               = _mm256_mul_ps(fscal,dz12);
2110
2111             /* Update vectorial force */
2112             fix1             = _mm256_add_ps(fix1,tx);
2113             fiy1             = _mm256_add_ps(fiy1,ty);
2114             fiz1             = _mm256_add_ps(fiz1,tz);
2115
2116             fjx2             = _mm256_add_ps(fjx2,tx);
2117             fjy2             = _mm256_add_ps(fjy2,ty);
2118             fjz2             = _mm256_add_ps(fjz2,tz);
2119
2120             /**************************
2121              * CALCULATE INTERACTIONS *
2122              **************************/
2123
2124             r20              = _mm256_mul_ps(rsq20,rinv20);
2125             r20              = _mm256_andnot_ps(dummy_mask,r20);
2126
2127             /* EWALD ELECTROSTATICS */
2128             
2129             /* Analytical PME correction */
2130             zeta2            = _mm256_mul_ps(beta2,rsq20);
2131             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
2132             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2133             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2134             felec            = _mm256_mul_ps(qq20,felec);
2135             
2136             fscal            = felec;
2137
2138             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2139
2140             /* Calculate temporary vectorial force */
2141             tx               = _mm256_mul_ps(fscal,dx20);
2142             ty               = _mm256_mul_ps(fscal,dy20);
2143             tz               = _mm256_mul_ps(fscal,dz20);
2144
2145             /* Update vectorial force */
2146             fix2             = _mm256_add_ps(fix2,tx);
2147             fiy2             = _mm256_add_ps(fiy2,ty);
2148             fiz2             = _mm256_add_ps(fiz2,tz);
2149
2150             fjx0             = _mm256_add_ps(fjx0,tx);
2151             fjy0             = _mm256_add_ps(fjy0,ty);
2152             fjz0             = _mm256_add_ps(fjz0,tz);
2153
2154             /**************************
2155              * CALCULATE INTERACTIONS *
2156              **************************/
2157
2158             r21              = _mm256_mul_ps(rsq21,rinv21);
2159             r21              = _mm256_andnot_ps(dummy_mask,r21);
2160
2161             /* EWALD ELECTROSTATICS */
2162             
2163             /* Analytical PME correction */
2164             zeta2            = _mm256_mul_ps(beta2,rsq21);
2165             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2166             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2167             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2168             felec            = _mm256_mul_ps(qq21,felec);
2169             
2170             fscal            = felec;
2171
2172             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2173
2174             /* Calculate temporary vectorial force */
2175             tx               = _mm256_mul_ps(fscal,dx21);
2176             ty               = _mm256_mul_ps(fscal,dy21);
2177             tz               = _mm256_mul_ps(fscal,dz21);
2178
2179             /* Update vectorial force */
2180             fix2             = _mm256_add_ps(fix2,tx);
2181             fiy2             = _mm256_add_ps(fiy2,ty);
2182             fiz2             = _mm256_add_ps(fiz2,tz);
2183
2184             fjx1             = _mm256_add_ps(fjx1,tx);
2185             fjy1             = _mm256_add_ps(fjy1,ty);
2186             fjz1             = _mm256_add_ps(fjz1,tz);
2187
2188             /**************************
2189              * CALCULATE INTERACTIONS *
2190              **************************/
2191
2192             r22              = _mm256_mul_ps(rsq22,rinv22);
2193             r22              = _mm256_andnot_ps(dummy_mask,r22);
2194
2195             /* EWALD ELECTROSTATICS */
2196             
2197             /* Analytical PME correction */
2198             zeta2            = _mm256_mul_ps(beta2,rsq22);
2199             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
2200             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2201             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2202             felec            = _mm256_mul_ps(qq22,felec);
2203             
2204             fscal            = felec;
2205
2206             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2207
2208             /* Calculate temporary vectorial force */
2209             tx               = _mm256_mul_ps(fscal,dx22);
2210             ty               = _mm256_mul_ps(fscal,dy22);
2211             tz               = _mm256_mul_ps(fscal,dz22);
2212
2213             /* Update vectorial force */
2214             fix2             = _mm256_add_ps(fix2,tx);
2215             fiy2             = _mm256_add_ps(fiy2,ty);
2216             fiz2             = _mm256_add_ps(fiz2,tz);
2217
2218             fjx2             = _mm256_add_ps(fjx2,tx);
2219             fjy2             = _mm256_add_ps(fjy2,ty);
2220             fjz2             = _mm256_add_ps(fjz2,tz);
2221
2222             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2223             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2224             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2225             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2226             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2227             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2228             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2229             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
2230
2231             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2232                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2233
2234             /* Inner loop uses 520 flops */
2235         }
2236
2237         /* End of innermost loop */
2238
2239         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2240                                                  f+i_coord_offset,fshift+i_shift_offset);
2241
2242         /* Increment number of inner iterations */
2243         inneriter                  += j_index_end - j_index_start;
2244
2245         /* Outer loop uses 18 flops */
2246     }
2247
2248     /* Increment number of outer iterations */
2249     outeriter        += nri;
2250
2251     /* Update outer/inner flops */
2252
2253     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*520);
2254 }