Remove all unnecessary HAVE_CONFIG_H
[alexxy/gromacs.git] / src / gromacs / gmxlib / nonbonded / nb_kernel_avx_256_single / nb_kernel_ElecEw_VdwLJ_GeomW3W3_avx_256_single.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_single kernel generator.
37  */
38 #include "config.h"
39
40 #include <math.h>
41
42 #include "../nb_kernel.h"
43 #include "types/simple.h"
44 #include "gromacs/math/vec.h"
45 #include "nrnb.h"
46
47 #include "gromacs/simd/math_x86_avx_256_single.h"
48 #include "kernelutil_x86_avx_256_single.h"
49
50 /*
51  * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_single
52  * Electrostatics interaction: Ewald
53  * VdW interaction:            LennardJones
54  * Geometry:                   Water3-Water3
55  * Calculate force/pot:        PotentialAndForce
56  */
57 void
58 nb_kernel_ElecEw_VdwLJ_GeomW3W3_VF_avx_256_single
59                     (t_nblist                    * gmx_restrict       nlist,
60                      rvec                        * gmx_restrict          xx,
61                      rvec                        * gmx_restrict          ff,
62                      t_forcerec                  * gmx_restrict          fr,
63                      t_mdatoms                   * gmx_restrict     mdatoms,
64                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65                      t_nrnb                      * gmx_restrict        nrnb)
66 {
67     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
68      * just 0 for non-waters.
69      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
70      * jnr indices corresponding to data put in the four positions in the SIMD register.
71      */
72     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
73     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74     int              jnrA,jnrB,jnrC,jnrD;
75     int              jnrE,jnrF,jnrG,jnrH;
76     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
77     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
78     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
79     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
84     real             scratch[4*DIM];
85     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88     real *           vdwioffsetptr1;
89     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90     real *           vdwioffsetptr2;
91     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
93     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
94     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
95     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
96     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
97     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
98     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
99     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
100     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
101     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
102     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
103     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
104     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
105     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
106     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
107     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
108     real             *charge;
109     int              nvdwtype;
110     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
111     int              *vdwtype;
112     real             *vdwparam;
113     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
114     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
115     __m256i          ewitab;
116     __m128i          ewitab_lo,ewitab_hi;
117     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
118     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
119     real             *ewtab;
120     __m256           dummy_mask,cutoff_mask;
121     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
122     __m256           one     = _mm256_set1_ps(1.0);
123     __m256           two     = _mm256_set1_ps(2.0);
124     x                = xx[0];
125     f                = ff[0];
126
127     nri              = nlist->nri;
128     iinr             = nlist->iinr;
129     jindex           = nlist->jindex;
130     jjnr             = nlist->jjnr;
131     shiftidx         = nlist->shift;
132     gid              = nlist->gid;
133     shiftvec         = fr->shift_vec[0];
134     fshift           = fr->fshift[0];
135     facel            = _mm256_set1_ps(fr->epsfac);
136     charge           = mdatoms->chargeA;
137     nvdwtype         = fr->ntype;
138     vdwparam         = fr->nbfp;
139     vdwtype          = mdatoms->typeA;
140
141     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
142     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
143     beta2            = _mm256_mul_ps(beta,beta);
144     beta3            = _mm256_mul_ps(beta,beta2);
145
146     ewtab            = fr->ic->tabq_coul_FDV0;
147     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
148     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
149
150     /* Setup water-specific parameters */
151     inr              = nlist->iinr[0];
152     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
153     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
154     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
155     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
156
157     jq0              = _mm256_set1_ps(charge[inr+0]);
158     jq1              = _mm256_set1_ps(charge[inr+1]);
159     jq2              = _mm256_set1_ps(charge[inr+2]);
160     vdwjidx0A        = 2*vdwtype[inr+0];
161     qq00             = _mm256_mul_ps(iq0,jq0);
162     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
163     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
164     qq01             = _mm256_mul_ps(iq0,jq1);
165     qq02             = _mm256_mul_ps(iq0,jq2);
166     qq10             = _mm256_mul_ps(iq1,jq0);
167     qq11             = _mm256_mul_ps(iq1,jq1);
168     qq12             = _mm256_mul_ps(iq1,jq2);
169     qq20             = _mm256_mul_ps(iq2,jq0);
170     qq21             = _mm256_mul_ps(iq2,jq1);
171     qq22             = _mm256_mul_ps(iq2,jq2);
172
173     /* Avoid stupid compiler warnings */
174     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
175     j_coord_offsetA = 0;
176     j_coord_offsetB = 0;
177     j_coord_offsetC = 0;
178     j_coord_offsetD = 0;
179     j_coord_offsetE = 0;
180     j_coord_offsetF = 0;
181     j_coord_offsetG = 0;
182     j_coord_offsetH = 0;
183
184     outeriter        = 0;
185     inneriter        = 0;
186
187     for(iidx=0;iidx<4*DIM;iidx++)
188     {
189         scratch[iidx] = 0.0;
190     }
191
192     /* Start outer loop over neighborlists */
193     for(iidx=0; iidx<nri; iidx++)
194     {
195         /* Load shift vector for this list */
196         i_shift_offset   = DIM*shiftidx[iidx];
197
198         /* Load limits for loop over neighbors */
199         j_index_start    = jindex[iidx];
200         j_index_end      = jindex[iidx+1];
201
202         /* Get outer coordinate index */
203         inr              = iinr[iidx];
204         i_coord_offset   = DIM*inr;
205
206         /* Load i particle coords and add shift vector */
207         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
208                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
209
210         fix0             = _mm256_setzero_ps();
211         fiy0             = _mm256_setzero_ps();
212         fiz0             = _mm256_setzero_ps();
213         fix1             = _mm256_setzero_ps();
214         fiy1             = _mm256_setzero_ps();
215         fiz1             = _mm256_setzero_ps();
216         fix2             = _mm256_setzero_ps();
217         fiy2             = _mm256_setzero_ps();
218         fiz2             = _mm256_setzero_ps();
219
220         /* Reset potential sums */
221         velecsum         = _mm256_setzero_ps();
222         vvdwsum          = _mm256_setzero_ps();
223
224         /* Start inner kernel loop */
225         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
226         {
227
228             /* Get j neighbor index, and coordinate index */
229             jnrA             = jjnr[jidx];
230             jnrB             = jjnr[jidx+1];
231             jnrC             = jjnr[jidx+2];
232             jnrD             = jjnr[jidx+3];
233             jnrE             = jjnr[jidx+4];
234             jnrF             = jjnr[jidx+5];
235             jnrG             = jjnr[jidx+6];
236             jnrH             = jjnr[jidx+7];
237             j_coord_offsetA  = DIM*jnrA;
238             j_coord_offsetB  = DIM*jnrB;
239             j_coord_offsetC  = DIM*jnrC;
240             j_coord_offsetD  = DIM*jnrD;
241             j_coord_offsetE  = DIM*jnrE;
242             j_coord_offsetF  = DIM*jnrF;
243             j_coord_offsetG  = DIM*jnrG;
244             j_coord_offsetH  = DIM*jnrH;
245
246             /* load j atom coordinates */
247             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
248                                                  x+j_coord_offsetC,x+j_coord_offsetD,
249                                                  x+j_coord_offsetE,x+j_coord_offsetF,
250                                                  x+j_coord_offsetG,x+j_coord_offsetH,
251                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
252
253             /* Calculate displacement vector */
254             dx00             = _mm256_sub_ps(ix0,jx0);
255             dy00             = _mm256_sub_ps(iy0,jy0);
256             dz00             = _mm256_sub_ps(iz0,jz0);
257             dx01             = _mm256_sub_ps(ix0,jx1);
258             dy01             = _mm256_sub_ps(iy0,jy1);
259             dz01             = _mm256_sub_ps(iz0,jz1);
260             dx02             = _mm256_sub_ps(ix0,jx2);
261             dy02             = _mm256_sub_ps(iy0,jy2);
262             dz02             = _mm256_sub_ps(iz0,jz2);
263             dx10             = _mm256_sub_ps(ix1,jx0);
264             dy10             = _mm256_sub_ps(iy1,jy0);
265             dz10             = _mm256_sub_ps(iz1,jz0);
266             dx11             = _mm256_sub_ps(ix1,jx1);
267             dy11             = _mm256_sub_ps(iy1,jy1);
268             dz11             = _mm256_sub_ps(iz1,jz1);
269             dx12             = _mm256_sub_ps(ix1,jx2);
270             dy12             = _mm256_sub_ps(iy1,jy2);
271             dz12             = _mm256_sub_ps(iz1,jz2);
272             dx20             = _mm256_sub_ps(ix2,jx0);
273             dy20             = _mm256_sub_ps(iy2,jy0);
274             dz20             = _mm256_sub_ps(iz2,jz0);
275             dx21             = _mm256_sub_ps(ix2,jx1);
276             dy21             = _mm256_sub_ps(iy2,jy1);
277             dz21             = _mm256_sub_ps(iz2,jz1);
278             dx22             = _mm256_sub_ps(ix2,jx2);
279             dy22             = _mm256_sub_ps(iy2,jy2);
280             dz22             = _mm256_sub_ps(iz2,jz2);
281
282             /* Calculate squared distance and things based on it */
283             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
284             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
285             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
286             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
287             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
288             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
289             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
290             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
291             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
292
293             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
294             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
295             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
296             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
297             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
298             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
299             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
300             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
301             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
302
303             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
304             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
305             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
306             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
307             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
308             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
309             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
310             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
311             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
312
313             fjx0             = _mm256_setzero_ps();
314             fjy0             = _mm256_setzero_ps();
315             fjz0             = _mm256_setzero_ps();
316             fjx1             = _mm256_setzero_ps();
317             fjy1             = _mm256_setzero_ps();
318             fjz1             = _mm256_setzero_ps();
319             fjx2             = _mm256_setzero_ps();
320             fjy2             = _mm256_setzero_ps();
321             fjz2             = _mm256_setzero_ps();
322
323             /**************************
324              * CALCULATE INTERACTIONS *
325              **************************/
326
327             r00              = _mm256_mul_ps(rsq00,rinv00);
328
329             /* EWALD ELECTROSTATICS */
330             
331             /* Analytical PME correction */
332             zeta2            = _mm256_mul_ps(beta2,rsq00);
333             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
334             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
335             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
336             felec            = _mm256_mul_ps(qq00,felec);
337             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
338             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
339             velec            = _mm256_sub_ps(rinv00,pmecorrV);
340             velec            = _mm256_mul_ps(qq00,velec);
341             
342             /* LENNARD-JONES DISPERSION/REPULSION */
343
344             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
345             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
346             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
347             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
348             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
349
350             /* Update potential sum for this i atom from the interaction with this j atom. */
351             velecsum         = _mm256_add_ps(velecsum,velec);
352             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
353
354             fscal            = _mm256_add_ps(felec,fvdw);
355
356             /* Calculate temporary vectorial force */
357             tx               = _mm256_mul_ps(fscal,dx00);
358             ty               = _mm256_mul_ps(fscal,dy00);
359             tz               = _mm256_mul_ps(fscal,dz00);
360
361             /* Update vectorial force */
362             fix0             = _mm256_add_ps(fix0,tx);
363             fiy0             = _mm256_add_ps(fiy0,ty);
364             fiz0             = _mm256_add_ps(fiz0,tz);
365
366             fjx0             = _mm256_add_ps(fjx0,tx);
367             fjy0             = _mm256_add_ps(fjy0,ty);
368             fjz0             = _mm256_add_ps(fjz0,tz);
369
370             /**************************
371              * CALCULATE INTERACTIONS *
372              **************************/
373
374             r01              = _mm256_mul_ps(rsq01,rinv01);
375
376             /* EWALD ELECTROSTATICS */
377             
378             /* Analytical PME correction */
379             zeta2            = _mm256_mul_ps(beta2,rsq01);
380             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
381             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
382             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
383             felec            = _mm256_mul_ps(qq01,felec);
384             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
385             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
386             velec            = _mm256_sub_ps(rinv01,pmecorrV);
387             velec            = _mm256_mul_ps(qq01,velec);
388             
389             /* Update potential sum for this i atom from the interaction with this j atom. */
390             velecsum         = _mm256_add_ps(velecsum,velec);
391
392             fscal            = felec;
393
394             /* Calculate temporary vectorial force */
395             tx               = _mm256_mul_ps(fscal,dx01);
396             ty               = _mm256_mul_ps(fscal,dy01);
397             tz               = _mm256_mul_ps(fscal,dz01);
398
399             /* Update vectorial force */
400             fix0             = _mm256_add_ps(fix0,tx);
401             fiy0             = _mm256_add_ps(fiy0,ty);
402             fiz0             = _mm256_add_ps(fiz0,tz);
403
404             fjx1             = _mm256_add_ps(fjx1,tx);
405             fjy1             = _mm256_add_ps(fjy1,ty);
406             fjz1             = _mm256_add_ps(fjz1,tz);
407
408             /**************************
409              * CALCULATE INTERACTIONS *
410              **************************/
411
412             r02              = _mm256_mul_ps(rsq02,rinv02);
413
414             /* EWALD ELECTROSTATICS */
415             
416             /* Analytical PME correction */
417             zeta2            = _mm256_mul_ps(beta2,rsq02);
418             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
419             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
420             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
421             felec            = _mm256_mul_ps(qq02,felec);
422             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
423             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
424             velec            = _mm256_sub_ps(rinv02,pmecorrV);
425             velec            = _mm256_mul_ps(qq02,velec);
426             
427             /* Update potential sum for this i atom from the interaction with this j atom. */
428             velecsum         = _mm256_add_ps(velecsum,velec);
429
430             fscal            = felec;
431
432             /* Calculate temporary vectorial force */
433             tx               = _mm256_mul_ps(fscal,dx02);
434             ty               = _mm256_mul_ps(fscal,dy02);
435             tz               = _mm256_mul_ps(fscal,dz02);
436
437             /* Update vectorial force */
438             fix0             = _mm256_add_ps(fix0,tx);
439             fiy0             = _mm256_add_ps(fiy0,ty);
440             fiz0             = _mm256_add_ps(fiz0,tz);
441
442             fjx2             = _mm256_add_ps(fjx2,tx);
443             fjy2             = _mm256_add_ps(fjy2,ty);
444             fjz2             = _mm256_add_ps(fjz2,tz);
445
446             /**************************
447              * CALCULATE INTERACTIONS *
448              **************************/
449
450             r10              = _mm256_mul_ps(rsq10,rinv10);
451
452             /* EWALD ELECTROSTATICS */
453             
454             /* Analytical PME correction */
455             zeta2            = _mm256_mul_ps(beta2,rsq10);
456             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
457             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
458             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
459             felec            = _mm256_mul_ps(qq10,felec);
460             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
461             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
462             velec            = _mm256_sub_ps(rinv10,pmecorrV);
463             velec            = _mm256_mul_ps(qq10,velec);
464             
465             /* Update potential sum for this i atom from the interaction with this j atom. */
466             velecsum         = _mm256_add_ps(velecsum,velec);
467
468             fscal            = felec;
469
470             /* Calculate temporary vectorial force */
471             tx               = _mm256_mul_ps(fscal,dx10);
472             ty               = _mm256_mul_ps(fscal,dy10);
473             tz               = _mm256_mul_ps(fscal,dz10);
474
475             /* Update vectorial force */
476             fix1             = _mm256_add_ps(fix1,tx);
477             fiy1             = _mm256_add_ps(fiy1,ty);
478             fiz1             = _mm256_add_ps(fiz1,tz);
479
480             fjx0             = _mm256_add_ps(fjx0,tx);
481             fjy0             = _mm256_add_ps(fjy0,ty);
482             fjz0             = _mm256_add_ps(fjz0,tz);
483
484             /**************************
485              * CALCULATE INTERACTIONS *
486              **************************/
487
488             r11              = _mm256_mul_ps(rsq11,rinv11);
489
490             /* EWALD ELECTROSTATICS */
491             
492             /* Analytical PME correction */
493             zeta2            = _mm256_mul_ps(beta2,rsq11);
494             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
495             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
496             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
497             felec            = _mm256_mul_ps(qq11,felec);
498             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
499             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
500             velec            = _mm256_sub_ps(rinv11,pmecorrV);
501             velec            = _mm256_mul_ps(qq11,velec);
502             
503             /* Update potential sum for this i atom from the interaction with this j atom. */
504             velecsum         = _mm256_add_ps(velecsum,velec);
505
506             fscal            = felec;
507
508             /* Calculate temporary vectorial force */
509             tx               = _mm256_mul_ps(fscal,dx11);
510             ty               = _mm256_mul_ps(fscal,dy11);
511             tz               = _mm256_mul_ps(fscal,dz11);
512
513             /* Update vectorial force */
514             fix1             = _mm256_add_ps(fix1,tx);
515             fiy1             = _mm256_add_ps(fiy1,ty);
516             fiz1             = _mm256_add_ps(fiz1,tz);
517
518             fjx1             = _mm256_add_ps(fjx1,tx);
519             fjy1             = _mm256_add_ps(fjy1,ty);
520             fjz1             = _mm256_add_ps(fjz1,tz);
521
522             /**************************
523              * CALCULATE INTERACTIONS *
524              **************************/
525
526             r12              = _mm256_mul_ps(rsq12,rinv12);
527
528             /* EWALD ELECTROSTATICS */
529             
530             /* Analytical PME correction */
531             zeta2            = _mm256_mul_ps(beta2,rsq12);
532             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
533             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
534             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
535             felec            = _mm256_mul_ps(qq12,felec);
536             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
537             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
538             velec            = _mm256_sub_ps(rinv12,pmecorrV);
539             velec            = _mm256_mul_ps(qq12,velec);
540             
541             /* Update potential sum for this i atom from the interaction with this j atom. */
542             velecsum         = _mm256_add_ps(velecsum,velec);
543
544             fscal            = felec;
545
546             /* Calculate temporary vectorial force */
547             tx               = _mm256_mul_ps(fscal,dx12);
548             ty               = _mm256_mul_ps(fscal,dy12);
549             tz               = _mm256_mul_ps(fscal,dz12);
550
551             /* Update vectorial force */
552             fix1             = _mm256_add_ps(fix1,tx);
553             fiy1             = _mm256_add_ps(fiy1,ty);
554             fiz1             = _mm256_add_ps(fiz1,tz);
555
556             fjx2             = _mm256_add_ps(fjx2,tx);
557             fjy2             = _mm256_add_ps(fjy2,ty);
558             fjz2             = _mm256_add_ps(fjz2,tz);
559
560             /**************************
561              * CALCULATE INTERACTIONS *
562              **************************/
563
564             r20              = _mm256_mul_ps(rsq20,rinv20);
565
566             /* EWALD ELECTROSTATICS */
567             
568             /* Analytical PME correction */
569             zeta2            = _mm256_mul_ps(beta2,rsq20);
570             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
571             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
572             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
573             felec            = _mm256_mul_ps(qq20,felec);
574             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
575             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
576             velec            = _mm256_sub_ps(rinv20,pmecorrV);
577             velec            = _mm256_mul_ps(qq20,velec);
578             
579             /* Update potential sum for this i atom from the interaction with this j atom. */
580             velecsum         = _mm256_add_ps(velecsum,velec);
581
582             fscal            = felec;
583
584             /* Calculate temporary vectorial force */
585             tx               = _mm256_mul_ps(fscal,dx20);
586             ty               = _mm256_mul_ps(fscal,dy20);
587             tz               = _mm256_mul_ps(fscal,dz20);
588
589             /* Update vectorial force */
590             fix2             = _mm256_add_ps(fix2,tx);
591             fiy2             = _mm256_add_ps(fiy2,ty);
592             fiz2             = _mm256_add_ps(fiz2,tz);
593
594             fjx0             = _mm256_add_ps(fjx0,tx);
595             fjy0             = _mm256_add_ps(fjy0,ty);
596             fjz0             = _mm256_add_ps(fjz0,tz);
597
598             /**************************
599              * CALCULATE INTERACTIONS *
600              **************************/
601
602             r21              = _mm256_mul_ps(rsq21,rinv21);
603
604             /* EWALD ELECTROSTATICS */
605             
606             /* Analytical PME correction */
607             zeta2            = _mm256_mul_ps(beta2,rsq21);
608             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
609             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
610             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
611             felec            = _mm256_mul_ps(qq21,felec);
612             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
613             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
614             velec            = _mm256_sub_ps(rinv21,pmecorrV);
615             velec            = _mm256_mul_ps(qq21,velec);
616             
617             /* Update potential sum for this i atom from the interaction with this j atom. */
618             velecsum         = _mm256_add_ps(velecsum,velec);
619
620             fscal            = felec;
621
622             /* Calculate temporary vectorial force */
623             tx               = _mm256_mul_ps(fscal,dx21);
624             ty               = _mm256_mul_ps(fscal,dy21);
625             tz               = _mm256_mul_ps(fscal,dz21);
626
627             /* Update vectorial force */
628             fix2             = _mm256_add_ps(fix2,tx);
629             fiy2             = _mm256_add_ps(fiy2,ty);
630             fiz2             = _mm256_add_ps(fiz2,tz);
631
632             fjx1             = _mm256_add_ps(fjx1,tx);
633             fjy1             = _mm256_add_ps(fjy1,ty);
634             fjz1             = _mm256_add_ps(fjz1,tz);
635
636             /**************************
637              * CALCULATE INTERACTIONS *
638              **************************/
639
640             r22              = _mm256_mul_ps(rsq22,rinv22);
641
642             /* EWALD ELECTROSTATICS */
643             
644             /* Analytical PME correction */
645             zeta2            = _mm256_mul_ps(beta2,rsq22);
646             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
647             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
648             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
649             felec            = _mm256_mul_ps(qq22,felec);
650             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
651             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
652             velec            = _mm256_sub_ps(rinv22,pmecorrV);
653             velec            = _mm256_mul_ps(qq22,velec);
654             
655             /* Update potential sum for this i atom from the interaction with this j atom. */
656             velecsum         = _mm256_add_ps(velecsum,velec);
657
658             fscal            = felec;
659
660             /* Calculate temporary vectorial force */
661             tx               = _mm256_mul_ps(fscal,dx22);
662             ty               = _mm256_mul_ps(fscal,dy22);
663             tz               = _mm256_mul_ps(fscal,dz22);
664
665             /* Update vectorial force */
666             fix2             = _mm256_add_ps(fix2,tx);
667             fiy2             = _mm256_add_ps(fiy2,ty);
668             fiz2             = _mm256_add_ps(fiz2,tz);
669
670             fjx2             = _mm256_add_ps(fjx2,tx);
671             fjy2             = _mm256_add_ps(fjy2,ty);
672             fjz2             = _mm256_add_ps(fjz2,tz);
673
674             fjptrA             = f+j_coord_offsetA;
675             fjptrB             = f+j_coord_offsetB;
676             fjptrC             = f+j_coord_offsetC;
677             fjptrD             = f+j_coord_offsetD;
678             fjptrE             = f+j_coord_offsetE;
679             fjptrF             = f+j_coord_offsetF;
680             fjptrG             = f+j_coord_offsetG;
681             fjptrH             = f+j_coord_offsetH;
682
683             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
684                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
685
686             /* Inner loop uses 768 flops */
687         }
688
689         if(jidx<j_index_end)
690         {
691
692             /* Get j neighbor index, and coordinate index */
693             jnrlistA         = jjnr[jidx];
694             jnrlistB         = jjnr[jidx+1];
695             jnrlistC         = jjnr[jidx+2];
696             jnrlistD         = jjnr[jidx+3];
697             jnrlistE         = jjnr[jidx+4];
698             jnrlistF         = jjnr[jidx+5];
699             jnrlistG         = jjnr[jidx+6];
700             jnrlistH         = jjnr[jidx+7];
701             /* Sign of each element will be negative for non-real atoms.
702              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
703              * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
704              */
705             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
706                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
707                                             
708             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
709             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
710             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
711             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
712             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
713             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
714             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
715             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
716             j_coord_offsetA  = DIM*jnrA;
717             j_coord_offsetB  = DIM*jnrB;
718             j_coord_offsetC  = DIM*jnrC;
719             j_coord_offsetD  = DIM*jnrD;
720             j_coord_offsetE  = DIM*jnrE;
721             j_coord_offsetF  = DIM*jnrF;
722             j_coord_offsetG  = DIM*jnrG;
723             j_coord_offsetH  = DIM*jnrH;
724
725             /* load j atom coordinates */
726             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
727                                                  x+j_coord_offsetC,x+j_coord_offsetD,
728                                                  x+j_coord_offsetE,x+j_coord_offsetF,
729                                                  x+j_coord_offsetG,x+j_coord_offsetH,
730                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
731
732             /* Calculate displacement vector */
733             dx00             = _mm256_sub_ps(ix0,jx0);
734             dy00             = _mm256_sub_ps(iy0,jy0);
735             dz00             = _mm256_sub_ps(iz0,jz0);
736             dx01             = _mm256_sub_ps(ix0,jx1);
737             dy01             = _mm256_sub_ps(iy0,jy1);
738             dz01             = _mm256_sub_ps(iz0,jz1);
739             dx02             = _mm256_sub_ps(ix0,jx2);
740             dy02             = _mm256_sub_ps(iy0,jy2);
741             dz02             = _mm256_sub_ps(iz0,jz2);
742             dx10             = _mm256_sub_ps(ix1,jx0);
743             dy10             = _mm256_sub_ps(iy1,jy0);
744             dz10             = _mm256_sub_ps(iz1,jz0);
745             dx11             = _mm256_sub_ps(ix1,jx1);
746             dy11             = _mm256_sub_ps(iy1,jy1);
747             dz11             = _mm256_sub_ps(iz1,jz1);
748             dx12             = _mm256_sub_ps(ix1,jx2);
749             dy12             = _mm256_sub_ps(iy1,jy2);
750             dz12             = _mm256_sub_ps(iz1,jz2);
751             dx20             = _mm256_sub_ps(ix2,jx0);
752             dy20             = _mm256_sub_ps(iy2,jy0);
753             dz20             = _mm256_sub_ps(iz2,jz0);
754             dx21             = _mm256_sub_ps(ix2,jx1);
755             dy21             = _mm256_sub_ps(iy2,jy1);
756             dz21             = _mm256_sub_ps(iz2,jz1);
757             dx22             = _mm256_sub_ps(ix2,jx2);
758             dy22             = _mm256_sub_ps(iy2,jy2);
759             dz22             = _mm256_sub_ps(iz2,jz2);
760
761             /* Calculate squared distance and things based on it */
762             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
763             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
764             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
765             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
766             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
767             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
768             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
769             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
770             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
771
772             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
773             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
774             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
775             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
776             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
777             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
778             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
779             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
780             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
781
782             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
783             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
784             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
785             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
786             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
787             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
788             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
789             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
790             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
791
792             fjx0             = _mm256_setzero_ps();
793             fjy0             = _mm256_setzero_ps();
794             fjz0             = _mm256_setzero_ps();
795             fjx1             = _mm256_setzero_ps();
796             fjy1             = _mm256_setzero_ps();
797             fjz1             = _mm256_setzero_ps();
798             fjx2             = _mm256_setzero_ps();
799             fjy2             = _mm256_setzero_ps();
800             fjz2             = _mm256_setzero_ps();
801
802             /**************************
803              * CALCULATE INTERACTIONS *
804              **************************/
805
806             r00              = _mm256_mul_ps(rsq00,rinv00);
807             r00              = _mm256_andnot_ps(dummy_mask,r00);
808
809             /* EWALD ELECTROSTATICS */
810             
811             /* Analytical PME correction */
812             zeta2            = _mm256_mul_ps(beta2,rsq00);
813             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
814             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
815             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
816             felec            = _mm256_mul_ps(qq00,felec);
817             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
818             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
819             velec            = _mm256_sub_ps(rinv00,pmecorrV);
820             velec            = _mm256_mul_ps(qq00,velec);
821             
822             /* LENNARD-JONES DISPERSION/REPULSION */
823
824             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
825             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
826             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
827             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
828             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
829
830             /* Update potential sum for this i atom from the interaction with this j atom. */
831             velec            = _mm256_andnot_ps(dummy_mask,velec);
832             velecsum         = _mm256_add_ps(velecsum,velec);
833             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
834             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
835
836             fscal            = _mm256_add_ps(felec,fvdw);
837
838             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
839
840             /* Calculate temporary vectorial force */
841             tx               = _mm256_mul_ps(fscal,dx00);
842             ty               = _mm256_mul_ps(fscal,dy00);
843             tz               = _mm256_mul_ps(fscal,dz00);
844
845             /* Update vectorial force */
846             fix0             = _mm256_add_ps(fix0,tx);
847             fiy0             = _mm256_add_ps(fiy0,ty);
848             fiz0             = _mm256_add_ps(fiz0,tz);
849
850             fjx0             = _mm256_add_ps(fjx0,tx);
851             fjy0             = _mm256_add_ps(fjy0,ty);
852             fjz0             = _mm256_add_ps(fjz0,tz);
853
854             /**************************
855              * CALCULATE INTERACTIONS *
856              **************************/
857
858             r01              = _mm256_mul_ps(rsq01,rinv01);
859             r01              = _mm256_andnot_ps(dummy_mask,r01);
860
861             /* EWALD ELECTROSTATICS */
862             
863             /* Analytical PME correction */
864             zeta2            = _mm256_mul_ps(beta2,rsq01);
865             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
866             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
867             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
868             felec            = _mm256_mul_ps(qq01,felec);
869             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
870             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
871             velec            = _mm256_sub_ps(rinv01,pmecorrV);
872             velec            = _mm256_mul_ps(qq01,velec);
873             
874             /* Update potential sum for this i atom from the interaction with this j atom. */
875             velec            = _mm256_andnot_ps(dummy_mask,velec);
876             velecsum         = _mm256_add_ps(velecsum,velec);
877
878             fscal            = felec;
879
880             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
881
882             /* Calculate temporary vectorial force */
883             tx               = _mm256_mul_ps(fscal,dx01);
884             ty               = _mm256_mul_ps(fscal,dy01);
885             tz               = _mm256_mul_ps(fscal,dz01);
886
887             /* Update vectorial force */
888             fix0             = _mm256_add_ps(fix0,tx);
889             fiy0             = _mm256_add_ps(fiy0,ty);
890             fiz0             = _mm256_add_ps(fiz0,tz);
891
892             fjx1             = _mm256_add_ps(fjx1,tx);
893             fjy1             = _mm256_add_ps(fjy1,ty);
894             fjz1             = _mm256_add_ps(fjz1,tz);
895
896             /**************************
897              * CALCULATE INTERACTIONS *
898              **************************/
899
900             r02              = _mm256_mul_ps(rsq02,rinv02);
901             r02              = _mm256_andnot_ps(dummy_mask,r02);
902
903             /* EWALD ELECTROSTATICS */
904             
905             /* Analytical PME correction */
906             zeta2            = _mm256_mul_ps(beta2,rsq02);
907             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
908             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
909             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
910             felec            = _mm256_mul_ps(qq02,felec);
911             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
912             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
913             velec            = _mm256_sub_ps(rinv02,pmecorrV);
914             velec            = _mm256_mul_ps(qq02,velec);
915             
916             /* Update potential sum for this i atom from the interaction with this j atom. */
917             velec            = _mm256_andnot_ps(dummy_mask,velec);
918             velecsum         = _mm256_add_ps(velecsum,velec);
919
920             fscal            = felec;
921
922             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
923
924             /* Calculate temporary vectorial force */
925             tx               = _mm256_mul_ps(fscal,dx02);
926             ty               = _mm256_mul_ps(fscal,dy02);
927             tz               = _mm256_mul_ps(fscal,dz02);
928
929             /* Update vectorial force */
930             fix0             = _mm256_add_ps(fix0,tx);
931             fiy0             = _mm256_add_ps(fiy0,ty);
932             fiz0             = _mm256_add_ps(fiz0,tz);
933
934             fjx2             = _mm256_add_ps(fjx2,tx);
935             fjy2             = _mm256_add_ps(fjy2,ty);
936             fjz2             = _mm256_add_ps(fjz2,tz);
937
938             /**************************
939              * CALCULATE INTERACTIONS *
940              **************************/
941
942             r10              = _mm256_mul_ps(rsq10,rinv10);
943             r10              = _mm256_andnot_ps(dummy_mask,r10);
944
945             /* EWALD ELECTROSTATICS */
946             
947             /* Analytical PME correction */
948             zeta2            = _mm256_mul_ps(beta2,rsq10);
949             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
950             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
951             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
952             felec            = _mm256_mul_ps(qq10,felec);
953             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
954             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
955             velec            = _mm256_sub_ps(rinv10,pmecorrV);
956             velec            = _mm256_mul_ps(qq10,velec);
957             
958             /* Update potential sum for this i atom from the interaction with this j atom. */
959             velec            = _mm256_andnot_ps(dummy_mask,velec);
960             velecsum         = _mm256_add_ps(velecsum,velec);
961
962             fscal            = felec;
963
964             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
965
966             /* Calculate temporary vectorial force */
967             tx               = _mm256_mul_ps(fscal,dx10);
968             ty               = _mm256_mul_ps(fscal,dy10);
969             tz               = _mm256_mul_ps(fscal,dz10);
970
971             /* Update vectorial force */
972             fix1             = _mm256_add_ps(fix1,tx);
973             fiy1             = _mm256_add_ps(fiy1,ty);
974             fiz1             = _mm256_add_ps(fiz1,tz);
975
976             fjx0             = _mm256_add_ps(fjx0,tx);
977             fjy0             = _mm256_add_ps(fjy0,ty);
978             fjz0             = _mm256_add_ps(fjz0,tz);
979
980             /**************************
981              * CALCULATE INTERACTIONS *
982              **************************/
983
984             r11              = _mm256_mul_ps(rsq11,rinv11);
985             r11              = _mm256_andnot_ps(dummy_mask,r11);
986
987             /* EWALD ELECTROSTATICS */
988             
989             /* Analytical PME correction */
990             zeta2            = _mm256_mul_ps(beta2,rsq11);
991             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
992             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
993             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
994             felec            = _mm256_mul_ps(qq11,felec);
995             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
996             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
997             velec            = _mm256_sub_ps(rinv11,pmecorrV);
998             velec            = _mm256_mul_ps(qq11,velec);
999             
1000             /* Update potential sum for this i atom from the interaction with this j atom. */
1001             velec            = _mm256_andnot_ps(dummy_mask,velec);
1002             velecsum         = _mm256_add_ps(velecsum,velec);
1003
1004             fscal            = felec;
1005
1006             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1007
1008             /* Calculate temporary vectorial force */
1009             tx               = _mm256_mul_ps(fscal,dx11);
1010             ty               = _mm256_mul_ps(fscal,dy11);
1011             tz               = _mm256_mul_ps(fscal,dz11);
1012
1013             /* Update vectorial force */
1014             fix1             = _mm256_add_ps(fix1,tx);
1015             fiy1             = _mm256_add_ps(fiy1,ty);
1016             fiz1             = _mm256_add_ps(fiz1,tz);
1017
1018             fjx1             = _mm256_add_ps(fjx1,tx);
1019             fjy1             = _mm256_add_ps(fjy1,ty);
1020             fjz1             = _mm256_add_ps(fjz1,tz);
1021
1022             /**************************
1023              * CALCULATE INTERACTIONS *
1024              **************************/
1025
1026             r12              = _mm256_mul_ps(rsq12,rinv12);
1027             r12              = _mm256_andnot_ps(dummy_mask,r12);
1028
1029             /* EWALD ELECTROSTATICS */
1030             
1031             /* Analytical PME correction */
1032             zeta2            = _mm256_mul_ps(beta2,rsq12);
1033             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1034             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1035             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1036             felec            = _mm256_mul_ps(qq12,felec);
1037             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1038             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1039             velec            = _mm256_sub_ps(rinv12,pmecorrV);
1040             velec            = _mm256_mul_ps(qq12,velec);
1041             
1042             /* Update potential sum for this i atom from the interaction with this j atom. */
1043             velec            = _mm256_andnot_ps(dummy_mask,velec);
1044             velecsum         = _mm256_add_ps(velecsum,velec);
1045
1046             fscal            = felec;
1047
1048             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1049
1050             /* Calculate temporary vectorial force */
1051             tx               = _mm256_mul_ps(fscal,dx12);
1052             ty               = _mm256_mul_ps(fscal,dy12);
1053             tz               = _mm256_mul_ps(fscal,dz12);
1054
1055             /* Update vectorial force */
1056             fix1             = _mm256_add_ps(fix1,tx);
1057             fiy1             = _mm256_add_ps(fiy1,ty);
1058             fiz1             = _mm256_add_ps(fiz1,tz);
1059
1060             fjx2             = _mm256_add_ps(fjx2,tx);
1061             fjy2             = _mm256_add_ps(fjy2,ty);
1062             fjz2             = _mm256_add_ps(fjz2,tz);
1063
1064             /**************************
1065              * CALCULATE INTERACTIONS *
1066              **************************/
1067
1068             r20              = _mm256_mul_ps(rsq20,rinv20);
1069             r20              = _mm256_andnot_ps(dummy_mask,r20);
1070
1071             /* EWALD ELECTROSTATICS */
1072             
1073             /* Analytical PME correction */
1074             zeta2            = _mm256_mul_ps(beta2,rsq20);
1075             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1076             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1077             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1078             felec            = _mm256_mul_ps(qq20,felec);
1079             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1080             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1081             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1082             velec            = _mm256_mul_ps(qq20,velec);
1083             
1084             /* Update potential sum for this i atom from the interaction with this j atom. */
1085             velec            = _mm256_andnot_ps(dummy_mask,velec);
1086             velecsum         = _mm256_add_ps(velecsum,velec);
1087
1088             fscal            = felec;
1089
1090             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1091
1092             /* Calculate temporary vectorial force */
1093             tx               = _mm256_mul_ps(fscal,dx20);
1094             ty               = _mm256_mul_ps(fscal,dy20);
1095             tz               = _mm256_mul_ps(fscal,dz20);
1096
1097             /* Update vectorial force */
1098             fix2             = _mm256_add_ps(fix2,tx);
1099             fiy2             = _mm256_add_ps(fiy2,ty);
1100             fiz2             = _mm256_add_ps(fiz2,tz);
1101
1102             fjx0             = _mm256_add_ps(fjx0,tx);
1103             fjy0             = _mm256_add_ps(fjy0,ty);
1104             fjz0             = _mm256_add_ps(fjz0,tz);
1105
1106             /**************************
1107              * CALCULATE INTERACTIONS *
1108              **************************/
1109
1110             r21              = _mm256_mul_ps(rsq21,rinv21);
1111             r21              = _mm256_andnot_ps(dummy_mask,r21);
1112
1113             /* EWALD ELECTROSTATICS */
1114             
1115             /* Analytical PME correction */
1116             zeta2            = _mm256_mul_ps(beta2,rsq21);
1117             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1118             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1119             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1120             felec            = _mm256_mul_ps(qq21,felec);
1121             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1122             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1123             velec            = _mm256_sub_ps(rinv21,pmecorrV);
1124             velec            = _mm256_mul_ps(qq21,velec);
1125             
1126             /* Update potential sum for this i atom from the interaction with this j atom. */
1127             velec            = _mm256_andnot_ps(dummy_mask,velec);
1128             velecsum         = _mm256_add_ps(velecsum,velec);
1129
1130             fscal            = felec;
1131
1132             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1133
1134             /* Calculate temporary vectorial force */
1135             tx               = _mm256_mul_ps(fscal,dx21);
1136             ty               = _mm256_mul_ps(fscal,dy21);
1137             tz               = _mm256_mul_ps(fscal,dz21);
1138
1139             /* Update vectorial force */
1140             fix2             = _mm256_add_ps(fix2,tx);
1141             fiy2             = _mm256_add_ps(fiy2,ty);
1142             fiz2             = _mm256_add_ps(fiz2,tz);
1143
1144             fjx1             = _mm256_add_ps(fjx1,tx);
1145             fjy1             = _mm256_add_ps(fjy1,ty);
1146             fjz1             = _mm256_add_ps(fjz1,tz);
1147
1148             /**************************
1149              * CALCULATE INTERACTIONS *
1150              **************************/
1151
1152             r22              = _mm256_mul_ps(rsq22,rinv22);
1153             r22              = _mm256_andnot_ps(dummy_mask,r22);
1154
1155             /* EWALD ELECTROSTATICS */
1156             
1157             /* Analytical PME correction */
1158             zeta2            = _mm256_mul_ps(beta2,rsq22);
1159             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1160             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1161             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1162             felec            = _mm256_mul_ps(qq22,felec);
1163             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1164             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1165             velec            = _mm256_sub_ps(rinv22,pmecorrV);
1166             velec            = _mm256_mul_ps(qq22,velec);
1167             
1168             /* Update potential sum for this i atom from the interaction with this j atom. */
1169             velec            = _mm256_andnot_ps(dummy_mask,velec);
1170             velecsum         = _mm256_add_ps(velecsum,velec);
1171
1172             fscal            = felec;
1173
1174             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1175
1176             /* Calculate temporary vectorial force */
1177             tx               = _mm256_mul_ps(fscal,dx22);
1178             ty               = _mm256_mul_ps(fscal,dy22);
1179             tz               = _mm256_mul_ps(fscal,dz22);
1180
1181             /* Update vectorial force */
1182             fix2             = _mm256_add_ps(fix2,tx);
1183             fiy2             = _mm256_add_ps(fiy2,ty);
1184             fiz2             = _mm256_add_ps(fiz2,tz);
1185
1186             fjx2             = _mm256_add_ps(fjx2,tx);
1187             fjy2             = _mm256_add_ps(fjy2,ty);
1188             fjz2             = _mm256_add_ps(fjz2,tz);
1189
1190             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1191             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1192             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1193             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1194             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1195             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1196             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1197             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1198
1199             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1200                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1201
1202             /* Inner loop uses 777 flops */
1203         }
1204
1205         /* End of innermost loop */
1206
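        /* Reduce the eight accumulated SIMD lanes of the three i-site forces, add them to f at
         * this water's coordinates, and accumulate the corresponding shift force used for the
         * virial.
         */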
1207         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1208                                                  f+i_coord_offset,fshift+i_shift_offset);
1209
1210         ggid                        = gid[iidx];
1211         /* Update potential energies */
1212         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
1213         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
1214
1215         /* Increment number of inner iterations */
1216         inneriter                  += j_index_end - j_index_start;
1217
1218         /* Outer loop uses 20 flops */
1219     }
1220
1221     /* Increment number of outer iterations */
1222     outeriter        += nri;
1223
1224     /* Update outer/inner flops */
1225
1226     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_VF,outeriter*20 + inneriter*777);
1227 }
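
/*
 * Reference-only scalar sketch of the per-pair Ewald quantities evaluated through the
 * analytical PME correction in the kernels above and below. It is not called by the
 * generated SIMD code; it only documents, assuming qq already contains epsfac*qi*qj and
 * beta is fr->ic->ewaldcoeff_q, what the vectorized felec/velec expressions compute.
 * The helper name is illustrative only, and gmx_unused keeps compilers quiet since
 * nothing references it.
 */
static gmx_unused void
nb_reference_ewald_pair_scalar(real qq, real beta, real rsq,
                               real *velec, real *fscal)
{
    real rinv            = 1.0/sqrt(rsq);
    real r               = rsq*rinv;
    real z               = beta*r;
    real two_over_sqrtpi = 1.1283791670955126;

    /* Real-space Ewald potential: qq*erfc(beta*r)/r */
    *velec = qq*erfc(z)*rinv;

    /* Scalar force divided by r (so that e.g. fx = (*fscal)*dx):
     * qq*(erfc(beta*r) + 2*beta*r*exp(-(beta*r)^2)/sqrt(pi))/r^3
     */
    *fscal = qq*(erfc(z) + two_over_sqrtpi*z*exp(-z*z))*rinv*rinv*rinv;
}
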
1228 /*
1229  * Gromacs nonbonded kernel:   nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_single
1230  * Electrostatics interaction: Ewald
1231  * VdW interaction:            LennardJones
1232  * Geometry:                   Water3-Water3
1233  * Calculate force/pot:        Force
1234  */
1235 void
1236 nb_kernel_ElecEw_VdwLJ_GeomW3W3_F_avx_256_single
1237                     (t_nblist                    * gmx_restrict       nlist,
1238                      rvec                        * gmx_restrict          xx,
1239                      rvec                        * gmx_restrict          ff,
1240                      t_forcerec                  * gmx_restrict          fr,
1241                      t_mdatoms                   * gmx_restrict     mdatoms,
1242                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
1243                      t_nrnb                      * gmx_restrict        nrnb)
1244 {
1245     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
1246      * just 0 for non-waters.
1247      * Suffixes A,B,C,D,E,F,G,H refer to the j-loop unrolling done with AVX, i.e. the eight different
1248      * jnr indices whose data occupy the eight positions in the SIMD register.
1249      */
1250     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
1251     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
1252     int              jnrA,jnrB,jnrC,jnrD;
1253     int              jnrE,jnrF,jnrG,jnrH;
1254     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
1255     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
1256     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
1257     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
1258     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
1259     real             rcutoff_scalar;
1260     real             *shiftvec,*fshift,*x,*f;
1261     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
1262     real             scratch[4*DIM];
1263     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
1264     real *           vdwioffsetptr0;
1265     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
1266     real *           vdwioffsetptr1;
1267     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
1268     real *           vdwioffsetptr2;
1269     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
1270     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
1271     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
1272     int              vdwjidx1A,vdwjidx1B,vdwjidx1C,vdwjidx1D,vdwjidx1E,vdwjidx1F,vdwjidx1G,vdwjidx1H;
1273     __m256           jx1,jy1,jz1,fjx1,fjy1,fjz1,jq1,isaj1;
1274     int              vdwjidx2A,vdwjidx2B,vdwjidx2C,vdwjidx2D,vdwjidx2E,vdwjidx2F,vdwjidx2G,vdwjidx2H;
1275     __m256           jx2,jy2,jz2,fjx2,fjy2,fjz2,jq2,isaj2;
1276     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
1277     __m256           dx01,dy01,dz01,rsq01,rinv01,rinvsq01,r01,qq01,c6_01,c12_01;
1278     __m256           dx02,dy02,dz02,rsq02,rinv02,rinvsq02,r02,qq02,c6_02,c12_02;
1279     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
1280     __m256           dx11,dy11,dz11,rsq11,rinv11,rinvsq11,r11,qq11,c6_11,c12_11;
1281     __m256           dx12,dy12,dz12,rsq12,rinv12,rinvsq12,r12,qq12,c6_12,c12_12;
1282     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
1283     __m256           dx21,dy21,dz21,rsq21,rinv21,rinvsq21,r21,qq21,c6_21,c12_21;
1284     __m256           dx22,dy22,dz22,rsq22,rinv22,rinvsq22,r22,qq22,c6_22,c12_22;
1285     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
1286     real             *charge;
1287     int              nvdwtype;
1288     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
1289     int              *vdwtype;
1290     real             *vdwparam;
1291     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
1292     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
1293     __m256i          ewitab;
1294     __m128i          ewitab_lo,ewitab_hi;
1295     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
1296     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
1297     real             *ewtab;
1298     __m256           dummy_mask,cutoff_mask;
1299     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
1300     __m256           one     = _mm256_set1_ps(1.0);
1301     __m256           two     = _mm256_set1_ps(2.0);
1302     x                = xx[0];
1303     f                = ff[0];
1304
1305     nri              = nlist->nri;
1306     iinr             = nlist->iinr;
1307     jindex           = nlist->jindex;
1308     jjnr             = nlist->jjnr;
1309     shiftidx         = nlist->shift;
1310     gid              = nlist->gid;
1311     shiftvec         = fr->shift_vec[0];
1312     fshift           = fr->fshift[0];
1313     facel            = _mm256_set1_ps(fr->epsfac);
1314     charge           = mdatoms->chargeA;
1315     nvdwtype         = fr->ntype;
1316     vdwparam         = fr->nbfp;
1317     vdwtype          = mdatoms->typeA;
1318
1319     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1320     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1321     beta2            = _mm256_mul_ps(beta,beta);
1322     beta3            = _mm256_mul_ps(beta,beta2);
1323
1324     ewtab            = fr->ic->tabq_coul_F;
1325     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1326     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1327
1328     /* Setup water-specific parameters */
1329     inr              = nlist->iinr[0];
1330     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
1331     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1332     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1333     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1334
1335     jq0              = _mm256_set1_ps(charge[inr+0]);
1336     jq1              = _mm256_set1_ps(charge[inr+1]);
1337     jq2              = _mm256_set1_ps(charge[inr+2]);
1338     vdwjidx0A        = 2*vdwtype[inr+0];
1339     qq00             = _mm256_mul_ps(iq0,jq0);
1340     c6_00            = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A]);
1341     c12_00           = _mm256_set1_ps(vdwioffsetptr0[vdwjidx0A+1]);
1342     qq01             = _mm256_mul_ps(iq0,jq1);
1343     qq02             = _mm256_mul_ps(iq0,jq2);
1344     qq10             = _mm256_mul_ps(iq1,jq0);
1345     qq11             = _mm256_mul_ps(iq1,jq1);
1346     qq12             = _mm256_mul_ps(iq1,jq2);
1347     qq20             = _mm256_mul_ps(iq2,jq0);
1348     qq21             = _mm256_mul_ps(iq2,jq1);
1349     qq22             = _mm256_mul_ps(iq2,jq2);
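    /* Every i and j molecule in this kernel is the same three-site water, so all nine charge
     * products and the site0-site0 LJ parameters are constant and can be broadcast once here
     * instead of being reloaded for every j batch.
     */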
1350
1351     /* Initialize to avoid compiler warnings about potentially uninitialized variables */
1352     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1353     j_coord_offsetA = 0;
1354     j_coord_offsetB = 0;
1355     j_coord_offsetC = 0;
1356     j_coord_offsetD = 0;
1357     j_coord_offsetE = 0;
1358     j_coord_offsetF = 0;
1359     j_coord_offsetG = 0;
1360     j_coord_offsetH = 0;
1361
1362     outeriter        = 0;
1363     inneriter        = 0;
1364
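    /* The scratch buffer absorbs the force output written for padding (dummy) j entries in the
     * masked loop at the end of each j range, so that real forces are never touched.
     */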
1365     for(iidx=0;iidx<4*DIM;iidx++)
1366     {
1367         scratch[iidx] = 0.0;
1368     }
1369
1370     /* Start outer loop over neighborlists */
1371     for(iidx=0; iidx<nri; iidx++)
1372     {
1373         /* Load shift vector for this list */
1374         i_shift_offset   = DIM*shiftidx[iidx];
1375
1376         /* Load limits for loop over neighbors */
1377         j_index_start    = jindex[iidx];
1378         j_index_end      = jindex[iidx+1];
1379
1380         /* Get outer coordinate index */
1381         inr              = iinr[iidx];
1382         i_coord_offset   = DIM*inr;
1383
1384         /* Load i particle coords and add shift vector */
1385         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1386                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
1387
1388         fix0             = _mm256_setzero_ps();
1389         fiy0             = _mm256_setzero_ps();
1390         fiz0             = _mm256_setzero_ps();
1391         fix1             = _mm256_setzero_ps();
1392         fiy1             = _mm256_setzero_ps();
1393         fiz1             = _mm256_setzero_ps();
1394         fix2             = _mm256_setzero_ps();
1395         fiy2             = _mm256_setzero_ps();
1396         fiz2             = _mm256_setzero_ps();
1397
1398         /* Start inner kernel loop */
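        /* The full-width loop below processes eight j waters per iteration and only runs while
         * all eight jjnr entries are real (non-negative); the trailing, partially filled batch
         * is handled by the masked loop that follows it.
         */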
1399         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1400         {
1401
1402             /* Get j neighbor index, and coordinate index */
1403             jnrA             = jjnr[jidx];
1404             jnrB             = jjnr[jidx+1];
1405             jnrC             = jjnr[jidx+2];
1406             jnrD             = jjnr[jidx+3];
1407             jnrE             = jjnr[jidx+4];
1408             jnrF             = jjnr[jidx+5];
1409             jnrG             = jjnr[jidx+6];
1410             jnrH             = jjnr[jidx+7];
1411             j_coord_offsetA  = DIM*jnrA;
1412             j_coord_offsetB  = DIM*jnrB;
1413             j_coord_offsetC  = DIM*jnrC;
1414             j_coord_offsetD  = DIM*jnrD;
1415             j_coord_offsetE  = DIM*jnrE;
1416             j_coord_offsetF  = DIM*jnrF;
1417             j_coord_offsetG  = DIM*jnrG;
1418             j_coord_offsetH  = DIM*jnrH;
1419
1420             /* load j atom coordinates */
1421             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1422                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1423                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1424                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1425                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
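            /* jx0..jz2 now hold the x/y/z coordinates of the three sites of the eight j waters,
             * one SIMD register per coordinate component per site. */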
1426
1427             /* Calculate displacement vector */
1428             dx00             = _mm256_sub_ps(ix0,jx0);
1429             dy00             = _mm256_sub_ps(iy0,jy0);
1430             dz00             = _mm256_sub_ps(iz0,jz0);
1431             dx01             = _mm256_sub_ps(ix0,jx1);
1432             dy01             = _mm256_sub_ps(iy0,jy1);
1433             dz01             = _mm256_sub_ps(iz0,jz1);
1434             dx02             = _mm256_sub_ps(ix0,jx2);
1435             dy02             = _mm256_sub_ps(iy0,jy2);
1436             dz02             = _mm256_sub_ps(iz0,jz2);
1437             dx10             = _mm256_sub_ps(ix1,jx0);
1438             dy10             = _mm256_sub_ps(iy1,jy0);
1439             dz10             = _mm256_sub_ps(iz1,jz0);
1440             dx11             = _mm256_sub_ps(ix1,jx1);
1441             dy11             = _mm256_sub_ps(iy1,jy1);
1442             dz11             = _mm256_sub_ps(iz1,jz1);
1443             dx12             = _mm256_sub_ps(ix1,jx2);
1444             dy12             = _mm256_sub_ps(iy1,jy2);
1445             dz12             = _mm256_sub_ps(iz1,jz2);
1446             dx20             = _mm256_sub_ps(ix2,jx0);
1447             dy20             = _mm256_sub_ps(iy2,jy0);
1448             dz20             = _mm256_sub_ps(iz2,jz0);
1449             dx21             = _mm256_sub_ps(ix2,jx1);
1450             dy21             = _mm256_sub_ps(iy2,jy1);
1451             dz21             = _mm256_sub_ps(iz2,jz1);
1452             dx22             = _mm256_sub_ps(ix2,jx2);
1453             dy22             = _mm256_sub_ps(iy2,jy2);
1454             dz22             = _mm256_sub_ps(iz2,jz2);
1455
1456             /* Calculate squared distance and things based on it */
1457             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1458             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1459             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1460             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1461             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1462             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1463             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1464             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1465             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1466
1467             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1468             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
1469             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
1470             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1471             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1472             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1473             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1474             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1475             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1476
1477             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1478             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
1479             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
1480             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1481             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1482             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1483             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1484             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1485             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1486
1487             fjx0             = _mm256_setzero_ps();
1488             fjy0             = _mm256_setzero_ps();
1489             fjz0             = _mm256_setzero_ps();
1490             fjx1             = _mm256_setzero_ps();
1491             fjy1             = _mm256_setzero_ps();
1492             fjz1             = _mm256_setzero_ps();
1493             fjx2             = _mm256_setzero_ps();
1494             fjy2             = _mm256_setzero_ps();
1495             fjz2             = _mm256_setzero_ps();
1496
1497             /**************************
1498              * CALCULATE INTERACTIONS *
1499              **************************/
1500
1501             r00              = _mm256_mul_ps(rsq00,rinv00);
1502
1503             /* EWALD ELECTROSTATICS */
1504             
1505             /* Analytical PME correction */
1506             zeta2            = _mm256_mul_ps(beta2,rsq00);
1507             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1508             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1509             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1510             felec            = _mm256_mul_ps(qq00,felec);
1511             
1512             /* LENNARD-JONES DISPERSION/REPULSION */
1513
1514             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1515             fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
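            /* fvdw is the LJ force divided by r, (c12*rinv^12 - c6*rinv^6)*rinv^2, with c6_00/c12_00
             * taken from fr->nbfp, which for these kernels is assumed to store the parameters
             * premultiplied by 6 and 12 (hence the one_sixth/one_twelfth factors in the
             * potential-evaluating kernel and no explicit prefactors here).
             */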
1516
1517             fscal            = _mm256_add_ps(felec,fvdw);
1518
1519             /* Calculate temporary vectorial force */
1520             tx               = _mm256_mul_ps(fscal,dx00);
1521             ty               = _mm256_mul_ps(fscal,dy00);
1522             tz               = _mm256_mul_ps(fscal,dz00);
1523
1524             /* Update vectorial force */
1525             fix0             = _mm256_add_ps(fix0,tx);
1526             fiy0             = _mm256_add_ps(fiy0,ty);
1527             fiz0             = _mm256_add_ps(fiz0,tz);
1528
1529             fjx0             = _mm256_add_ps(fjx0,tx);
1530             fjy0             = _mm256_add_ps(fjy0,ty);
1531             fjz0             = _mm256_add_ps(fjz0,tz);
1532
1533             /**************************
1534              * CALCULATE INTERACTIONS *
1535              **************************/
1536
1537             r01              = _mm256_mul_ps(rsq01,rinv01);
1538
1539             /* EWALD ELECTROSTATICS */
1540             
1541             /* Analytical PME correction */
1542             zeta2            = _mm256_mul_ps(beta2,rsq01);
1543             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1544             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1545             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1546             felec            = _mm256_mul_ps(qq01,felec);
1547             
1548             fscal            = felec;
1549
1550             /* Calculate temporary vectorial force */
1551             tx               = _mm256_mul_ps(fscal,dx01);
1552             ty               = _mm256_mul_ps(fscal,dy01);
1553             tz               = _mm256_mul_ps(fscal,dz01);
1554
1555             /* Update vectorial force */
1556             fix0             = _mm256_add_ps(fix0,tx);
1557             fiy0             = _mm256_add_ps(fiy0,ty);
1558             fiz0             = _mm256_add_ps(fiz0,tz);
1559
1560             fjx1             = _mm256_add_ps(fjx1,tx);
1561             fjy1             = _mm256_add_ps(fjy1,ty);
1562             fjz1             = _mm256_add_ps(fjz1,tz);
1563
1564             /**************************
1565              * CALCULATE INTERACTIONS *
1566              **************************/
1567
1568             r02              = _mm256_mul_ps(rsq02,rinv02);
1569
1570             /* EWALD ELECTROSTATICS */
1571             
1572             /* Analytical PME correction */
1573             zeta2            = _mm256_mul_ps(beta2,rsq02);
1574             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1575             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1576             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1577             felec            = _mm256_mul_ps(qq02,felec);
1578             
1579             fscal            = felec;
1580
1581             /* Calculate temporary vectorial force */
1582             tx               = _mm256_mul_ps(fscal,dx02);
1583             ty               = _mm256_mul_ps(fscal,dy02);
1584             tz               = _mm256_mul_ps(fscal,dz02);
1585
1586             /* Update vectorial force */
1587             fix0             = _mm256_add_ps(fix0,tx);
1588             fiy0             = _mm256_add_ps(fiy0,ty);
1589             fiz0             = _mm256_add_ps(fiz0,tz);
1590
1591             fjx2             = _mm256_add_ps(fjx2,tx);
1592             fjy2             = _mm256_add_ps(fjy2,ty);
1593             fjz2             = _mm256_add_ps(fjz2,tz);
1594
1595             /**************************
1596              * CALCULATE INTERACTIONS *
1597              **************************/
1598
1599             r10              = _mm256_mul_ps(rsq10,rinv10);
1600
1601             /* EWALD ELECTROSTATICS */
1602             
1603             /* Analytical PME correction */
1604             zeta2            = _mm256_mul_ps(beta2,rsq10);
1605             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1606             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1607             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1608             felec            = _mm256_mul_ps(qq10,felec);
1609             
1610             fscal            = felec;
1611
1612             /* Calculate temporary vectorial force */
1613             tx               = _mm256_mul_ps(fscal,dx10);
1614             ty               = _mm256_mul_ps(fscal,dy10);
1615             tz               = _mm256_mul_ps(fscal,dz10);
1616
1617             /* Update vectorial force */
1618             fix1             = _mm256_add_ps(fix1,tx);
1619             fiy1             = _mm256_add_ps(fiy1,ty);
1620             fiz1             = _mm256_add_ps(fiz1,tz);
1621
1622             fjx0             = _mm256_add_ps(fjx0,tx);
1623             fjy0             = _mm256_add_ps(fjy0,ty);
1624             fjz0             = _mm256_add_ps(fjz0,tz);
1625
1626             /**************************
1627              * CALCULATE INTERACTIONS *
1628              **************************/
1629
1630             r11              = _mm256_mul_ps(rsq11,rinv11);
1631
1632             /* EWALD ELECTROSTATICS */
1633             
1634             /* Analytical PME correction */
1635             zeta2            = _mm256_mul_ps(beta2,rsq11);
1636             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
1637             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1638             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1639             felec            = _mm256_mul_ps(qq11,felec);
1640             
1641             fscal            = felec;
1642
1643             /* Calculate temporary vectorial force */
1644             tx               = _mm256_mul_ps(fscal,dx11);
1645             ty               = _mm256_mul_ps(fscal,dy11);
1646             tz               = _mm256_mul_ps(fscal,dz11);
1647
1648             /* Update vectorial force */
1649             fix1             = _mm256_add_ps(fix1,tx);
1650             fiy1             = _mm256_add_ps(fiy1,ty);
1651             fiz1             = _mm256_add_ps(fiz1,tz);
1652
1653             fjx1             = _mm256_add_ps(fjx1,tx);
1654             fjy1             = _mm256_add_ps(fjy1,ty);
1655             fjz1             = _mm256_add_ps(fjz1,tz);
1656
1657             /**************************
1658              * CALCULATE INTERACTIONS *
1659              **************************/
1660
1661             r12              = _mm256_mul_ps(rsq12,rinv12);
1662
1663             /* EWALD ELECTROSTATICS */
1664             
1665             /* Analytical PME correction */
1666             zeta2            = _mm256_mul_ps(beta2,rsq12);
1667             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
1668             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1669             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1670             felec            = _mm256_mul_ps(qq12,felec);
1671             
1672             fscal            = felec;
1673
1674             /* Calculate temporary vectorial force */
1675             tx               = _mm256_mul_ps(fscal,dx12);
1676             ty               = _mm256_mul_ps(fscal,dy12);
1677             tz               = _mm256_mul_ps(fscal,dz12);
1678
1679             /* Update vectorial force */
1680             fix1             = _mm256_add_ps(fix1,tx);
1681             fiy1             = _mm256_add_ps(fiy1,ty);
1682             fiz1             = _mm256_add_ps(fiz1,tz);
1683
1684             fjx2             = _mm256_add_ps(fjx2,tx);
1685             fjy2             = _mm256_add_ps(fjy2,ty);
1686             fjz2             = _mm256_add_ps(fjz2,tz);
1687
1688             /**************************
1689              * CALCULATE INTERACTIONS *
1690              **************************/
1691
1692             r20              = _mm256_mul_ps(rsq20,rinv20);
1693
1694             /* EWALD ELECTROSTATICS */
1695             
1696             /* Analytical PME correction */
1697             zeta2            = _mm256_mul_ps(beta2,rsq20);
1698             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1699             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1700             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1701             felec            = _mm256_mul_ps(qq20,felec);
1702             
1703             fscal            = felec;
1704
1705             /* Calculate temporary vectorial force */
1706             tx               = _mm256_mul_ps(fscal,dx20);
1707             ty               = _mm256_mul_ps(fscal,dy20);
1708             tz               = _mm256_mul_ps(fscal,dz20);
1709
1710             /* Update vectorial force */
1711             fix2             = _mm256_add_ps(fix2,tx);
1712             fiy2             = _mm256_add_ps(fiy2,ty);
1713             fiz2             = _mm256_add_ps(fiz2,tz);
1714
1715             fjx0             = _mm256_add_ps(fjx0,tx);
1716             fjy0             = _mm256_add_ps(fjy0,ty);
1717             fjz0             = _mm256_add_ps(fjz0,tz);
1718
1719             /**************************
1720              * CALCULATE INTERACTIONS *
1721              **************************/
1722
1723             r21              = _mm256_mul_ps(rsq21,rinv21);
1724
1725             /* EWALD ELECTROSTATICS */
1726             
1727             /* Analytical PME correction */
1728             zeta2            = _mm256_mul_ps(beta2,rsq21);
1729             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
1730             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1731             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1732             felec            = _mm256_mul_ps(qq21,felec);
1733             
1734             fscal            = felec;
1735
1736             /* Calculate temporary vectorial force */
1737             tx               = _mm256_mul_ps(fscal,dx21);
1738             ty               = _mm256_mul_ps(fscal,dy21);
1739             tz               = _mm256_mul_ps(fscal,dz21);
1740
1741             /* Update vectorial force */
1742             fix2             = _mm256_add_ps(fix2,tx);
1743             fiy2             = _mm256_add_ps(fiy2,ty);
1744             fiz2             = _mm256_add_ps(fiz2,tz);
1745
1746             fjx1             = _mm256_add_ps(fjx1,tx);
1747             fjy1             = _mm256_add_ps(fjy1,ty);
1748             fjz1             = _mm256_add_ps(fjz1,tz);
1749
1750             /**************************
1751              * CALCULATE INTERACTIONS *
1752              **************************/
1753
1754             r22              = _mm256_mul_ps(rsq22,rinv22);
1755
1756             /* EWALD ELECTROSTATICS */
1757             
1758             /* Analytical PME correction */
1759             zeta2            = _mm256_mul_ps(beta2,rsq22);
1760             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
1761             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1762             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1763             felec            = _mm256_mul_ps(qq22,felec);
1764             
1765             fscal            = felec;
1766
1767             /* Calculate temporary vectorial force */
1768             tx               = _mm256_mul_ps(fscal,dx22);
1769             ty               = _mm256_mul_ps(fscal,dy22);
1770             tz               = _mm256_mul_ps(fscal,dz22);
1771
1772             /* Update vectorial force */
1773             fix2             = _mm256_add_ps(fix2,tx);
1774             fiy2             = _mm256_add_ps(fiy2,ty);
1775             fiz2             = _mm256_add_ps(fiz2,tz);
1776
1777             fjx2             = _mm256_add_ps(fjx2,tx);
1778             fjy2             = _mm256_add_ps(fjy2,ty);
1779             fjz2             = _mm256_add_ps(fjz2,tz);
1780
1781             fjptrA             = f+j_coord_offsetA;
1782             fjptrB             = f+j_coord_offsetB;
1783             fjptrC             = f+j_coord_offsetC;
1784             fjptrD             = f+j_coord_offsetD;
1785             fjptrE             = f+j_coord_offsetE;
1786             fjptrF             = f+j_coord_offsetF;
1787             fjptrG             = f+j_coord_offsetG;
1788             fjptrH             = f+j_coord_offsetH;
1789
1790             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
1791                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
1792
1793             /* Inner loop uses 511 flops */
1794         }
1795
1796         if(jidx<j_index_end)
1797         {
1798
1799             /* Get j neighbor index, and coordinate index */
1800             jnrlistA         = jjnr[jidx];
1801             jnrlistB         = jjnr[jidx+1];
1802             jnrlistC         = jjnr[jidx+2];
1803             jnrlistD         = jjnr[jidx+3];
1804             jnrlistE         = jjnr[jidx+4];
1805             jnrlistF         = jjnr[jidx+5];
1806             jnrlistG         = jjnr[jidx+6];
1807             jnrlistH         = jjnr[jidx+7];
1808             /* Each element is negative for non-real (padding) atoms.
1809              * The resulting mask is 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1810              * so it is applied as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
1811              */
1812             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1813                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1814                                             
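            /* Dummy (negative) indices are clamped to 0 below so that the coordinate gather reads
             * valid memory; their contributions are zeroed through dummy_mask and their force
             * output is redirected to the scratch buffer further down.
             */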
1815             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1816             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1817             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1818             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1819             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
1820             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
1821             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
1822             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
1823             j_coord_offsetA  = DIM*jnrA;
1824             j_coord_offsetB  = DIM*jnrB;
1825             j_coord_offsetC  = DIM*jnrC;
1826             j_coord_offsetD  = DIM*jnrD;
1827             j_coord_offsetE  = DIM*jnrE;
1828             j_coord_offsetF  = DIM*jnrF;
1829             j_coord_offsetG  = DIM*jnrG;
1830             j_coord_offsetH  = DIM*jnrH;
1831
1832             /* load j atom coordinates */
1833             gmx_mm256_load_3rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1834                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1835                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1836                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1837                                               &jx0,&jy0,&jz0,&jx1,&jy1,&jz1,&jx2,&jy2,&jz2);
1838
1839             /* Calculate displacement vector */
1840             dx00             = _mm256_sub_ps(ix0,jx0);
1841             dy00             = _mm256_sub_ps(iy0,jy0);
1842             dz00             = _mm256_sub_ps(iz0,jz0);
1843             dx01             = _mm256_sub_ps(ix0,jx1);
1844             dy01             = _mm256_sub_ps(iy0,jy1);
1845             dz01             = _mm256_sub_ps(iz0,jz1);
1846             dx02             = _mm256_sub_ps(ix0,jx2);
1847             dy02             = _mm256_sub_ps(iy0,jy2);
1848             dz02             = _mm256_sub_ps(iz0,jz2);
1849             dx10             = _mm256_sub_ps(ix1,jx0);
1850             dy10             = _mm256_sub_ps(iy1,jy0);
1851             dz10             = _mm256_sub_ps(iz1,jz0);
1852             dx11             = _mm256_sub_ps(ix1,jx1);
1853             dy11             = _mm256_sub_ps(iy1,jy1);
1854             dz11             = _mm256_sub_ps(iz1,jz1);
1855             dx12             = _mm256_sub_ps(ix1,jx2);
1856             dy12             = _mm256_sub_ps(iy1,jy2);
1857             dz12             = _mm256_sub_ps(iz1,jz2);
1858             dx20             = _mm256_sub_ps(ix2,jx0);
1859             dy20             = _mm256_sub_ps(iy2,jy0);
1860             dz20             = _mm256_sub_ps(iz2,jz0);
1861             dx21             = _mm256_sub_ps(ix2,jx1);
1862             dy21             = _mm256_sub_ps(iy2,jy1);
1863             dz21             = _mm256_sub_ps(iz2,jz1);
1864             dx22             = _mm256_sub_ps(ix2,jx2);
1865             dy22             = _mm256_sub_ps(iy2,jy2);
1866             dz22             = _mm256_sub_ps(iz2,jz2);
1867
1868             /* Calculate squared distance and things based on it */
1869             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1870             rsq01            = gmx_mm256_calc_rsq_ps(dx01,dy01,dz01);
1871             rsq02            = gmx_mm256_calc_rsq_ps(dx02,dy02,dz02);
1872             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1873             rsq11            = gmx_mm256_calc_rsq_ps(dx11,dy11,dz11);
1874             rsq12            = gmx_mm256_calc_rsq_ps(dx12,dy12,dz12);
1875             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1876             rsq21            = gmx_mm256_calc_rsq_ps(dx21,dy21,dz21);
1877             rsq22            = gmx_mm256_calc_rsq_ps(dx22,dy22,dz22);
1878
1879             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1880             rinv01           = gmx_mm256_invsqrt_ps(rsq01);
1881             rinv02           = gmx_mm256_invsqrt_ps(rsq02);
1882             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1883             rinv11           = gmx_mm256_invsqrt_ps(rsq11);
1884             rinv12           = gmx_mm256_invsqrt_ps(rsq12);
1885             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1886             rinv21           = gmx_mm256_invsqrt_ps(rsq21);
1887             rinv22           = gmx_mm256_invsqrt_ps(rsq22);
1888
1889             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1890             rinvsq01         = _mm256_mul_ps(rinv01,rinv01);
1891             rinvsq02         = _mm256_mul_ps(rinv02,rinv02);
1892             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1893             rinvsq11         = _mm256_mul_ps(rinv11,rinv11);
1894             rinvsq12         = _mm256_mul_ps(rinv12,rinv12);
1895             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1896             rinvsq21         = _mm256_mul_ps(rinv21,rinv21);
1897             rinvsq22         = _mm256_mul_ps(rinv22,rinv22);
1898
1899             fjx0             = _mm256_setzero_ps();
1900             fjy0             = _mm256_setzero_ps();
1901             fjz0             = _mm256_setzero_ps();
1902             fjx1             = _mm256_setzero_ps();
1903             fjy1             = _mm256_setzero_ps();
1904             fjz1             = _mm256_setzero_ps();
1905             fjx2             = _mm256_setzero_ps();
1906             fjy2             = _mm256_setzero_ps();
1907             fjz2             = _mm256_setzero_ps();
1908
1909             /**************************
1910              * CALCULATE INTERACTIONS *
1911              **************************/
1912
1913             r00              = _mm256_mul_ps(rsq00,rinv00);
1914             r00              = _mm256_andnot_ps(dummy_mask,r00);
1915
1916             /* EWALD ELECTROSTATICS */
1917             
1918             /* Analytical PME correction */
1919             zeta2            = _mm256_mul_ps(beta2,rsq00);
1920             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1921             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1922             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1923             felec            = _mm256_mul_ps(qq00,felec);
1924             
1925             /* LENNARD-JONES DISPERSION/REPULSION */
1926
1927             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1928             fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
1929
1930             fscal            = _mm256_add_ps(felec,fvdw);
1931
1932             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1933
1934             /* Calculate temporary vectorial force */
1935             tx               = _mm256_mul_ps(fscal,dx00);
1936             ty               = _mm256_mul_ps(fscal,dy00);
1937             tz               = _mm256_mul_ps(fscal,dz00);
1938
1939             /* Update vectorial force */
1940             fix0             = _mm256_add_ps(fix0,tx);
1941             fiy0             = _mm256_add_ps(fiy0,ty);
1942             fiz0             = _mm256_add_ps(fiz0,tz);
1943
1944             fjx0             = _mm256_add_ps(fjx0,tx);
1945             fjy0             = _mm256_add_ps(fjy0,ty);
1946             fjz0             = _mm256_add_ps(fjz0,tz);
1947
1948             /**************************
1949              * CALCULATE INTERACTIONS *
1950              **************************/
1951
1952             r01              = _mm256_mul_ps(rsq01,rinv01);
1953             r01              = _mm256_andnot_ps(dummy_mask,r01);
1954
1955             /* EWALD ELECTROSTATICS */
1956             
1957             /* Analytical PME correction */
1958             zeta2            = _mm256_mul_ps(beta2,rsq01);
1959             rinv3            = _mm256_mul_ps(rinvsq01,rinv01);
1960             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1961             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1962             felec            = _mm256_mul_ps(qq01,felec);
1963             
1964             fscal            = felec;
1965
1966             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1967
1968             /* Calculate temporary vectorial force */
1969             tx               = _mm256_mul_ps(fscal,dx01);
1970             ty               = _mm256_mul_ps(fscal,dy01);
1971             tz               = _mm256_mul_ps(fscal,dz01);
1972
1973             /* Update vectorial force */
1974             fix0             = _mm256_add_ps(fix0,tx);
1975             fiy0             = _mm256_add_ps(fiy0,ty);
1976             fiz0             = _mm256_add_ps(fiz0,tz);
1977
1978             fjx1             = _mm256_add_ps(fjx1,tx);
1979             fjy1             = _mm256_add_ps(fjy1,ty);
1980             fjz1             = _mm256_add_ps(fjz1,tz);
1981
1982             /**************************
1983              * CALCULATE INTERACTIONS *
1984              **************************/
1985
1986             r02              = _mm256_mul_ps(rsq02,rinv02);
1987             r02              = _mm256_andnot_ps(dummy_mask,r02);
1988
1989             /* EWALD ELECTROSTATICS */
1990             
1991             /* Analytical PME correction */
1992             zeta2            = _mm256_mul_ps(beta2,rsq02);
1993             rinv3            = _mm256_mul_ps(rinvsq02,rinv02);
1994             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1995             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1996             felec            = _mm256_mul_ps(qq02,felec);
1997             
1998             fscal            = felec;
1999
2000             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2001
2002             /* Calculate temporary vectorial force */
2003             tx               = _mm256_mul_ps(fscal,dx02);
2004             ty               = _mm256_mul_ps(fscal,dy02);
2005             tz               = _mm256_mul_ps(fscal,dz02);
2006
2007             /* Update vectorial force */
2008             fix0             = _mm256_add_ps(fix0,tx);
2009             fiy0             = _mm256_add_ps(fiy0,ty);
2010             fiz0             = _mm256_add_ps(fiz0,tz);
2011
2012             fjx2             = _mm256_add_ps(fjx2,tx);
2013             fjy2             = _mm256_add_ps(fjy2,ty);
2014             fjz2             = _mm256_add_ps(fjz2,tz);
2015
2016             /**************************
2017              * CALCULATE INTERACTIONS *
2018              **************************/
2019
2020             r10              = _mm256_mul_ps(rsq10,rinv10);
2021             r10              = _mm256_andnot_ps(dummy_mask,r10);
2022
2023             /* EWALD ELECTROSTATICS */
2024             
2025             /* Analytical PME correction */
2026             zeta2            = _mm256_mul_ps(beta2,rsq10);
2027             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
2028             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2029             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2030             felec            = _mm256_mul_ps(qq10,felec);
2031             
2032             fscal            = felec;
2033
2034             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2035
2036             /* Calculate temporary vectorial force */
2037             tx               = _mm256_mul_ps(fscal,dx10);
2038             ty               = _mm256_mul_ps(fscal,dy10);
2039             tz               = _mm256_mul_ps(fscal,dz10);
2040
2041             /* Update vectorial force */
2042             fix1             = _mm256_add_ps(fix1,tx);
2043             fiy1             = _mm256_add_ps(fiy1,ty);
2044             fiz1             = _mm256_add_ps(fiz1,tz);
2045
2046             fjx0             = _mm256_add_ps(fjx0,tx);
2047             fjy0             = _mm256_add_ps(fjy0,ty);
2048             fjz0             = _mm256_add_ps(fjz0,tz);
2049
2050             /**************************
2051              * CALCULATE INTERACTIONS *
2052              **************************/
2053
2054             r11              = _mm256_mul_ps(rsq11,rinv11);
2055             r11              = _mm256_andnot_ps(dummy_mask,r11);
2056
2057             /* EWALD ELECTROSTATICS */
2058             
2059             /* Analytical PME correction */
2060             zeta2            = _mm256_mul_ps(beta2,rsq11);
2061             rinv3            = _mm256_mul_ps(rinvsq11,rinv11);
2062             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2063             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2064             felec            = _mm256_mul_ps(qq11,felec);
2065             
2066             fscal            = felec;
2067
2068             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2069
2070             /* Calculate temporary vectorial force */
2071             tx               = _mm256_mul_ps(fscal,dx11);
2072             ty               = _mm256_mul_ps(fscal,dy11);
2073             tz               = _mm256_mul_ps(fscal,dz11);
2074
2075             /* Update vectorial force */
2076             fix1             = _mm256_add_ps(fix1,tx);
2077             fiy1             = _mm256_add_ps(fiy1,ty);
2078             fiz1             = _mm256_add_ps(fiz1,tz);
2079
2080             fjx1             = _mm256_add_ps(fjx1,tx);
2081             fjy1             = _mm256_add_ps(fjy1,ty);
2082             fjz1             = _mm256_add_ps(fjz1,tz);
2083
2084             /**************************
2085              * CALCULATE INTERACTIONS *
2086              **************************/
2087
2088             r12              = _mm256_mul_ps(rsq12,rinv12);
2089             r12              = _mm256_andnot_ps(dummy_mask,r12);
2090
2091             /* EWALD ELECTROSTATICS */
2092             
2093             /* Analytical PME correction */
2094             zeta2            = _mm256_mul_ps(beta2,rsq12);
2095             rinv3            = _mm256_mul_ps(rinvsq12,rinv12);
2096             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2097             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2098             felec            = _mm256_mul_ps(qq12,felec);
2099             
2100             fscal            = felec;
2101
2102             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2103
2104             /* Calculate temporary vectorial force */
2105             tx               = _mm256_mul_ps(fscal,dx12);
2106             ty               = _mm256_mul_ps(fscal,dy12);
2107             tz               = _mm256_mul_ps(fscal,dz12);
2108
2109             /* Update vectorial force */
2110             fix1             = _mm256_add_ps(fix1,tx);
2111             fiy1             = _mm256_add_ps(fiy1,ty);
2112             fiz1             = _mm256_add_ps(fiz1,tz);
2113
2114             fjx2             = _mm256_add_ps(fjx2,tx);
2115             fjy2             = _mm256_add_ps(fjy2,ty);
2116             fjz2             = _mm256_add_ps(fjz2,tz);
2117
2118             /**************************
2119              * CALCULATE INTERACTIONS *
2120              **************************/
2121
2122             r20              = _mm256_mul_ps(rsq20,rinv20);
2123             r20              = _mm256_andnot_ps(dummy_mask,r20);
2124
2125             /* EWALD ELECTROSTATICS */
2126             
2127             /* Analytical PME correction */
2128             zeta2            = _mm256_mul_ps(beta2,rsq20);
2129             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
2130             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2131             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2132             felec            = _mm256_mul_ps(qq20,felec);
2133             
2134             fscal            = felec;
2135
2136             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2137
2138             /* Calculate temporary vectorial force */
2139             tx               = _mm256_mul_ps(fscal,dx20);
2140             ty               = _mm256_mul_ps(fscal,dy20);
2141             tz               = _mm256_mul_ps(fscal,dz20);
2142
2143             /* Update vectorial force */
2144             fix2             = _mm256_add_ps(fix2,tx);
2145             fiy2             = _mm256_add_ps(fiy2,ty);
2146             fiz2             = _mm256_add_ps(fiz2,tz);
2147
2148             fjx0             = _mm256_add_ps(fjx0,tx);
2149             fjy0             = _mm256_add_ps(fjy0,ty);
2150             fjz0             = _mm256_add_ps(fjz0,tz);
2151
2152             /**************************
2153              * CALCULATE INTERACTIONS *
2154              **************************/
2155
2156             r21              = _mm256_mul_ps(rsq21,rinv21);
2157             r21              = _mm256_andnot_ps(dummy_mask,r21);
2158
2159             /* EWALD ELECTROSTATICS */
2160             
2161             /* Analytical PME correction */
2162             zeta2            = _mm256_mul_ps(beta2,rsq21);
2163             rinv3            = _mm256_mul_ps(rinvsq21,rinv21);
2164             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2165             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2166             felec            = _mm256_mul_ps(qq21,felec);
2167             
2168             fscal            = felec;
2169
2170             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2171
2172             /* Calculate temporary vectorial force */
2173             tx               = _mm256_mul_ps(fscal,dx21);
2174             ty               = _mm256_mul_ps(fscal,dy21);
2175             tz               = _mm256_mul_ps(fscal,dz21);
2176
2177             /* Update vectorial force */
2178             fix2             = _mm256_add_ps(fix2,tx);
2179             fiy2             = _mm256_add_ps(fiy2,ty);
2180             fiz2             = _mm256_add_ps(fiz2,tz);
2181
2182             fjx1             = _mm256_add_ps(fjx1,tx);
2183             fjy1             = _mm256_add_ps(fjy1,ty);
2184             fjz1             = _mm256_add_ps(fjz1,tz);
2185
2186             /**************************
2187              * CALCULATE INTERACTIONS *
2188              **************************/
2189
2190             r22              = _mm256_mul_ps(rsq22,rinv22);
2191             r22              = _mm256_andnot_ps(dummy_mask,r22);
2192
2193             /* EWALD ELECTROSTATICS */
2194             
2195             /* Analytical PME correction */
2196             zeta2            = _mm256_mul_ps(beta2,rsq22);
2197             rinv3            = _mm256_mul_ps(rinvsq22,rinv22);
2198             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
2199             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
2200             felec            = _mm256_mul_ps(qq22,felec);
2201             
2202             fscal            = felec;
2203
2204             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
2205
2206             /* Calculate temporary vectorial force */
2207             tx               = _mm256_mul_ps(fscal,dx22);
2208             ty               = _mm256_mul_ps(fscal,dy22);
2209             tz               = _mm256_mul_ps(fscal,dz22);
2210
2211             /* Update vectorial force */
2212             fix2             = _mm256_add_ps(fix2,tx);
2213             fiy2             = _mm256_add_ps(fiy2,ty);
2214             fiz2             = _mm256_add_ps(fiz2,tz);
2215
2216             fjx2             = _mm256_add_ps(fjx2,tx);
2217             fjy2             = _mm256_add_ps(fjy2,ty);
2218             fjz2             = _mm256_add_ps(fjz2,tz);
2219
2220             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
2221             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
2222             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
2223             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
2224             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
2225             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
2226             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
2227             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
2228
2229             gmx_mm256_decrement_3rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,
2230                                                       fjx0,fjy0,fjz0,fjx1,fjy1,fjz1,fjx2,fjy2,fjz2);
2231
2232             /* Inner loop uses 520 flops */
2233         }
2234
2235         /* End of innermost loop */
2236
2237         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
2238                                                  f+i_coord_offset,fshift+i_shift_offset);
2239
2240         /* Increment number of inner iterations */
2241         inneriter                  += j_index_end - j_index_start;
2242
2243         /* Outer loop uses 18 flops */
2244     }
2245
2246     /* Increment number of outer iterations */
2247     outeriter        += nri;
2248
2249     /* Update outer/inner flops */
2250
2251     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3W3_F,outeriter*18 + inneriter*520);
2252 }