/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*
 * Note: this file was generated by the GROMACS avx_256_single kernel generator.
 */
#include "config.h"

#include <math.h>

#include "../nb_kernel.h"
#include "types/simple.h"
#include "gromacs/math/vec.h"
#include "nrnb.h"

#include "gromacs/simd/math_x86_avx_256_single.h"
#include "kernelutil_x86_avx_256_single.h"

/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3P1_VF_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LJEwald
 * Geometry:                   Water3-Particle
 * Calculate force/pot:        PotentialAndForce
 */
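/*
 * Naming sketch (from the generator tags above): "ElecEwSh" is Ewald
 * real-space electrostatics with the potential shifted to zero at the
 * cutoff, "VdwLJEwSh" is LJ-PME (Ewald treatment of dispersion) with the
 * corresponding shift, and "GeomW3P1" means the i particles are 3-site
 * waters interacting with single j particles, so only water atom 0
 * carries Lennard-Jones parameters in this kernel.
 */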
void
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3P1_VF_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
     * jnr indices corresponding to data put in the eight positions of the SIMD register.
     */
    int              i_shift_offset,i_coord_offset,outeriter,inneriter;
    int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
    int              jnrA,jnrB,jnrC,jnrD;
    int              jnrE,jnrF,jnrG,jnrH;
    int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
    int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
    int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
    int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
    int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
    real             rcutoff_scalar;
    real             *shiftvec,*fshift,*x,*f;
    real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
    real             scratch[4*DIM];
    __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
    real *           vdwioffsetptr0;
    real *           vdwgridioffsetptr0;
    __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
    real *           vdwioffsetptr1;
    real *           vdwgridioffsetptr1;
    __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
    real *           vdwioffsetptr2;
    real *           vdwgridioffsetptr2;
    __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
    int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
    __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
    __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
    __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
    __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
    __m256           velec,felec,velecsum,facel,crf,krf,krf2;
    real             *charge;
    int              nvdwtype;
    __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
    int              *vdwtype;
    real             *vdwparam;
    __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
    __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
    __m256           c6grid_00;
    __m256           c6grid_10;
    __m256           c6grid_20;
    real             *vdwgridparam;
    __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
    __m256           one_half  = _mm256_set1_ps(0.5);
    __m256           minus_one = _mm256_set1_ps(-1.0);
    __m256i          ewitab;
    __m128i          ewitab_lo,ewitab_hi;
    __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
    __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
    real             *ewtab;
    __m256           dummy_mask,cutoff_mask;
    __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
    __m256           one     = _mm256_set1_ps(1.0);
    __m256           two     = _mm256_set1_ps(2.0);
    x                = xx[0];
    f                = ff[0];

    nri              = nlist->nri;
    iinr             = nlist->iinr;
    jindex           = nlist->jindex;
    jjnr             = nlist->jjnr;
    shiftidx         = nlist->shift;
    gid              = nlist->gid;
    shiftvec         = fr->shift_vec[0];
    fshift           = fr->fshift[0];
    facel            = _mm256_set1_ps(fr->epsfac);
    charge           = mdatoms->chargeA;
    nvdwtype         = fr->ntype;
    vdwparam         = fr->nbfp;
    vdwtype          = mdatoms->typeA;
    vdwgridparam     = fr->ljpme_c6grid;
    sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
    ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
    ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));

    sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
    beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
    beta2            = _mm256_mul_ps(beta,beta);
    beta3            = _mm256_mul_ps(beta,beta2);

    ewtab            = fr->ic->tabq_coul_FDV0;
    ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
    ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);

    /* Setup water-specific parameters */
    inr              = nlist->iinr[0];
    iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
    iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
    iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
    vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
    vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
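    /* Note: vdwparam (fr->nbfp) stores one (c6,c12) pair per i/j type
     * combination, hence the stride of 2*nvdwtype per i type; vdwgridparam
     * holds the corresponding per-pair c6 used for the LJ-PME grid part.
     */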

    /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
    rcutoff_scalar   = fr->rcoulomb;
    rcutoff          = _mm256_set1_ps(rcutoff_scalar);
    rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);

    sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
    rvdw             = _mm256_set1_ps(fr->rvdw);

    /* Avoid stupid compiler warnings */
    jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
    j_coord_offsetA = 0;
    j_coord_offsetB = 0;
    j_coord_offsetC = 0;
    j_coord_offsetD = 0;
    j_coord_offsetE = 0;
    j_coord_offsetF = 0;
    j_coord_offsetG = 0;
    j_coord_offsetH = 0;

    outeriter        = 0;
    inneriter        = 0;

    for(iidx=0;iidx<4*DIM;iidx++)
    {
        scratch[iidx] = 0.0;
    }
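    /* scratch serves as a dummy force target: in the masked epilogue loop
     * below, forces for padded (negative) j entries are scattered here
     * instead of into the real force array.
     */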

    /* Start outer loop over neighborlists */
    for(iidx=0; iidx<nri; iidx++)
    {
        /* Load shift vector for this list */
        i_shift_offset   = DIM*shiftidx[iidx];

        /* Load limits for loop over neighbors */
        j_index_start    = jindex[iidx];
        j_index_end      = jindex[iidx+1];

        /* Get outer coordinate index */
        inr              = iinr[iidx];
        i_coord_offset   = DIM*inr;

        /* Load i particle coords and add shift vector */
        gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
                                                    &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);

        fix0             = _mm256_setzero_ps();
        fiy0             = _mm256_setzero_ps();
        fiz0             = _mm256_setzero_ps();
        fix1             = _mm256_setzero_ps();
        fiy1             = _mm256_setzero_ps();
        fiz1             = _mm256_setzero_ps();
        fix2             = _mm256_setzero_ps();
        fiy2             = _mm256_setzero_ps();
        fiz2             = _mm256_setzero_ps();

        /* Reset potential sums */
        velecsum         = _mm256_setzero_ps();
        vvdwsum          = _mm256_setzero_ps();

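        /* The neighbor list is padded so that full blocks of eight j entries
         * can be processed per iteration; the loop below only runs while the
         * last entry of a block is a real atom (index >= 0), and any partially
         * filled final block is handled by the masked epilogue that follows.
         */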
        /* Start inner kernel loop */
        for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
        {

            /* Get j neighbor index, and coordinate index */
            jnrA             = jjnr[jidx];
            jnrB             = jjnr[jidx+1];
            jnrC             = jjnr[jidx+2];
            jnrD             = jjnr[jidx+3];
            jnrE             = jjnr[jidx+4];
            jnrF             = jjnr[jidx+5];
            jnrG             = jjnr[jidx+6];
            jnrH             = jjnr[jidx+7];
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
                                                                 charge+jnrC+0,charge+jnrD+0,
                                                                 charge+jnrE+0,charge+jnrF+0,
                                                                 charge+jnrG+0,charge+jnrH+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];
            vdwjidx0E        = 2*vdwtype[jnrE+0];
            vdwjidx0F        = 2*vdwtype[jnrF+0];
            vdwjidx0G        = 2*vdwtype[jnrG+0];
            vdwjidx0H        = 2*vdwtype[jnrH+0];

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_ps(iq0,jq0);
            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            vdwioffsetptr0+vdwjidx0E,
                                            vdwioffsetptr0+vdwjidx0F,
                                            vdwioffsetptr0+vdwjidx0G,
                                            vdwioffsetptr0+vdwjidx0H,
                                            &c6_00,&c12_00);

            c6grid_00       = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
                                                                  vdwgridioffsetptr0+vdwjidx0B,
                                                                  vdwgridioffsetptr0+vdwjidx0C,
                                                                  vdwgridioffsetptr0+vdwjidx0D,
                                                                  vdwgridioffsetptr0+vdwjidx0E,
                                                                  vdwgridioffsetptr0+vdwjidx0F,
                                                                  vdwgridioffsetptr0+vdwjidx0G,
                                                                  vdwgridioffsetptr0+vdwjidx0H);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
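            /* Sketch of the shifted Ewald real-space form evaluated here
             * (assuming gmx_mm256_pmecorrF_ps/gmx_mm256_pmecorrV_ps return
             * the usual analytical PME correction factors):
             *   velec ~ qq * ( erfc(beta*r)/r - sh_ewald )
             *   felec ~ qq * ( erfc(beta*r)/r + 2*beta/sqrt(pi)*exp(-(beta*r)^2) ) / r^2
             * so multiplying felec by the displacement components gives the force.
             */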
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
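            /* The constant terms subtracted above (c12*sh_vdw_invrcut6^2,
             * c6*sh_vdw_invrcut6 and c6grid*sh_lj_ewald) implement the
             * potential-shift modifier: they move the LJ/LJ-PME potential to
             * zero at the cutoff without affecting the force computed below.
             */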
            /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);

            /* Compute parameters for interactions between i and j atoms */
            qq10             = _mm256_mul_ps(iq1,jq0);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);

            /* Compute parameters for interactions between i and j atoms */
            qq20             = _mm256_mul_ps(iq2,jq0);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            fjptrA             = f+j_coord_offsetA;
            fjptrB             = f+j_coord_offsetB;
            fjptrC             = f+j_coord_offsetC;
            fjptrD             = f+j_coord_offsetD;
            fjptrE             = f+j_coord_offsetE;
            fjptrF             = f+j_coord_offsetF;
            fjptrG             = f+j_coord_offsetG;
            fjptrH             = f+j_coord_offsetH;

            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);

            /* Inner loop uses 366 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* The sign of each element will be negative for non-real (padding) atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
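            /* Padded entries were clamped to atom 0 above so that the gather
             * loads stay within bounds; their contributions are removed with
             * dummy_mask and their forces are redirected to scratch below.
             */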
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
                                                                 charge+jnrC+0,charge+jnrD+0,
                                                                 charge+jnrE+0,charge+jnrF+0,
                                                                 charge+jnrG+0,charge+jnrH+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];
            vdwjidx0E        = 2*vdwtype[jnrE+0];
            vdwjidx0F        = 2*vdwtype[jnrF+0];
            vdwjidx0G        = 2*vdwtype[jnrG+0];
            vdwjidx0H        = 2*vdwtype[jnrH+0];

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_ps(iq0,jq0);
            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            vdwioffsetptr0+vdwjidx0E,
                                            vdwioffsetptr0+vdwjidx0F,
                                            vdwioffsetptr0+vdwjidx0G,
                                            vdwioffsetptr0+vdwjidx0H,
                                            &c6_00,&c12_00);

            c6grid_00       = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
                                                                  vdwgridioffsetptr0+vdwjidx0B,
                                                                  vdwgridioffsetptr0+vdwjidx0C,
                                                                  vdwgridioffsetptr0+vdwjidx0D,
                                                                  vdwgridioffsetptr0+vdwjidx0E,
                                                                  vdwgridioffsetptr0+vdwjidx0F,
                                                                  vdwgridioffsetptr0+vdwjidx0G,
                                                                  vdwgridioffsetptr0+vdwjidx0H);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq00,velec);

            /* Analytical LJ-PME */
            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
            ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
            exponent         = gmx_simd_exp_r(ewcljrsq);
            /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
            poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
            /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
            vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
            vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
            vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
                                          _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
            /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);
            vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
            vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
            vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);
            r10              = _mm256_andnot_ps(dummy_mask,r10);

            /* Compute parameters for interactions between i and j atoms */
            qq10             = _mm256_mul_ps(iq1,jq0);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq10,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);
            r20              = _mm256_andnot_ps(dummy_mask,r20);

            /* Compute parameters for interactions between i and j atoms */
            qq20             = _mm256_mul_ps(iq2,jq0);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);
            pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
            pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
            velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
            velec            = _mm256_mul_ps(qq20,velec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            /* Update potential sum for this i atom from the interaction with this j atom. */
            velec            = _mm256_and_ps(velec,cutoff_mask);
            velec            = _mm256_andnot_ps(dummy_mask,velec);
            velecsum         = _mm256_add_ps(velecsum,velec);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);

            /* Inner loop uses 369 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);

        ggid                        = gid[iidx];
        /* Update potential energies */
        gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
        gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 20 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*369);
}
/*
 * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3P1_F_avx_256_single
 * Electrostatics interaction: Ewald
 * VdW interaction:            LJEwald
 * Geometry:                   Water3-Particle
 * Calculate force/pot:        Force
 */
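/*
 * This force-only variant follows the same structure as the VF kernel above,
 * but skips the potential accumulation (velecsum/vvdwsum) and the shift
 * constants that only enter the energy, which makes the inner loop cheaper.
 */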
void
nb_kernel_ElecEwSh_VdwLJEwSh_GeomW3P1_F_avx_256_single
                    (t_nblist                    * gmx_restrict       nlist,
                     rvec                        * gmx_restrict          xx,
                     rvec                        * gmx_restrict          ff,
                     t_forcerec                  * gmx_restrict          fr,
                     t_mdatoms                   * gmx_restrict     mdatoms,
                     nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
                     t_nrnb                      * gmx_restrict        nrnb)
{
    /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or
     * just 0 for non-waters.
     * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
     * jnr indices corresponding to data put in the eight positions of the SIMD register.
     */
839     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
840     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
841     int              jnrA,jnrB,jnrC,jnrD;
842     int              jnrE,jnrF,jnrG,jnrH;
843     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
844     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
845     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
846     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
847     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
848     real             rcutoff_scalar;
849     real             *shiftvec,*fshift,*x,*f;
850     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
851     real             scratch[4*DIM];
852     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
853     real *           vdwioffsetptr0;
854     real *           vdwgridioffsetptr0;
855     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
856     real *           vdwioffsetptr1;
857     real *           vdwgridioffsetptr1;
858     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
859     real *           vdwioffsetptr2;
860     real *           vdwgridioffsetptr2;
861     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
862     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
863     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
864     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
865     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
866     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
867     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
868     real             *charge;
869     int              nvdwtype;
870     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
871     int              *vdwtype;
872     real             *vdwparam;
873     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
874     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
875     __m256           c6grid_00;
876     __m256           c6grid_10;
877     __m256           c6grid_20;
878     real             *vdwgridparam;
879     __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
880     __m256           one_half  = _mm256_set1_ps(0.5);
881     __m256           minus_one = _mm256_set1_ps(-1.0);
882     __m256i          ewitab;
883     __m128i          ewitab_lo,ewitab_hi;
884     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
885     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
886     real             *ewtab;
887     __m256           dummy_mask,cutoff_mask;
888     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
889     __m256           one     = _mm256_set1_ps(1.0);
890     __m256           two     = _mm256_set1_ps(2.0);
891     x                = xx[0];
892     f                = ff[0];
893
894     nri              = nlist->nri;
895     iinr             = nlist->iinr;
896     jindex           = nlist->jindex;
897     jjnr             = nlist->jjnr;
898     shiftidx         = nlist->shift;
899     gid              = nlist->gid;
900     shiftvec         = fr->shift_vec[0];
901     fshift           = fr->fshift[0];
902     facel            = _mm256_set1_ps(fr->epsfac);
903     charge           = mdatoms->chargeA;
904     nvdwtype         = fr->ntype;
905     vdwparam         = fr->nbfp;
906     vdwtype          = mdatoms->typeA;
907     vdwgridparam     = fr->ljpme_c6grid;
908     sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
909     ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
910     ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
911
912     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
913     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
914     beta2            = _mm256_mul_ps(beta,beta);
915     beta3            = _mm256_mul_ps(beta,beta2);
916
917     ewtab            = fr->ic->tabq_coul_F;
918     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
919     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
920
921     /* Setup water-specific parameters */
922     inr              = nlist->iinr[0];
923     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
924     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
925     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
926     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
927     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
928
929     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
930     rcutoff_scalar   = fr->rcoulomb;
931     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
932     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
933
934     sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
935     rvdw             = _mm256_set1_ps(fr->rvdw);
936
937     /* Avoid stupid compiler warnings */
938     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
939     j_coord_offsetA = 0;
940     j_coord_offsetB = 0;
941     j_coord_offsetC = 0;
942     j_coord_offsetD = 0;
943     j_coord_offsetE = 0;
944     j_coord_offsetF = 0;
945     j_coord_offsetG = 0;
946     j_coord_offsetH = 0;
947
948     outeriter        = 0;
949     inneriter        = 0;
950
951     for(iidx=0;iidx<4*DIM;iidx++)
952     {
953         scratch[iidx] = 0.0;
954     }
955
956     /* Start outer loop over neighborlists */
957     for(iidx=0; iidx<nri; iidx++)
958     {
959         /* Load shift vector for this list */
960         i_shift_offset   = DIM*shiftidx[iidx];
961
962         /* Load limits for loop over neighbors */
963         j_index_start    = jindex[iidx];
964         j_index_end      = jindex[iidx+1];
965
966         /* Get outer coordinate index */
967         inr              = iinr[iidx];
968         i_coord_offset   = DIM*inr;
969
970         /* Load i particle coords and add shift vector */
971         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
972                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
973
974         fix0             = _mm256_setzero_ps();
975         fiy0             = _mm256_setzero_ps();
976         fiz0             = _mm256_setzero_ps();
977         fix1             = _mm256_setzero_ps();
978         fiy1             = _mm256_setzero_ps();
979         fiz1             = _mm256_setzero_ps();
980         fix2             = _mm256_setzero_ps();
981         fiy2             = _mm256_setzero_ps();
982         fiz2             = _mm256_setzero_ps();
983
984         /* Start inner kernel loop */
985         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
986         {
987
988             /* Get j neighbor index, and coordinate index */
989             jnrA             = jjnr[jidx];
990             jnrB             = jjnr[jidx+1];
991             jnrC             = jjnr[jidx+2];
992             jnrD             = jjnr[jidx+3];
993             jnrE             = jjnr[jidx+4];
994             jnrF             = jjnr[jidx+5];
995             jnrG             = jjnr[jidx+6];
996             jnrH             = jjnr[jidx+7];
997             j_coord_offsetA  = DIM*jnrA;
998             j_coord_offsetB  = DIM*jnrB;
999             j_coord_offsetC  = DIM*jnrC;
1000             j_coord_offsetD  = DIM*jnrD;
1001             j_coord_offsetE  = DIM*jnrE;
1002             j_coord_offsetF  = DIM*jnrF;
1003             j_coord_offsetG  = DIM*jnrG;
1004             j_coord_offsetH  = DIM*jnrH;
1005
1006             /* load j atom coordinates */
1007             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1008                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1009                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1010                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1011                                                  &jx0,&jy0,&jz0);
1012
1013             /* Calculate displacement vector */
1014             dx00             = _mm256_sub_ps(ix0,jx0);
1015             dy00             = _mm256_sub_ps(iy0,jy0);
1016             dz00             = _mm256_sub_ps(iz0,jz0);
1017             dx10             = _mm256_sub_ps(ix1,jx0);
1018             dy10             = _mm256_sub_ps(iy1,jy0);
1019             dz10             = _mm256_sub_ps(iz1,jz0);
1020             dx20             = _mm256_sub_ps(ix2,jx0);
1021             dy20             = _mm256_sub_ps(iy2,jy0);
1022             dz20             = _mm256_sub_ps(iz2,jz0);
1023
1024             /* Calculate squared distance and things based on it */
1025             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1026             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1027             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1028
1029             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1030             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1031             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1032
1033             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1034             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1035             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1036
1037             /* Load parameters for j particles */
1038             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1039                                                                  charge+jnrC+0,charge+jnrD+0,
1040                                                                  charge+jnrE+0,charge+jnrF+0,
1041                                                                  charge+jnrG+0,charge+jnrH+0);
1042             vdwjidx0A        = 2*vdwtype[jnrA+0];
1043             vdwjidx0B        = 2*vdwtype[jnrB+0];
1044             vdwjidx0C        = 2*vdwtype[jnrC+0];
1045             vdwjidx0D        = 2*vdwtype[jnrD+0];
1046             vdwjidx0E        = 2*vdwtype[jnrE+0];
1047             vdwjidx0F        = 2*vdwtype[jnrF+0];
1048             vdwjidx0G        = 2*vdwtype[jnrG+0];
1049             vdwjidx0H        = 2*vdwtype[jnrH+0];
1050
1051             fjx0             = _mm256_setzero_ps();
1052             fjy0             = _mm256_setzero_ps();
1053             fjz0             = _mm256_setzero_ps();
1054
1055             /**************************
1056              * CALCULATE INTERACTIONS *
1057              **************************/
1058
1059             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1060             {
1061
1062             r00              = _mm256_mul_ps(rsq00,rinv00);
1063
1064             /* Compute parameters for interactions between i and j atoms */
1065             qq00             = _mm256_mul_ps(iq0,jq0);
1066             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1067                                             vdwioffsetptr0+vdwjidx0B,
1068                                             vdwioffsetptr0+vdwjidx0C,
1069                                             vdwioffsetptr0+vdwjidx0D,
1070                                             vdwioffsetptr0+vdwjidx0E,
1071                                             vdwioffsetptr0+vdwjidx0F,
1072                                             vdwioffsetptr0+vdwjidx0G,
1073                                             vdwioffsetptr0+vdwjidx0H,
1074                                             &c6_00,&c12_00);
1075
1076             c6grid_00       = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
1077                                                                   vdwgridioffsetptr0+vdwjidx0B,
1078                                                                   vdwgridioffsetptr0+vdwjidx0C,
1079                                                                   vdwgridioffsetptr0+vdwjidx0D,
1080                                                                   vdwgridioffsetptr0+vdwjidx0E,
1081                                                                   vdwgridioffsetptr0+vdwjidx0F,
1082                                                                   vdwgridioffsetptr0+vdwjidx0G,
1083                                                                   vdwgridioffsetptr0+vdwjidx0H);
1084
1085             /* EWALD ELECTROSTATICS */
1086             
1087             /* Analytical PME correction */
1088             zeta2            = _mm256_mul_ps(beta2,rsq00);
1089             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1090             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1091             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1092             felec            = _mm256_mul_ps(qq00,felec);
1093             
1094             /* Analytical LJ-PME */
1095             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1096             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
1097             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1098             exponent         = gmx_simd_exp_r(ewcljrsq);
1099             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1100             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1101             /* f6A = C6grid * (1 - poly) */
1102             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1103             /* f6B = (C6grid/6) * exponent * ewclj6 */
1104             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1105             /* fvdw = ((C12/r^6 - (C6 - f6A))/r^6 + f6B)/r^2 */
1106             fvdw              = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
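            /* Like felec, fvdw is a scalar force divided by r. The f6A/f6B terms carry
             * the LJ-PME (grid) contribution to the dispersion force on top of the
             * plain C6/C12 Lennard-Jones part.
             */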
1107
1108             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1109
1110             fscal            = _mm256_add_ps(felec,fvdw);
1111
1112             fscal            = _mm256_and_ps(fscal,cutoff_mask);
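            /* Eight j atoms are processed per SIMD register, and only some of them may
             * lie inside the cutoff; cutoff_mask zeroes fscal in the lanes where
             * rsq00 >= rcutoff2 so those pairs contribute no force.
             */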
1113
1114             /* Calculate temporary vectorial force */
1115             tx               = _mm256_mul_ps(fscal,dx00);
1116             ty               = _mm256_mul_ps(fscal,dy00);
1117             tz               = _mm256_mul_ps(fscal,dz00);
1118
1119             /* Update vectorial force */
1120             fix0             = _mm256_add_ps(fix0,tx);
1121             fiy0             = _mm256_add_ps(fiy0,ty);
1122             fiz0             = _mm256_add_ps(fiz0,tz);
1123
1124             fjx0             = _mm256_add_ps(fjx0,tx);
1125             fjy0             = _mm256_add_ps(fjy0,ty);
1126             fjz0             = _mm256_add_ps(fjz0,tz);
1127
1128             }
1129
1130             /**************************
1131              * CALCULATE INTERACTIONS *
1132              **************************/
1133
1134             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1135             {
1136
1137             r10              = _mm256_mul_ps(rsq10,rinv10);
1138
1139             /* Compute parameters for interactions between i and j atoms */
1140             qq10             = _mm256_mul_ps(iq1,jq0);
1141
1142             /* EWALD ELECTROSTATICS */
1143             
1144             /* Analytical PME correction */
1145             zeta2            = _mm256_mul_ps(beta2,rsq10);
1146             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1147             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1148             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1149             felec            = _mm256_mul_ps(qq10,felec);
1150             
1151             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1152
1153             fscal            = felec;
1154
1155             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1156
1157             /* Calculate temporary vectorial force */
1158             tx               = _mm256_mul_ps(fscal,dx10);
1159             ty               = _mm256_mul_ps(fscal,dy10);
1160             tz               = _mm256_mul_ps(fscal,dz10);
1161
1162             /* Update vectorial force */
1163             fix1             = _mm256_add_ps(fix1,tx);
1164             fiy1             = _mm256_add_ps(fiy1,ty);
1165             fiz1             = _mm256_add_ps(fiz1,tz);
1166
1167             fjx0             = _mm256_add_ps(fjx0,tx);
1168             fjy0             = _mm256_add_ps(fjy0,ty);
1169             fjz0             = _mm256_add_ps(fjz0,tz);
1170
1171             }
1172
1173             /**************************
1174              * CALCULATE INTERACTIONS *
1175              **************************/
1176
1177             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1178             {
1179
1180             r20              = _mm256_mul_ps(rsq20,rinv20);
1181
1182             /* Compute parameters for interactions between i and j atoms */
1183             qq20             = _mm256_mul_ps(iq2,jq0);
1184
1185             /* EWALD ELECTROSTATICS */
1186             
1187             /* Analytical PME correction */
1188             zeta2            = _mm256_mul_ps(beta2,rsq20);
1189             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1190             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1191             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1192             felec            = _mm256_mul_ps(qq20,felec);
1193             
1194             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1195
1196             fscal            = felec;
1197
1198             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1199
1200             /* Calculate temporary vectorial force */
1201             tx               = _mm256_mul_ps(fscal,dx20);
1202             ty               = _mm256_mul_ps(fscal,dy20);
1203             tz               = _mm256_mul_ps(fscal,dz20);
1204
1205             /* Update vectorial force */
1206             fix2             = _mm256_add_ps(fix2,tx);
1207             fiy2             = _mm256_add_ps(fiy2,ty);
1208             fiz2             = _mm256_add_ps(fiz2,tz);
1209
1210             fjx0             = _mm256_add_ps(fjx0,tx);
1211             fjy0             = _mm256_add_ps(fjy0,ty);
1212             fjz0             = _mm256_add_ps(fjz0,tz);
1213
1214             }
1215
1216             fjptrA             = f+j_coord_offsetA;
1217             fjptrB             = f+j_coord_offsetB;
1218             fjptrC             = f+j_coord_offsetC;
1219             fjptrD             = f+j_coord_offsetD;
1220             fjptrE             = f+j_coord_offsetE;
1221             fjptrF             = f+j_coord_offsetF;
1222             fjptrG             = f+j_coord_offsetG;
1223             fjptrH             = f+j_coord_offsetH;
1224
1225             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
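            /* The accumulated j forces are subtracted from the force array here; this
             * is the counterpart of the additions to the i accumulators above
             * (Newton's third law), scattered back to the eight j atoms in one call.
             */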
1226
1227             /* Inner loop uses 203 flops */
1228         }
1229
1230         if(jidx<j_index_end)
1231         {
1232
1233             /* Get j neighbor index, and coordinate index */
1234             jnrlistA         = jjnr[jidx];
1235             jnrlistB         = jjnr[jidx+1];
1236             jnrlistC         = jjnr[jidx+2];
1237             jnrlistD         = jjnr[jidx+3];
1238             jnrlistE         = jjnr[jidx+4];
1239             jnrlistF         = jjnr[jidx+5];
1240             jnrlistG         = jjnr[jidx+6];
1241             jnrlistH         = jjnr[jidx+7];
1242             /* The sign of each jjnr entry will be negative for non-real (padding) atoms.
1243              * The mask below will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1244              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
1245              */
1246             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1247                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1248                                             
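            /* The two 128-bit halves compare the raw jjnr entries against zero
             * (padding entries are negative), and gmx_mm256_set_m128 joins them into
             * one 256-bit dummy_mask that is all-ones in the padded lanes.
             */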
1249             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1250             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1251             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1252             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1253             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
1254             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
1255             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
1256             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
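            /* Negative (padding) indices are clamped to 0 so the coordinate and
             * parameter gathers below read valid memory; their contributions are
             * removed again via dummy_mask before any force is stored.
             */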
1257             j_coord_offsetA  = DIM*jnrA;
1258             j_coord_offsetB  = DIM*jnrB;
1259             j_coord_offsetC  = DIM*jnrC;
1260             j_coord_offsetD  = DIM*jnrD;
1261             j_coord_offsetE  = DIM*jnrE;
1262             j_coord_offsetF  = DIM*jnrF;
1263             j_coord_offsetG  = DIM*jnrG;
1264             j_coord_offsetH  = DIM*jnrH;
1265
1266             /* load j atom coordinates */
1267             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1268                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1269                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1270                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1271                                                  &jx0,&jy0,&jz0);
1272
1273             /* Calculate displacement vector */
1274             dx00             = _mm256_sub_ps(ix0,jx0);
1275             dy00             = _mm256_sub_ps(iy0,jy0);
1276             dz00             = _mm256_sub_ps(iz0,jz0);
1277             dx10             = _mm256_sub_ps(ix1,jx0);
1278             dy10             = _mm256_sub_ps(iy1,jy0);
1279             dz10             = _mm256_sub_ps(iz1,jz0);
1280             dx20             = _mm256_sub_ps(ix2,jx0);
1281             dy20             = _mm256_sub_ps(iy2,jy0);
1282             dz20             = _mm256_sub_ps(iz2,jz0);
1283
1284             /* Calculate squared distance and things based on it */
1285             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1286             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1287             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1288
1289             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1290             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1291             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1292
1293             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1294             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1295             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1296
1297             /* Load parameters for j particles */
1298             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1299                                                                  charge+jnrC+0,charge+jnrD+0,
1300                                                                  charge+jnrE+0,charge+jnrF+0,
1301                                                                  charge+jnrG+0,charge+jnrH+0);
1302             vdwjidx0A        = 2*vdwtype[jnrA+0];
1303             vdwjidx0B        = 2*vdwtype[jnrB+0];
1304             vdwjidx0C        = 2*vdwtype[jnrC+0];
1305             vdwjidx0D        = 2*vdwtype[jnrD+0];
1306             vdwjidx0E        = 2*vdwtype[jnrE+0];
1307             vdwjidx0F        = 2*vdwtype[jnrF+0];
1308             vdwjidx0G        = 2*vdwtype[jnrG+0];
1309             vdwjidx0H        = 2*vdwtype[jnrH+0];
1310
1311             fjx0             = _mm256_setzero_ps();
1312             fjy0             = _mm256_setzero_ps();
1313             fjz0             = _mm256_setzero_ps();
1314
1315             /**************************
1316              * CALCULATE INTERACTIONS *
1317              **************************/
1318
1319             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1320             {
1321
1322             r00              = _mm256_mul_ps(rsq00,rinv00);
1323             r00              = _mm256_andnot_ps(dummy_mask,r00);
1324
1325             /* Compute parameters for interactions between i and j atoms */
1326             qq00             = _mm256_mul_ps(iq0,jq0);
1327             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1328                                             vdwioffsetptr0+vdwjidx0B,
1329                                             vdwioffsetptr0+vdwjidx0C,
1330                                             vdwioffsetptr0+vdwjidx0D,
1331                                             vdwioffsetptr0+vdwjidx0E,
1332                                             vdwioffsetptr0+vdwjidx0F,
1333                                             vdwioffsetptr0+vdwjidx0G,
1334                                             vdwioffsetptr0+vdwjidx0H,
1335                                             &c6_00,&c12_00);
1336
1337             c6grid_00       = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
1338                                                                   vdwgridioffsetptr0+vdwjidx0B,
1339                                                                   vdwgridioffsetptr0+vdwjidx0C,
1340                                                                   vdwgridioffsetptr0+vdwjidx0D,
1341                                                                   vdwgridioffsetptr0+vdwjidx0E,
1342                                                                   vdwgridioffsetptr0+vdwjidx0F,
1343                                                                   vdwgridioffsetptr0+vdwjidx0G,
1344                                                                   vdwgridioffsetptr0+vdwjidx0H);
1345
1346             /* EWALD ELECTROSTATICS */
1347             
1348             /* Analytical PME correction */
1349             zeta2            = _mm256_mul_ps(beta2,rsq00);
1350             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1351             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1352             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1353             felec            = _mm256_mul_ps(qq00,felec);
1354             
1355             /* Analytical LJ-PME */
1356             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1357             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
1358             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1359             exponent         = gmx_simd_exp_r(ewcljrsq);
1360             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1361             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1362             /* f6A = C6grid * (1 - poly) */
1363             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1364             /* f6B = (C6grid/6) * exponent * ewclj6 */
1365             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1366             /* fvdw = ((C12/r^6 - (C6 - f6A))/r^6 + f6B)/r^2 */
1367             fvdw              = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1368
1369             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1370
1371             fscal            = _mm256_add_ps(felec,fvdw);
1372
1373             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1374
1375             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
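            /* Clear fscal in the padded lanes so the dummy j entries of this partial
             * octet add nothing to either the i or the j force accumulators.
             */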
1376
1377             /* Calculate temporary vectorial force */
1378             tx               = _mm256_mul_ps(fscal,dx00);
1379             ty               = _mm256_mul_ps(fscal,dy00);
1380             tz               = _mm256_mul_ps(fscal,dz00);
1381
1382             /* Update vectorial force */
1383             fix0             = _mm256_add_ps(fix0,tx);
1384             fiy0             = _mm256_add_ps(fiy0,ty);
1385             fiz0             = _mm256_add_ps(fiz0,tz);
1386
1387             fjx0             = _mm256_add_ps(fjx0,tx);
1388             fjy0             = _mm256_add_ps(fjy0,ty);
1389             fjz0             = _mm256_add_ps(fjz0,tz);
1390
1391             }
1392
1393             /**************************
1394              * CALCULATE INTERACTIONS *
1395              **************************/
1396
1397             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1398             {
1399
1400             r10              = _mm256_mul_ps(rsq10,rinv10);
1401             r10              = _mm256_andnot_ps(dummy_mask,r10);
1402
1403             /* Compute parameters for interactions between i and j atoms */
1404             qq10             = _mm256_mul_ps(iq1,jq0);
1405
1406             /* EWALD ELECTROSTATICS */
1407             
1408             /* Analytical PME correction */
1409             zeta2            = _mm256_mul_ps(beta2,rsq10);
1410             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1411             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1412             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1413             felec            = _mm256_mul_ps(qq10,felec);
1414             
1415             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1416
1417             fscal            = felec;
1418
1419             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1420
1421             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1422
1423             /* Calculate temporary vectorial force */
1424             tx               = _mm256_mul_ps(fscal,dx10);
1425             ty               = _mm256_mul_ps(fscal,dy10);
1426             tz               = _mm256_mul_ps(fscal,dz10);
1427
1428             /* Update vectorial force */
1429             fix1             = _mm256_add_ps(fix1,tx);
1430             fiy1             = _mm256_add_ps(fiy1,ty);
1431             fiz1             = _mm256_add_ps(fiz1,tz);
1432
1433             fjx0             = _mm256_add_ps(fjx0,tx);
1434             fjy0             = _mm256_add_ps(fjy0,ty);
1435             fjz0             = _mm256_add_ps(fjz0,tz);
1436
1437             }
1438
1439             /**************************
1440              * CALCULATE INTERACTIONS *
1441              **************************/
1442
1443             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1444             {
1445
1446             r20              = _mm256_mul_ps(rsq20,rinv20);
1447             r20              = _mm256_andnot_ps(dummy_mask,r20);
1448
1449             /* Compute parameters for interactions between i and j atoms */
1450             qq20             = _mm256_mul_ps(iq2,jq0);
1451
1452             /* EWALD ELECTROSTATICS */
1453             
1454             /* Analytical PME correction */
1455             zeta2            = _mm256_mul_ps(beta2,rsq20);
1456             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1457             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1458             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1459             felec            = _mm256_mul_ps(qq20,felec);
1460             
1461             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1462
1463             fscal            = felec;
1464
1465             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1466
1467             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1468
1469             /* Calculate temporary vectorial force */
1470             tx               = _mm256_mul_ps(fscal,dx20);
1471             ty               = _mm256_mul_ps(fscal,dy20);
1472             tz               = _mm256_mul_ps(fscal,dz20);
1473
1474             /* Update vectorial force */
1475             fix2             = _mm256_add_ps(fix2,tx);
1476             fiy2             = _mm256_add_ps(fiy2,ty);
1477             fiz2             = _mm256_add_ps(fiz2,tz);
1478
1479             fjx0             = _mm256_add_ps(fjx0,tx);
1480             fjy0             = _mm256_add_ps(fjy0,ty);
1481             fjz0             = _mm256_add_ps(fjz0,tz);
1482
1483             }
1484
1485             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1486             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1487             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1488             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1489             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1490             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1491             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1492             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
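            /* For padded lanes the store pointer is redirected to a scratch buffer,
             * so the eight-pointer decrement below can run unconditionally without
             * touching real force data for dummy atoms.
             */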
1493
1494             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1495
1496             /* Inner loop uses 206 flops */
1497         }
1498
1499         /* End of innermost loop */
1500
1501         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1502                                                  f+i_coord_offset,fshift+i_shift_offset);
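        /* Reduce the per-lane force accumulators of the three i water atoms across
         * the eight SIMD lanes and add the result to the force array and to the
         * shift-force array for this i shift index.
         */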
1503
1504         /* Increment number of inner iterations */
1505         inneriter                  += j_index_end - j_index_start;
1506
1507         /* Outer loop uses 18 flops */
1508     }
1509
1510     /* Increment number of outer iterations */
1511     outeriter        += nri;
1512
1513     /* Update outer/inner flops */
1514
1515     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*206);
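    /* Register the operation count for accounting: 18 flops per outer iteration and
     * 206 per inner iteration (the epilogue estimate is used for all inner iterations).
     */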
1516 }