src/gromacs/gmxlib/nonbonded/nb_kernel_avx_256_single/nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_avx_256_single.c
1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_single kernel generator.
37  */
38 #include "config.h"
39
40 #include <math.h>
41
42 #include "../nb_kernel.h"
43 #include "types/simple.h"
44 #include "gromacs/math/vec.h"
45 #include "nrnb.h"
46
47 #include "gromacs/simd/math_x86_avx_256_single.h"
48 #include "kernelutil_x86_avx_256_single.h"
49
50 /*
51  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_VF_avx_256_single
52  * Electrostatics interaction: Ewald
53  * VdW interaction:            LJEwald
54  * Geometry:                   Water4-Particle
55  * Calculate force/pot:        PotentialAndForce
56  */
57 void
58 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_VF_avx_256_single
59                     (t_nblist                    * gmx_restrict       nlist,
60                      rvec                        * gmx_restrict          xx,
61                      rvec                        * gmx_restrict          ff,
62                      t_forcerec                  * gmx_restrict          fr,
63                      t_mdatoms                   * gmx_restrict     mdatoms,
64                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65                      t_nrnb                      * gmx_restrict        nrnb)
66 {
67     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
68      * just 0 for non-waters.
69      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
70      * jnr indices corresponding to data put in the eight positions in the SIMD register.
71      */
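        /* As an illustration of this naming scheme: dx30 holds, in its eight SIMD lanes,
         * the x-components of the displacement between water site 3 of the outer (i)
         * particle and site 0 of the eight j particles jnrA..jnrH, while fjx0 accumulates
         * the matching x-force contributions that are later subtracted from the j forces.
         */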
72     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
73     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74     int              jnrA,jnrB,jnrC,jnrD;
75     int              jnrE,jnrF,jnrG,jnrH;
76     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
77     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
78     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
79     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
84     real             scratch[4*DIM];
85     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     real *           vdwgridioffsetptr0;
88     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
89     real *           vdwioffsetptr1;
90     real *           vdwgridioffsetptr1;
91     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
92     real *           vdwioffsetptr2;
93     real *           vdwgridioffsetptr2;
94     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
95     real *           vdwioffsetptr3;
96     real *           vdwgridioffsetptr3;
97     __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
98     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
99     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
100     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
101     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
102     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
103     __m256           dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
104     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
105     real             *charge;
106     int              nvdwtype;
107     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
108     int              *vdwtype;
109     real             *vdwparam;
110     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
111     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
112     __m256           c6grid_00;
113     __m256           c6grid_10;
114     __m256           c6grid_20;
115     __m256           c6grid_30;
116     real             *vdwgridparam;
117     __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
118     __m256           one_half  = _mm256_set1_ps(0.5);
119     __m256           minus_one = _mm256_set1_ps(-1.0);
120     __m256i          ewitab;
121     __m128i          ewitab_lo,ewitab_hi;
122     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
123     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
124     real             *ewtab;
125     __m256           dummy_mask,cutoff_mask;
126     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
127     __m256           one     = _mm256_set1_ps(1.0);
128     __m256           two     = _mm256_set1_ps(2.0);
129     x                = xx[0];
130     f                = ff[0];
131
132     nri              = nlist->nri;
133     iinr             = nlist->iinr;
134     jindex           = nlist->jindex;
135     jjnr             = nlist->jjnr;
136     shiftidx         = nlist->shift;
137     gid              = nlist->gid;
138     shiftvec         = fr->shift_vec[0];
139     fshift           = fr->fshift[0];
140     facel            = _mm256_set1_ps(fr->epsfac);
141     charge           = mdatoms->chargeA;
142     nvdwtype         = fr->ntype;
143     vdwparam         = fr->nbfp;
144     vdwtype          = mdatoms->typeA;
145     vdwgridparam     = fr->ljpme_c6grid;
146     sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
147     ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
148     ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
149
150     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
151     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
152     beta2            = _mm256_mul_ps(beta,beta);
153     beta3            = _mm256_mul_ps(beta,beta2);
154
155     ewtab            = fr->ic->tabq_coul_FDV0;
156     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
157     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
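        /* Note that the Ewald table pointer and scale set up above are not referenced in
         * the loops below: this kernel evaluates the PME correction analytically via
         * gmx_mm256_pmecorrF_ps()/gmx_mm256_pmecorrV_ps(). The table setup presumably
         * remains because the generator shares this preamble with the tabulated variants.
         */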
158
159     /* Setup water-specific parameters */
160     inr              = nlist->iinr[0];
161     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
162     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
163     iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
164     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
165     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
166
167     /* When we use explicit cutoffs the value must be identical for electrostatics and VdW, so we use the electrostatics cutoff as an arbitrary choice */
168     rcutoff_scalar   = fr->rcoulomb;
169     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
170     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
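        /* rcutoff2 is compared against each rsq with _mm256_cmp_ps() in the inner loops;
         * the resulting cutoff_mask zeroes the potential and fscal in lanes beyond the
         * cutoff, while gmx_mm256_any_lt() skips a block only when all eight lanes are
         * outside the cutoff.
         */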
171
172     sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
173     rvdw             = _mm256_set1_ps(fr->rvdw);
174
175     /* Avoid stupid compiler warnings */
176     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
177     j_coord_offsetA = 0;
178     j_coord_offsetB = 0;
179     j_coord_offsetC = 0;
180     j_coord_offsetD = 0;
181     j_coord_offsetE = 0;
182     j_coord_offsetF = 0;
183     j_coord_offsetG = 0;
184     j_coord_offsetH = 0;
185
186     outeriter        = 0;
187     inneriter        = 0;
188
189     for(iidx=0;iidx<4*DIM;iidx++)
190     {
191         scratch[iidx] = 0.0;
192     }
193
194     /* Start outer loop over neighborlists */
195     for(iidx=0; iidx<nri; iidx++)
196     {
197         /* Load shift vector for this list */
198         i_shift_offset   = DIM*shiftidx[iidx];
199
200         /* Load limits for loop over neighbors */
201         j_index_start    = jindex[iidx];
202         j_index_end      = jindex[iidx+1];
203
204         /* Get outer coordinate index */
205         inr              = iinr[iidx];
206         i_coord_offset   = DIM*inr;
207
208         /* Load i particle coords and add shift vector */
209         gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
210                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
211
212         fix0             = _mm256_setzero_ps();
213         fiy0             = _mm256_setzero_ps();
214         fiz0             = _mm256_setzero_ps();
215         fix1             = _mm256_setzero_ps();
216         fiy1             = _mm256_setzero_ps();
217         fiz1             = _mm256_setzero_ps();
218         fix2             = _mm256_setzero_ps();
219         fiy2             = _mm256_setzero_ps();
220         fiz2             = _mm256_setzero_ps();
221         fix3             = _mm256_setzero_ps();
222         fiy3             = _mm256_setzero_ps();
223         fiz3             = _mm256_setzero_ps();
224
225         /* Reset potential sums */
226         velecsum         = _mm256_setzero_ps();
227         vvdwsum          = _mm256_setzero_ps();
228
229         /* Start inner kernel loop */
230         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
231         {
232
233             /* Get j neighbor index, and coordinate index */
234             jnrA             = jjnr[jidx];
235             jnrB             = jjnr[jidx+1];
236             jnrC             = jjnr[jidx+2];
237             jnrD             = jjnr[jidx+3];
238             jnrE             = jjnr[jidx+4];
239             jnrF             = jjnr[jidx+5];
240             jnrG             = jjnr[jidx+6];
241             jnrH             = jjnr[jidx+7];
242             j_coord_offsetA  = DIM*jnrA;
243             j_coord_offsetB  = DIM*jnrB;
244             j_coord_offsetC  = DIM*jnrC;
245             j_coord_offsetD  = DIM*jnrD;
246             j_coord_offsetE  = DIM*jnrE;
247             j_coord_offsetF  = DIM*jnrF;
248             j_coord_offsetG  = DIM*jnrG;
249             j_coord_offsetH  = DIM*jnrH;
250
251             /* load j atom coordinates */
252             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
253                                                  x+j_coord_offsetC,x+j_coord_offsetD,
254                                                  x+j_coord_offsetE,x+j_coord_offsetF,
255                                                  x+j_coord_offsetG,x+j_coord_offsetH,
256                                                  &jx0,&jy0,&jz0);
257
258             /* Calculate displacement vector */
259             dx00             = _mm256_sub_ps(ix0,jx0);
260             dy00             = _mm256_sub_ps(iy0,jy0);
261             dz00             = _mm256_sub_ps(iz0,jz0);
262             dx10             = _mm256_sub_ps(ix1,jx0);
263             dy10             = _mm256_sub_ps(iy1,jy0);
264             dz10             = _mm256_sub_ps(iz1,jz0);
265             dx20             = _mm256_sub_ps(ix2,jx0);
266             dy20             = _mm256_sub_ps(iy2,jy0);
267             dz20             = _mm256_sub_ps(iz2,jz0);
268             dx30             = _mm256_sub_ps(ix3,jx0);
269             dy30             = _mm256_sub_ps(iy3,jy0);
270             dz30             = _mm256_sub_ps(iz3,jz0);
271
272             /* Calculate squared distance and things based on it */
273             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
274             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
275             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
276             rsq30            = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
277
278             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
279             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
280             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
281             rinv30           = gmx_mm256_invsqrt_ps(rsq30);
282
283             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
284             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
285             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
286             rinvsq30         = _mm256_mul_ps(rinv30,rinv30);
287
288             /* Load parameters for j particles */
289             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
290                                                                  charge+jnrC+0,charge+jnrD+0,
291                                                                  charge+jnrE+0,charge+jnrF+0,
292                                                                  charge+jnrG+0,charge+jnrH+0);
293             vdwjidx0A        = 2*vdwtype[jnrA+0];
294             vdwjidx0B        = 2*vdwtype[jnrB+0];
295             vdwjidx0C        = 2*vdwtype[jnrC+0];
296             vdwjidx0D        = 2*vdwtype[jnrD+0];
297             vdwjidx0E        = 2*vdwtype[jnrE+0];
298             vdwjidx0F        = 2*vdwtype[jnrF+0];
299             vdwjidx0G        = 2*vdwtype[jnrG+0];
300             vdwjidx0H        = 2*vdwtype[jnrH+0];
301
302             fjx0             = _mm256_setzero_ps();
303             fjy0             = _mm256_setzero_ps();
304             fjz0             = _mm256_setzero_ps();
305
306             /**************************
307              * CALCULATE INTERACTIONS *
308              **************************/
309
310             if (gmx_mm256_any_lt(rsq00,rcutoff2))
311             {
312
313             r00              = _mm256_mul_ps(rsq00,rinv00);
314
315             /* Compute parameters for interactions between i and j atoms */
316             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
317                                             vdwioffsetptr0+vdwjidx0B,
318                                             vdwioffsetptr0+vdwjidx0C,
319                                             vdwioffsetptr0+vdwjidx0D,
320                                             vdwioffsetptr0+vdwjidx0E,
321                                             vdwioffsetptr0+vdwjidx0F,
322                                             vdwioffsetptr0+vdwjidx0G,
323                                             vdwioffsetptr0+vdwjidx0H,
324                                             &c6_00,&c12_00);
325
326             c6grid_00       = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
327                                                                   vdwgridioffsetptr0+vdwjidx0B,
328                                                                   vdwgridioffsetptr0+vdwjidx0C,
329                                                                   vdwgridioffsetptr0+vdwjidx0D,
330                                                                   vdwgridioffsetptr0+vdwjidx0E,
331                                                                   vdwgridioffsetptr0+vdwjidx0F,
332                                                                   vdwgridioffsetptr0+vdwjidx0G,
333                                                                   vdwgridioffsetptr0+vdwjidx0H);
334
335             /* Analytical LJ-PME */
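                /* In essence, the direct-space LJ-PME dispersion below is
                 *   vvdw6 = [C6 - C6grid*(1-poly)]/r^6,
                 * i.e. the part of the 1/r^6 attraction already handled by the
                 * reciprocal-space LJ grid, C6grid*(1-poly)/r^6, is removed from the
                 * plain C6/r^6 term; with geometric combination rules C6grid equals C6
                 * and the expression reduces to C6*poly/r^6.
                 */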
336             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
337             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
338             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
339             exponent         = gmx_simd_exp_r(ewcljrsq);
340             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
341             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
342             /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
343             vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
344             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
345             vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
346                                           _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
347             /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
348             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
349
350             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
351
352             /* Update potential sum for this i atom from the interaction with this j atom. */
353             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
354             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
355
356             fscal            = fvdw;
357
358             fscal            = _mm256_and_ps(fscal,cutoff_mask);
359
360             /* Calculate temporary vectorial force */
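                /* fscal is the scalar force divided by r, so multiplying by the raw
                 * displacement components below yields Cartesian force components; these
                 * are added to the i-atom accumulators and gathered in fj*0 for the
                 * j-force decrement at the end of the inner iteration.
                 */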
361             tx               = _mm256_mul_ps(fscal,dx00);
362             ty               = _mm256_mul_ps(fscal,dy00);
363             tz               = _mm256_mul_ps(fscal,dz00);
364
365             /* Update vectorial force */
366             fix0             = _mm256_add_ps(fix0,tx);
367             fiy0             = _mm256_add_ps(fiy0,ty);
368             fiz0             = _mm256_add_ps(fiz0,tz);
369
370             fjx0             = _mm256_add_ps(fjx0,tx);
371             fjy0             = _mm256_add_ps(fjy0,ty);
372             fjz0             = _mm256_add_ps(fjz0,tz);
373
374             }
375
376             /**************************
377              * CALCULATE INTERACTIONS *
378              **************************/
379
380             if (gmx_mm256_any_lt(rsq10,rcutoff2))
381             {
382
383             r10              = _mm256_mul_ps(rsq10,rinv10);
384
385             /* Compute parameters for interactions between i and j atoms */
386             qq10             = _mm256_mul_ps(iq1,jq0);
387
388             /* EWALD ELECTROSTATICS */
389             
390             /* Analytical PME correction */
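                /* In effect this assembles the potential-shifted Ewald real-space pair terms
                 *   velec = qq*(erfc(beta*r)/r - sh_ewald)
                 *   felec = qq*(erfc(beta*r)/r + 2*beta*r*exp(-(beta*r)^2)/sqrt(pi))/r^2,
                 * with the erf() part supplied by the pmecorrV/pmecorrF polynomial
                 * approximations instead of table lookups.
                 */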
391             zeta2            = _mm256_mul_ps(beta2,rsq10);
392             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
393             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
394             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
395             felec            = _mm256_mul_ps(qq10,felec);
396             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
397             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
398             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
399             velec            = _mm256_mul_ps(qq10,velec);
400             
401             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
402
403             /* Update potential sum for this i atom from the interaction with this j atom. */
404             velec            = _mm256_and_ps(velec,cutoff_mask);
405             velecsum         = _mm256_add_ps(velecsum,velec);
406
407             fscal            = felec;
408
409             fscal            = _mm256_and_ps(fscal,cutoff_mask);
410
411             /* Calculate temporary vectorial force */
412             tx               = _mm256_mul_ps(fscal,dx10);
413             ty               = _mm256_mul_ps(fscal,dy10);
414             tz               = _mm256_mul_ps(fscal,dz10);
415
416             /* Update vectorial force */
417             fix1             = _mm256_add_ps(fix1,tx);
418             fiy1             = _mm256_add_ps(fiy1,ty);
419             fiz1             = _mm256_add_ps(fiz1,tz);
420
421             fjx0             = _mm256_add_ps(fjx0,tx);
422             fjy0             = _mm256_add_ps(fjy0,ty);
423             fjz0             = _mm256_add_ps(fjz0,tz);
424
425             }
426
427             /**************************
428              * CALCULATE INTERACTIONS *
429              **************************/
430
431             if (gmx_mm256_any_lt(rsq20,rcutoff2))
432             {
433
434             r20              = _mm256_mul_ps(rsq20,rinv20);
435
436             /* Compute parameters for interactions between i and j atoms */
437             qq20             = _mm256_mul_ps(iq2,jq0);
438
439             /* EWALD ELECTROSTATICS */
440             
441             /* Analytical PME correction */
442             zeta2            = _mm256_mul_ps(beta2,rsq20);
443             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
444             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
445             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
446             felec            = _mm256_mul_ps(qq20,felec);
447             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
448             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
449             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
450             velec            = _mm256_mul_ps(qq20,velec);
451             
452             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
453
454             /* Update potential sum for this i atom from the interaction with this j atom. */
455             velec            = _mm256_and_ps(velec,cutoff_mask);
456             velecsum         = _mm256_add_ps(velecsum,velec);
457
458             fscal            = felec;
459
460             fscal            = _mm256_and_ps(fscal,cutoff_mask);
461
462             /* Calculate temporary vectorial force */
463             tx               = _mm256_mul_ps(fscal,dx20);
464             ty               = _mm256_mul_ps(fscal,dy20);
465             tz               = _mm256_mul_ps(fscal,dz20);
466
467             /* Update vectorial force */
468             fix2             = _mm256_add_ps(fix2,tx);
469             fiy2             = _mm256_add_ps(fiy2,ty);
470             fiz2             = _mm256_add_ps(fiz2,tz);
471
472             fjx0             = _mm256_add_ps(fjx0,tx);
473             fjy0             = _mm256_add_ps(fjy0,ty);
474             fjz0             = _mm256_add_ps(fjz0,tz);
475
476             }
477
478             /**************************
479              * CALCULATE INTERACTIONS *
480              **************************/
481
482             if (gmx_mm256_any_lt(rsq30,rcutoff2))
483             {
484
485             r30              = _mm256_mul_ps(rsq30,rinv30);
486
487             /* Compute parameters for interactions between i and j atoms */
488             qq30             = _mm256_mul_ps(iq3,jq0);
489
490             /* EWALD ELECTROSTATICS */
491             
492             /* Analytical PME correction */
493             zeta2            = _mm256_mul_ps(beta2,rsq30);
494             rinv3            = _mm256_mul_ps(rinvsq30,rinv30);
495             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
496             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
497             felec            = _mm256_mul_ps(qq30,felec);
498             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
499             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
500             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv30,sh_ewald),pmecorrV);
501             velec            = _mm256_mul_ps(qq30,velec);
502             
503             cutoff_mask      = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
504
505             /* Update potential sum for this i atom from the interaction with this j atom. */
506             velec            = _mm256_and_ps(velec,cutoff_mask);
507             velecsum         = _mm256_add_ps(velecsum,velec);
508
509             fscal            = felec;
510
511             fscal            = _mm256_and_ps(fscal,cutoff_mask);
512
513             /* Calculate temporary vectorial force */
514             tx               = _mm256_mul_ps(fscal,dx30);
515             ty               = _mm256_mul_ps(fscal,dy30);
516             tz               = _mm256_mul_ps(fscal,dz30);
517
518             /* Update vectorial force */
519             fix3             = _mm256_add_ps(fix3,tx);
520             fiy3             = _mm256_add_ps(fiy3,ty);
521             fiz3             = _mm256_add_ps(fiz3,tz);
522
523             fjx0             = _mm256_add_ps(fjx0,tx);
524             fjy0             = _mm256_add_ps(fjy0,ty);
525             fjz0             = _mm256_add_ps(fjz0,tz);
526
527             }
528
529             fjptrA             = f+j_coord_offsetA;
530             fjptrB             = f+j_coord_offsetB;
531             fjptrC             = f+j_coord_offsetC;
532             fjptrD             = f+j_coord_offsetD;
533             fjptrE             = f+j_coord_offsetE;
534             fjptrF             = f+j_coord_offsetF;
535             fjptrG             = f+j_coord_offsetG;
536             fjptrH             = f+j_coord_offsetH;
537
538             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
539
540             /* Inner loop uses 392 flops */
541         }
542
543         if(jidx<j_index_end)
544         {
545
546             /* Get j neighbor index, and coordinate index */
547             jnrlistA         = jjnr[jidx];
548             jnrlistB         = jjnr[jidx+1];
549             jnrlistC         = jjnr[jidx+2];
550             jnrlistD         = jjnr[jidx+3];
551             jnrlistE         = jjnr[jidx+4];
552             jnrlistF         = jjnr[jidx+5];
553             jnrlistG         = jjnr[jidx+6];
554             jnrlistH         = jjnr[jidx+7];
555             /* Sign of each element will be negative for non-real atoms.
556              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
557              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
558              */
559             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
560                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
561                                             
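                /* Example: if only five real j atoms remain in this list, jjnr entries
                 * jidx+5..jidx+7 hold negative padding values. Those lanes get jnr* = 0
                 * as a safe load address and dummy_mask = 0xFFFFFFFF, so their energy and
                 * force contributions are cleared with _mm256_andnot_ps() below, and the
                 * padded j-force outputs are routed to the scratch buffer instead of f[].
                 */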
562             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
563             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
564             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
565             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
566             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
567             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
568             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
569             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
570             j_coord_offsetA  = DIM*jnrA;
571             j_coord_offsetB  = DIM*jnrB;
572             j_coord_offsetC  = DIM*jnrC;
573             j_coord_offsetD  = DIM*jnrD;
574             j_coord_offsetE  = DIM*jnrE;
575             j_coord_offsetF  = DIM*jnrF;
576             j_coord_offsetG  = DIM*jnrG;
577             j_coord_offsetH  = DIM*jnrH;
578
579             /* load j atom coordinates */
580             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
581                                                  x+j_coord_offsetC,x+j_coord_offsetD,
582                                                  x+j_coord_offsetE,x+j_coord_offsetF,
583                                                  x+j_coord_offsetG,x+j_coord_offsetH,
584                                                  &jx0,&jy0,&jz0);
585
586             /* Calculate displacement vector */
587             dx00             = _mm256_sub_ps(ix0,jx0);
588             dy00             = _mm256_sub_ps(iy0,jy0);
589             dz00             = _mm256_sub_ps(iz0,jz0);
590             dx10             = _mm256_sub_ps(ix1,jx0);
591             dy10             = _mm256_sub_ps(iy1,jy0);
592             dz10             = _mm256_sub_ps(iz1,jz0);
593             dx20             = _mm256_sub_ps(ix2,jx0);
594             dy20             = _mm256_sub_ps(iy2,jy0);
595             dz20             = _mm256_sub_ps(iz2,jz0);
596             dx30             = _mm256_sub_ps(ix3,jx0);
597             dy30             = _mm256_sub_ps(iy3,jy0);
598             dz30             = _mm256_sub_ps(iz3,jz0);
599
600             /* Calculate squared distance and things based on it */
601             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
602             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
603             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
604             rsq30            = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
605
606             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
607             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
608             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
609             rinv30           = gmx_mm256_invsqrt_ps(rsq30);
610
611             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
612             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
613             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
614             rinvsq30         = _mm256_mul_ps(rinv30,rinv30);
615
616             /* Load parameters for j particles */
617             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
618                                                                  charge+jnrC+0,charge+jnrD+0,
619                                                                  charge+jnrE+0,charge+jnrF+0,
620                                                                  charge+jnrG+0,charge+jnrH+0);
621             vdwjidx0A        = 2*vdwtype[jnrA+0];
622             vdwjidx0B        = 2*vdwtype[jnrB+0];
623             vdwjidx0C        = 2*vdwtype[jnrC+0];
624             vdwjidx0D        = 2*vdwtype[jnrD+0];
625             vdwjidx0E        = 2*vdwtype[jnrE+0];
626             vdwjidx0F        = 2*vdwtype[jnrF+0];
627             vdwjidx0G        = 2*vdwtype[jnrG+0];
628             vdwjidx0H        = 2*vdwtype[jnrH+0];
629
630             fjx0             = _mm256_setzero_ps();
631             fjy0             = _mm256_setzero_ps();
632             fjz0             = _mm256_setzero_ps();
633
634             /**************************
635              * CALCULATE INTERACTIONS *
636              **************************/
637
638             if (gmx_mm256_any_lt(rsq00,rcutoff2))
639             {
640
641             r00              = _mm256_mul_ps(rsq00,rinv00);
642             r00              = _mm256_andnot_ps(dummy_mask,r00);
643
644             /* Compute parameters for interactions between i and j atoms */
645             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
646                                             vdwioffsetptr0+vdwjidx0B,
647                                             vdwioffsetptr0+vdwjidx0C,
648                                             vdwioffsetptr0+vdwjidx0D,
649                                             vdwioffsetptr0+vdwjidx0E,
650                                             vdwioffsetptr0+vdwjidx0F,
651                                             vdwioffsetptr0+vdwjidx0G,
652                                             vdwioffsetptr0+vdwjidx0H,
653                                             &c6_00,&c12_00);
654
655             c6grid_00       = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
656                                                                   vdwgridioffsetptr0+vdwjidx0B,
657                                                                   vdwgridioffsetptr0+vdwjidx0C,
658                                                                   vdwgridioffsetptr0+vdwjidx0D,
659                                                                   vdwgridioffsetptr0+vdwjidx0E,
660                                                                   vdwgridioffsetptr0+vdwjidx0F,
661                                                                   vdwgridioffsetptr0+vdwjidx0G,
662                                                                   vdwgridioffsetptr0+vdwjidx0H);
663
664             /* Analytical LJ-PME */
665             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
666             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
667             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
668             exponent         = gmx_simd_exp_r(ewcljrsq);
669             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
670             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
671             /* vvdw6 = [C6 - C6grid * (1-poly)]/r6 */
672             vvdw6            = _mm256_mul_ps(_mm256_sub_ps(c6_00,_mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly))),rinvsix);
673             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
674             vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
675                                           _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_add_ps(_mm256_mul_ps(c6_00,sh_vdw_invrcut6),_mm256_mul_ps(c6grid_00,sh_lj_ewald))),one_sixth));
676             /* fvdw = vvdw12/r - (vvdw6/r + (C6grid * exponent * beta^6)/r) */
677             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,_mm256_sub_ps(vvdw6,_mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6)))),rinvsq00);
678
679             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
680
681             /* Update potential sum for this i atom from the interaction with this j atom. */
682             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
683             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
684             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
685
686             fscal            = fvdw;
687
688             fscal            = _mm256_and_ps(fscal,cutoff_mask);
689
690             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
691
692             /* Calculate temporary vectorial force */
693             tx               = _mm256_mul_ps(fscal,dx00);
694             ty               = _mm256_mul_ps(fscal,dy00);
695             tz               = _mm256_mul_ps(fscal,dz00);
696
697             /* Update vectorial force */
698             fix0             = _mm256_add_ps(fix0,tx);
699             fiy0             = _mm256_add_ps(fiy0,ty);
700             fiz0             = _mm256_add_ps(fiz0,tz);
701
702             fjx0             = _mm256_add_ps(fjx0,tx);
703             fjy0             = _mm256_add_ps(fjy0,ty);
704             fjz0             = _mm256_add_ps(fjz0,tz);
705
706             }
707
708             /**************************
709              * CALCULATE INTERACTIONS *
710              **************************/
711
712             if (gmx_mm256_any_lt(rsq10,rcutoff2))
713             {
714
715             r10              = _mm256_mul_ps(rsq10,rinv10);
716             r10              = _mm256_andnot_ps(dummy_mask,r10);
717
718             /* Compute parameters for interactions between i and j atoms */
719             qq10             = _mm256_mul_ps(iq1,jq0);
720
721             /* EWALD ELECTROSTATICS */
722             
723             /* Analytical PME correction */
724             zeta2            = _mm256_mul_ps(beta2,rsq10);
725             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
726             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
727             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
728             felec            = _mm256_mul_ps(qq10,felec);
729             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
730             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
731             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
732             velec            = _mm256_mul_ps(qq10,velec);
733             
734             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
735
736             /* Update potential sum for this i atom from the interaction with this j atom. */
737             velec            = _mm256_and_ps(velec,cutoff_mask);
738             velec            = _mm256_andnot_ps(dummy_mask,velec);
739             velecsum         = _mm256_add_ps(velecsum,velec);
740
741             fscal            = felec;
742
743             fscal            = _mm256_and_ps(fscal,cutoff_mask);
744
745             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
746
747             /* Calculate temporary vectorial force */
748             tx               = _mm256_mul_ps(fscal,dx10);
749             ty               = _mm256_mul_ps(fscal,dy10);
750             tz               = _mm256_mul_ps(fscal,dz10);
751
752             /* Update vectorial force */
753             fix1             = _mm256_add_ps(fix1,tx);
754             fiy1             = _mm256_add_ps(fiy1,ty);
755             fiz1             = _mm256_add_ps(fiz1,tz);
756
757             fjx0             = _mm256_add_ps(fjx0,tx);
758             fjy0             = _mm256_add_ps(fjy0,ty);
759             fjz0             = _mm256_add_ps(fjz0,tz);
760
761             }
762
763             /**************************
764              * CALCULATE INTERACTIONS *
765              **************************/
766
767             if (gmx_mm256_any_lt(rsq20,rcutoff2))
768             {
769
770             r20              = _mm256_mul_ps(rsq20,rinv20);
771             r20              = _mm256_andnot_ps(dummy_mask,r20);
772
773             /* Compute parameters for interactions between i and j atoms */
774             qq20             = _mm256_mul_ps(iq2,jq0);
775
776             /* EWALD ELECTROSTATICS */
777             
778             /* Analytical PME correction */
779             zeta2            = _mm256_mul_ps(beta2,rsq20);
780             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
781             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
782             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
783             felec            = _mm256_mul_ps(qq20,felec);
784             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
785             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
786             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
787             velec            = _mm256_mul_ps(qq20,velec);
788             
789             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
790
791             /* Update potential sum for this i atom from the interaction with this j atom. */
792             velec            = _mm256_and_ps(velec,cutoff_mask);
793             velec            = _mm256_andnot_ps(dummy_mask,velec);
794             velecsum         = _mm256_add_ps(velecsum,velec);
795
796             fscal            = felec;
797
798             fscal            = _mm256_and_ps(fscal,cutoff_mask);
799
800             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
801
802             /* Calculate temporary vectorial force */
803             tx               = _mm256_mul_ps(fscal,dx20);
804             ty               = _mm256_mul_ps(fscal,dy20);
805             tz               = _mm256_mul_ps(fscal,dz20);
806
807             /* Update vectorial force */
808             fix2             = _mm256_add_ps(fix2,tx);
809             fiy2             = _mm256_add_ps(fiy2,ty);
810             fiz2             = _mm256_add_ps(fiz2,tz);
811
812             fjx0             = _mm256_add_ps(fjx0,tx);
813             fjy0             = _mm256_add_ps(fjy0,ty);
814             fjz0             = _mm256_add_ps(fjz0,tz);
815
816             }
817
818             /**************************
819              * CALCULATE INTERACTIONS *
820              **************************/
821
822             if (gmx_mm256_any_lt(rsq30,rcutoff2))
823             {
824
825             r30              = _mm256_mul_ps(rsq30,rinv30);
826             r30              = _mm256_andnot_ps(dummy_mask,r30);
827
828             /* Compute parameters for interactions between i and j atoms */
829             qq30             = _mm256_mul_ps(iq3,jq0);
830
831             /* EWALD ELECTROSTATICS */
832             
833             /* Analytical PME correction */
834             zeta2            = _mm256_mul_ps(beta2,rsq30);
835             rinv3            = _mm256_mul_ps(rinvsq30,rinv30);
836             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
837             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
838             felec            = _mm256_mul_ps(qq30,felec);
839             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
840             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
841             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv30,sh_ewald),pmecorrV);
842             velec            = _mm256_mul_ps(qq30,velec);
843             
844             cutoff_mask      = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
845
846             /* Update potential sum for this i atom from the interaction with this j atom. */
847             velec            = _mm256_and_ps(velec,cutoff_mask);
848             velec            = _mm256_andnot_ps(dummy_mask,velec);
849             velecsum         = _mm256_add_ps(velecsum,velec);
850
851             fscal            = felec;
852
853             fscal            = _mm256_and_ps(fscal,cutoff_mask);
854
855             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
856
857             /* Calculate temporary vectorial force */
858             tx               = _mm256_mul_ps(fscal,dx30);
859             ty               = _mm256_mul_ps(fscal,dy30);
860             tz               = _mm256_mul_ps(fscal,dz30);
861
862             /* Update vectorial force */
863             fix3             = _mm256_add_ps(fix3,tx);
864             fiy3             = _mm256_add_ps(fiy3,ty);
865             fiz3             = _mm256_add_ps(fiz3,tz);
866
867             fjx0             = _mm256_add_ps(fjx0,tx);
868             fjy0             = _mm256_add_ps(fjy0,ty);
869             fjz0             = _mm256_add_ps(fjz0,tz);
870
871             }
872
873             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
874             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
875             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
876             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
877             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
878             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
879             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
880             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
881
882             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
883
884             /* Inner loop uses 396 flops */
885         }
886
887         /* End of innermost loop */
888
889         gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
890                                                  f+i_coord_offset,fshift+i_shift_offset);
891
892         ggid                        = gid[iidx];
893         /* Update potential energies */
894         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
895         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
896
897         /* Increment number of inner iterations */
898         inneriter                  += j_index_end - j_index_start;
899
900         /* Outer loop uses 26 flops */
901     }
902
903     /* Increment number of outer iterations */
904     outeriter        += nri;
905
906     /* Update outer/inner flops */
907
908     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_VF,outeriter*26 + inneriter*396);
909 }
910 /*
911  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_F_avx_256_single
912  * Electrostatics interaction: Ewald
913  * VdW interaction:            LJEwald
914  * Geometry:                   Water4-Particle
915  * Calculate force/pot:        Force
916  */
917 void
918 nb_kernel_ElecEwSh_VdwLJEwSh_GeomW4P1_F_avx_256_single
919                     (t_nblist                    * gmx_restrict       nlist,
920                      rvec                        * gmx_restrict          xx,
921                      rvec                        * gmx_restrict          ff,
922                      t_forcerec                  * gmx_restrict          fr,
923                      t_mdatoms                   * gmx_restrict     mdatoms,
924                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
925                      t_nrnb                      * gmx_restrict        nrnb)
926 {
927     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
928      * just 0 for non-waters.
929      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
930      * jnr indices corresponding to data put in the eight positions in the SIMD register.
931      */
932     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
933     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
934     int              jnrA,jnrB,jnrC,jnrD;
935     int              jnrE,jnrF,jnrG,jnrH;
936     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
937     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
938     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
939     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
940     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
941     real             rcutoff_scalar;
942     real             *shiftvec,*fshift,*x,*f;
943     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
944     real             scratch[4*DIM];
945     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
946     real *           vdwioffsetptr0;
947     real *           vdwgridioffsetptr0;
948     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
949     real *           vdwioffsetptr1;
950     real *           vdwgridioffsetptr1;
951     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
952     real *           vdwioffsetptr2;
953     real *           vdwgridioffsetptr2;
954     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
955     real *           vdwioffsetptr3;
956     real *           vdwgridioffsetptr3;
957     __m256           ix3,iy3,iz3,fix3,fiy3,fiz3,iq3,isai3;
958     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
959     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
960     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
961     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
962     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
963     __m256           dx30,dy30,dz30,rsq30,rinv30,rinvsq30,r30,qq30,c6_30,c12_30;
964     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
965     real             *charge;
966     int              nvdwtype;
967     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
968     int              *vdwtype;
969     real             *vdwparam;
970     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
971     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
972     __m256           c6grid_00;
973     __m256           c6grid_10;
974     __m256           c6grid_20;
975     __m256           c6grid_30;
976     real             *vdwgridparam;
977     __m256           ewclj,ewclj2,ewclj6,ewcljrsq,poly,exponent,f6A,f6B,sh_lj_ewald;
978     __m256           one_half  = _mm256_set1_ps(0.5);
979     __m256           minus_one = _mm256_set1_ps(-1.0);
980     __m256i          ewitab;
981     __m128i          ewitab_lo,ewitab_hi;
982     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
983     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
984     real             *ewtab;
985     __m256           dummy_mask,cutoff_mask;
986     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
987     __m256           one     = _mm256_set1_ps(1.0);
988     __m256           two     = _mm256_set1_ps(2.0);
989     x                = xx[0];
990     f                = ff[0];
991
992     nri              = nlist->nri;
993     iinr             = nlist->iinr;
994     jindex           = nlist->jindex;
995     jjnr             = nlist->jjnr;
996     shiftidx         = nlist->shift;
997     gid              = nlist->gid;
998     shiftvec         = fr->shift_vec[0];
999     fshift           = fr->fshift[0];
1000     facel            = _mm256_set1_ps(fr->epsfac);
1001     charge           = mdatoms->chargeA;
1002     nvdwtype         = fr->ntype;
1003     vdwparam         = fr->nbfp;
1004     vdwtype          = mdatoms->typeA;
1005     vdwgridparam     = fr->ljpme_c6grid;
1006     sh_lj_ewald      = _mm256_set1_ps(fr->ic->sh_lj_ewald);
1007     ewclj            = _mm256_set1_ps(fr->ewaldcoeff_lj);
1008     ewclj2           = _mm256_mul_ps(minus_one,_mm256_mul_ps(ewclj,ewclj));
1009
1010     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
1011     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
1012     beta2            = _mm256_mul_ps(beta,beta);
1013     beta3            = _mm256_mul_ps(beta,beta2);
1014
1015     ewtab            = fr->ic->tabq_coul_F;
1016     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
1017     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
1018
1019     /* Setup water-specific parameters */
1020     inr              = nlist->iinr[0];
1021     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
1022     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
1023     iq3              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+3]));
1024     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
1025     vdwgridioffsetptr0 = vdwgridparam+2*nvdwtype*vdwtype[inr+0];
1026
1027     /* When we use explicit cutoffs the value must be identical for electrostatics and VdW, so we use the electrostatics cutoff as an arbitrary choice */
1028     rcutoff_scalar   = fr->rcoulomb;
1029     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
1030     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
1031
1032     sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
1033     rvdw             = _mm256_set1_ps(fr->rvdw);
1034
1035     /* Avoid stupid compiler warnings */
1036     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
1037     j_coord_offsetA = 0;
1038     j_coord_offsetB = 0;
1039     j_coord_offsetC = 0;
1040     j_coord_offsetD = 0;
1041     j_coord_offsetE = 0;
1042     j_coord_offsetF = 0;
1043     j_coord_offsetG = 0;
1044     j_coord_offsetH = 0;
1045
1046     outeriter        = 0;
1047     inneriter        = 0;
1048
1049     for(iidx=0;iidx<4*DIM;iidx++)
1050     {
1051         scratch[iidx] = 0.0;
1052     }
1053
1054     /* Start outer loop over neighborlists */
1055     for(iidx=0; iidx<nri; iidx++)
1056     {
1057         /* Load shift vector for this list */
1058         i_shift_offset   = DIM*shiftidx[iidx];
1059
1060         /* Load limits for loop over neighbors */
1061         j_index_start    = jindex[iidx];
1062         j_index_end      = jindex[iidx+1];
1063
1064         /* Get outer coordinate index */
1065         inr              = iinr[iidx];
1066         i_coord_offset   = DIM*inr;
1067
1068         /* Load i particle coords and add shift vector */
1069         gmx_mm256_load_shift_and_4rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
1070                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2,&ix3,&iy3,&iz3);
1071
1072         fix0             = _mm256_setzero_ps();
1073         fiy0             = _mm256_setzero_ps();
1074         fiz0             = _mm256_setzero_ps();
1075         fix1             = _mm256_setzero_ps();
1076         fiy1             = _mm256_setzero_ps();
1077         fiz1             = _mm256_setzero_ps();
1078         fix2             = _mm256_setzero_ps();
1079         fiy2             = _mm256_setzero_ps();
1080         fiz2             = _mm256_setzero_ps();
1081         fix3             = _mm256_setzero_ps();
1082         fiy3             = _mm256_setzero_ps();
1083         fiz3             = _mm256_setzero_ps();
1084
1085         /* Start inner kernel loop */
1086         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1087         {
1088
1089             /* Get j neighbor index, and coordinate index */
1090             jnrA             = jjnr[jidx];
1091             jnrB             = jjnr[jidx+1];
1092             jnrC             = jjnr[jidx+2];
1093             jnrD             = jjnr[jidx+3];
1094             jnrE             = jjnr[jidx+4];
1095             jnrF             = jjnr[jidx+5];
1096             jnrG             = jjnr[jidx+6];
1097             jnrH             = jjnr[jidx+7];
1098             j_coord_offsetA  = DIM*jnrA;
1099             j_coord_offsetB  = DIM*jnrB;
1100             j_coord_offsetC  = DIM*jnrC;
1101             j_coord_offsetD  = DIM*jnrD;
1102             j_coord_offsetE  = DIM*jnrE;
1103             j_coord_offsetF  = DIM*jnrF;
1104             j_coord_offsetG  = DIM*jnrG;
1105             j_coord_offsetH  = DIM*jnrH;
1106
1107             /* load j atom coordinates */
1108             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1109                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1110                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1111                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1112                                                  &jx0,&jy0,&jz0);
1113
1114             /* Calculate displacement vector */
1115             dx00             = _mm256_sub_ps(ix0,jx0);
1116             dy00             = _mm256_sub_ps(iy0,jy0);
1117             dz00             = _mm256_sub_ps(iz0,jz0);
1118             dx10             = _mm256_sub_ps(ix1,jx0);
1119             dy10             = _mm256_sub_ps(iy1,jy0);
1120             dz10             = _mm256_sub_ps(iz1,jz0);
1121             dx20             = _mm256_sub_ps(ix2,jx0);
1122             dy20             = _mm256_sub_ps(iy2,jy0);
1123             dz20             = _mm256_sub_ps(iz2,jz0);
1124             dx30             = _mm256_sub_ps(ix3,jx0);
1125             dy30             = _mm256_sub_ps(iy3,jy0);
1126             dz30             = _mm256_sub_ps(iz3,jz0);
1127
1128             /* Calculate squared distance and things based on it */
1129             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1130             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1131             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1132             rsq30            = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
1133
1134             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1135             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1136             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1137             rinv30           = gmx_mm256_invsqrt_ps(rsq30);
1138
1139             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1140             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1141             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1142             rinvsq30         = _mm256_mul_ps(rinv30,rinv30);
1143
1144             /* Load parameters for j particles */
1145             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1146                                                                  charge+jnrC+0,charge+jnrD+0,
1147                                                                  charge+jnrE+0,charge+jnrF+0,
1148                                                                  charge+jnrG+0,charge+jnrH+0);
1149             vdwjidx0A        = 2*vdwtype[jnrA+0];
1150             vdwjidx0B        = 2*vdwtype[jnrB+0];
1151             vdwjidx0C        = 2*vdwtype[jnrC+0];
1152             vdwjidx0D        = 2*vdwtype[jnrD+0];
1153             vdwjidx0E        = 2*vdwtype[jnrE+0];
1154             vdwjidx0F        = 2*vdwtype[jnrF+0];
1155             vdwjidx0G        = 2*vdwtype[jnrG+0];
1156             vdwjidx0H        = 2*vdwtype[jnrH+0];
1157
1158             fjx0             = _mm256_setzero_ps();
1159             fjy0             = _mm256_setzero_ps();
1160             fjz0             = _mm256_setzero_ps();
1161
1162             /**************************
1163              * CALCULATE INTERACTIONS *
1164              **************************/
1165
1166             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1167             {
1168
1169             r00              = _mm256_mul_ps(rsq00,rinv00);
1170
1171             /* Compute parameters for interactions between i and j atoms */
1172             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1173                                             vdwioffsetptr0+vdwjidx0B,
1174                                             vdwioffsetptr0+vdwjidx0C,
1175                                             vdwioffsetptr0+vdwjidx0D,
1176                                             vdwioffsetptr0+vdwjidx0E,
1177                                             vdwioffsetptr0+vdwjidx0F,
1178                                             vdwioffsetptr0+vdwjidx0G,
1179                                             vdwioffsetptr0+vdwjidx0H,
1180                                             &c6_00,&c12_00);
1181
1182             c6grid_00       = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
1183                                                                   vdwgridioffsetptr0+vdwjidx0B,
1184                                                                   vdwgridioffsetptr0+vdwjidx0C,
1185                                                                   vdwgridioffsetptr0+vdwjidx0D,
1186                                                                   vdwgridioffsetptr0+vdwjidx0E,
1187                                                                   vdwgridioffsetptr0+vdwjidx0F,
1188                                                                   vdwgridioffsetptr0+vdwjidx0G,
1189                                                                   vdwgridioffsetptr0+vdwjidx0H);
1190
1191             /* Analytical LJ-PME */
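                 /* Note: ewclj2 is expected to hold -ewaldcoeff_lj^2 (set in the
                  * kernel preamble), so exp(ewcljrsq) below is the decaying
                  * Gaussian exp(-(beta*r)^2) referred to in the poly comment.
                  * f6A and f6B carry the grid-based dispersion correction that is
                  * combined with the plain C6/C12 terms into fvdw, the scalar F/r.
                  */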
1192             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1193             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
1194             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1195             exponent         = gmx_simd_exp_r(ewcljrsq);
1196             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1197             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1198             /* f6A = 6 * C6grid * (1 - poly) */
1199             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1200             /* f6B = C6grid * exponent * beta^6 */
1201             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1202             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1203             fvdw             = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1204
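                 /* With the shifted (Sh) cutoff, pairs beyond rcutoff contribute
                  * no force: the comparison mask below zeroes fscal in all SIMD
                  * lanes where rsq00 >= rcutoff2.
                  */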
1205             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1206
1207             fscal            = fvdw;
1208
1209             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1210
1211             /* Calculate temporary vectorial force */
1212             tx               = _mm256_mul_ps(fscal,dx00);
1213             ty               = _mm256_mul_ps(fscal,dy00);
1214             tz               = _mm256_mul_ps(fscal,dz00);
1215
1216             /* Update vectorial force */
1217             fix0             = _mm256_add_ps(fix0,tx);
1218             fiy0             = _mm256_add_ps(fiy0,ty);
1219             fiz0             = _mm256_add_ps(fiz0,tz);
1220
1221             fjx0             = _mm256_add_ps(fjx0,tx);
1222             fjy0             = _mm256_add_ps(fjy0,ty);
1223             fjz0             = _mm256_add_ps(fjz0,tz);
1224
1225             }
1226
1227             /**************************
1228              * CALCULATE INTERACTIONS *
1229              **************************/
1230
1231             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1232             {
1233
1234             r10              = _mm256_mul_ps(rsq10,rinv10);
1235
1236             /* Compute parameters for interactions between i and j atoms */
1237             qq10             = _mm256_mul_ps(iq1,jq0);
1238
1239             /* EWALD ELECTROSTATICS */
1240             
1241             /* Analytical PME correction */
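                 /* gmx_mm256_pmecorrF_ps(zeta2) approximates the analytical Ewald
                  * real-space force correction as a function of zeta2 = beta^2*r^2;
                  * scaled by beta^3 and added to the plain Coulomb 1/r^3 term it
                  * gives the electrostatic F/r, which is then multiplied by the
                  * charge product qq10.
                  */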
1242             zeta2            = _mm256_mul_ps(beta2,rsq10);
1243             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1244             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1245             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1246             felec            = _mm256_mul_ps(qq10,felec);
1247             
1248             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1249
1250             fscal            = felec;
1251
1252             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1253
1254             /* Calculate temporary vectorial force */
1255             tx               = _mm256_mul_ps(fscal,dx10);
1256             ty               = _mm256_mul_ps(fscal,dy10);
1257             tz               = _mm256_mul_ps(fscal,dz10);
1258
1259             /* Update vectorial force */
1260             fix1             = _mm256_add_ps(fix1,tx);
1261             fiy1             = _mm256_add_ps(fiy1,ty);
1262             fiz1             = _mm256_add_ps(fiz1,tz);
1263
1264             fjx0             = _mm256_add_ps(fjx0,tx);
1265             fjy0             = _mm256_add_ps(fjy0,ty);
1266             fjz0             = _mm256_add_ps(fjz0,tz);
1267
1268             }
1269
1270             /**************************
1271              * CALCULATE INTERACTIONS *
1272              **************************/
1273
1274             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1275             {
1276
1277             r20              = _mm256_mul_ps(rsq20,rinv20);
1278
1279             /* Compute parameters for interactions between i and j atoms */
1280             qq20             = _mm256_mul_ps(iq2,jq0);
1281
1282             /* EWALD ELECTROSTATICS */
1283             
1284             /* Analytical PME correction */
1285             zeta2            = _mm256_mul_ps(beta2,rsq20);
1286             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1287             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1288             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1289             felec            = _mm256_mul_ps(qq20,felec);
1290             
1291             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1292
1293             fscal            = felec;
1294
1295             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1296
1297             /* Calculate temporary vectorial force */
1298             tx               = _mm256_mul_ps(fscal,dx20);
1299             ty               = _mm256_mul_ps(fscal,dy20);
1300             tz               = _mm256_mul_ps(fscal,dz20);
1301
1302             /* Update vectorial force */
1303             fix2             = _mm256_add_ps(fix2,tx);
1304             fiy2             = _mm256_add_ps(fiy2,ty);
1305             fiz2             = _mm256_add_ps(fiz2,tz);
1306
1307             fjx0             = _mm256_add_ps(fjx0,tx);
1308             fjy0             = _mm256_add_ps(fjy0,ty);
1309             fjz0             = _mm256_add_ps(fjz0,tz);
1310
1311             }
1312
1313             /**************************
1314              * CALCULATE INTERACTIONS *
1315              **************************/
1316
1317             if (gmx_mm256_any_lt(rsq30,rcutoff2))
1318             {
1319
1320             r30              = _mm256_mul_ps(rsq30,rinv30);
1321
1322             /* Compute parameters for interactions between i and j atoms */
1323             qq30             = _mm256_mul_ps(iq3,jq0);
1324
1325             /* EWALD ELECTROSTATICS */
1326             
1327             /* Analytical PME correction */
1328             zeta2            = _mm256_mul_ps(beta2,rsq30);
1329             rinv3            = _mm256_mul_ps(rinvsq30,rinv30);
1330             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1331             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1332             felec            = _mm256_mul_ps(qq30,felec);
1333             
1334             cutoff_mask      = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
1335
1336             fscal            = felec;
1337
1338             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1339
1340             /* Calculate temporary vectorial force */
1341             tx               = _mm256_mul_ps(fscal,dx30);
1342             ty               = _mm256_mul_ps(fscal,dy30);
1343             tz               = _mm256_mul_ps(fscal,dz30);
1344
1345             /* Update vectorial force */
1346             fix3             = _mm256_add_ps(fix3,tx);
1347             fiy3             = _mm256_add_ps(fiy3,ty);
1348             fiz3             = _mm256_add_ps(fiz3,tz);
1349
1350             fjx0             = _mm256_add_ps(fjx0,tx);
1351             fjy0             = _mm256_add_ps(fjy0,ty);
1352             fjz0             = _mm256_add_ps(fjz0,tz);
1353
1354             }
1355
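             /* Scatter the accumulated j forces: by Newton's third law the sums
              * in fjx0/fjy0/fjz0 are subtracted from the force array entries of
              * all eight j atoms in one swizzled store below.
              */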
1356             fjptrA             = f+j_coord_offsetA;
1357             fjptrB             = f+j_coord_offsetB;
1358             fjptrC             = f+j_coord_offsetC;
1359             fjptrD             = f+j_coord_offsetD;
1360             fjptrE             = f+j_coord_offsetE;
1361             fjptrF             = f+j_coord_offsetF;
1362             fjptrG             = f+j_coord_offsetG;
1363             fjptrH             = f+j_coord_offsetH;
1364
1365             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1366
1367             /* Inner loop uses 229 flops */
1368         }
1369
1370         if(jidx<j_index_end)
1371         {
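             /* Tail iteration: fewer than eight real j entries remain. The list
              * is padded with negative indices, which are masked out below.
              */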
1372
1373             /* Get j neighbor index, and coordinate index */
1374             jnrlistA         = jjnr[jidx];
1375             jnrlistB         = jjnr[jidx+1];
1376             jnrlistC         = jjnr[jidx+2];
1377             jnrlistD         = jjnr[jidx+3];
1378             jnrlistE         = jjnr[jidx+4];
1379             jnrlistF         = jjnr[jidx+5];
1380             jnrlistG         = jjnr[jidx+6];
1381             jnrlistH         = jjnr[jidx+7];
1382             /* Sign of each element will be negative for non-real atoms.
1383              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1384              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
1385              */
1386             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1387                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1388                                             
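             /* Clamp padded entries to index 0 so the coordinate and parameter
              * gathers stay in bounds; their contributions are zeroed through
              * dummy_mask and their forces are later redirected to scratch.
              */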
1389             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1390             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1391             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1392             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1393             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
1394             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
1395             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
1396             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
1397             j_coord_offsetA  = DIM*jnrA;
1398             j_coord_offsetB  = DIM*jnrB;
1399             j_coord_offsetC  = DIM*jnrC;
1400             j_coord_offsetD  = DIM*jnrD;
1401             j_coord_offsetE  = DIM*jnrE;
1402             j_coord_offsetF  = DIM*jnrF;
1403             j_coord_offsetG  = DIM*jnrG;
1404             j_coord_offsetH  = DIM*jnrH;
1405
1406             /* load j atom coordinates */
1407             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1408                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1409                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1410                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1411                                                  &jx0,&jy0,&jz0);
1412
1413             /* Calculate displacement vector */
1414             dx00             = _mm256_sub_ps(ix0,jx0);
1415             dy00             = _mm256_sub_ps(iy0,jy0);
1416             dz00             = _mm256_sub_ps(iz0,jz0);
1417             dx10             = _mm256_sub_ps(ix1,jx0);
1418             dy10             = _mm256_sub_ps(iy1,jy0);
1419             dz10             = _mm256_sub_ps(iz1,jz0);
1420             dx20             = _mm256_sub_ps(ix2,jx0);
1421             dy20             = _mm256_sub_ps(iy2,jy0);
1422             dz20             = _mm256_sub_ps(iz2,jz0);
1423             dx30             = _mm256_sub_ps(ix3,jx0);
1424             dy30             = _mm256_sub_ps(iy3,jy0);
1425             dz30             = _mm256_sub_ps(iz3,jz0);
1426
1427             /* Calculate squared distance and things based on it */
1428             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1429             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1430             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1431             rsq30            = gmx_mm256_calc_rsq_ps(dx30,dy30,dz30);
1432
1433             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1434             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1435             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1436             rinv30           = gmx_mm256_invsqrt_ps(rsq30);
1437
1438             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1439             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1440             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1441             rinvsq30         = _mm256_mul_ps(rinv30,rinv30);
1442
1443             /* Load parameters for j particles */
1444             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1445                                                                  charge+jnrC+0,charge+jnrD+0,
1446                                                                  charge+jnrE+0,charge+jnrF+0,
1447                                                                  charge+jnrG+0,charge+jnrH+0);
1448             vdwjidx0A        = 2*vdwtype[jnrA+0];
1449             vdwjidx0B        = 2*vdwtype[jnrB+0];
1450             vdwjidx0C        = 2*vdwtype[jnrC+0];
1451             vdwjidx0D        = 2*vdwtype[jnrD+0];
1452             vdwjidx0E        = 2*vdwtype[jnrE+0];
1453             vdwjidx0F        = 2*vdwtype[jnrF+0];
1454             vdwjidx0G        = 2*vdwtype[jnrG+0];
1455             vdwjidx0H        = 2*vdwtype[jnrH+0];
1456
1457             fjx0             = _mm256_setzero_ps();
1458             fjy0             = _mm256_setzero_ps();
1459             fjz0             = _mm256_setzero_ps();
1460
1461             /**************************
1462              * CALCULATE INTERACTIONS *
1463              **************************/
1464
1465             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1466             {
1467
1468             r00              = _mm256_mul_ps(rsq00,rinv00);
1469             r00              = _mm256_andnot_ps(dummy_mask,r00);
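             /* Clearing r for dummy lanes appears to guard against NaNs from
              * padded entries (rsq near 0 gives rinv = inf, making r = rsq*rinv
              * undefined), even though r00 is not used further in this
              * force-only block.
              */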
1470
1471             /* Compute parameters for interactions between i and j atoms */
1472             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1473                                             vdwioffsetptr0+vdwjidx0B,
1474                                             vdwioffsetptr0+vdwjidx0C,
1475                                             vdwioffsetptr0+vdwjidx0D,
1476                                             vdwioffsetptr0+vdwjidx0E,
1477                                             vdwioffsetptr0+vdwjidx0F,
1478                                             vdwioffsetptr0+vdwjidx0G,
1479                                             vdwioffsetptr0+vdwjidx0H,
1480                                             &c6_00,&c12_00);
1481
1482             c6grid_00       = gmx_mm256_load_8real_swizzle_ps(vdwgridioffsetptr0+vdwjidx0A,
1483                                                                   vdwgridioffsetptr0+vdwjidx0B,
1484                                                                   vdwgridioffsetptr0+vdwjidx0C,
1485                                                                   vdwgridioffsetptr0+vdwjidx0D,
1486                                                                   vdwgridioffsetptr0+vdwjidx0E,
1487                                                                   vdwgridioffsetptr0+vdwjidx0F,
1488                                                                   vdwgridioffsetptr0+vdwjidx0G,
1489                                                                   vdwgridioffsetptr0+vdwjidx0H);
1490
1491             /* Analytical LJ-PME */
1492             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1493             ewcljrsq         = _mm256_mul_ps(ewclj2,rsq00);
1494             ewclj6           = _mm256_mul_ps(ewclj2,_mm256_mul_ps(ewclj2,ewclj2));
1495             exponent         = gmx_simd_exp_r(ewcljrsq);
1496             /* poly = exp(-(beta*r)^2) * (1 + (beta*r)^2 + (beta*r)^4 /2) */
1497             poly             = _mm256_mul_ps(exponent,_mm256_add_ps(_mm256_sub_ps(one,ewcljrsq),_mm256_mul_ps(_mm256_mul_ps(ewcljrsq,ewcljrsq),one_half)));
1498             /* f6A = 6 * C6grid * (1 - poly) */
1499             f6A              = _mm256_mul_ps(c6grid_00,_mm256_sub_ps(one,poly));
1500             /* f6B = C6grid * exponent * beta^6 */
1501             f6B              = _mm256_mul_ps(_mm256_mul_ps(c6grid_00,one_sixth),_mm256_mul_ps(exponent,ewclj6));
1502             /* fvdw = 12*C12/r13 - ((6*C6 - f6A)/r6 + f6B)/r */
1503             fvdw             = _mm256_mul_ps(_mm256_add_ps(_mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),_mm256_sub_ps(c6_00,f6A)),rinvsix),f6B),rinvsq00);
1504
1505             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1506
1507             fscal            = fvdw;
1508
1509             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1510
1511             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1512
1513             /* Calculate temporary vectorial force */
1514             tx               = _mm256_mul_ps(fscal,dx00);
1515             ty               = _mm256_mul_ps(fscal,dy00);
1516             tz               = _mm256_mul_ps(fscal,dz00);
1517
1518             /* Update vectorial force */
1519             fix0             = _mm256_add_ps(fix0,tx);
1520             fiy0             = _mm256_add_ps(fiy0,ty);
1521             fiz0             = _mm256_add_ps(fiz0,tz);
1522
1523             fjx0             = _mm256_add_ps(fjx0,tx);
1524             fjy0             = _mm256_add_ps(fjy0,ty);
1525             fjz0             = _mm256_add_ps(fjz0,tz);
1526
1527             }
1528
1529             /**************************
1530              * CALCULATE INTERACTIONS *
1531              **************************/
1532
1533             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1534             {
1535
1536             r10              = _mm256_mul_ps(rsq10,rinv10);
1537             r10              = _mm256_andnot_ps(dummy_mask,r10);
1538
1539             /* Compute parameters for interactions between i and j atoms */
1540             qq10             = _mm256_mul_ps(iq1,jq0);
1541
1542             /* EWALD ELECTROSTATICS */
1543             
1544             /* Analytical PME correction */
1545             zeta2            = _mm256_mul_ps(beta2,rsq10);
1546             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1547             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1548             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1549             felec            = _mm256_mul_ps(qq10,felec);
1550             
1551             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1552
1553             fscal            = felec;
1554
1555             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1556
1557             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1558
1559             /* Calculate temporary vectorial force */
1560             tx               = _mm256_mul_ps(fscal,dx10);
1561             ty               = _mm256_mul_ps(fscal,dy10);
1562             tz               = _mm256_mul_ps(fscal,dz10);
1563
1564             /* Update vectorial force */
1565             fix1             = _mm256_add_ps(fix1,tx);
1566             fiy1             = _mm256_add_ps(fiy1,ty);
1567             fiz1             = _mm256_add_ps(fiz1,tz);
1568
1569             fjx0             = _mm256_add_ps(fjx0,tx);
1570             fjy0             = _mm256_add_ps(fjy0,ty);
1571             fjz0             = _mm256_add_ps(fjz0,tz);
1572
1573             }
1574
1575             /**************************
1576              * CALCULATE INTERACTIONS *
1577              **************************/
1578
1579             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1580             {
1581
1582             r20              = _mm256_mul_ps(rsq20,rinv20);
1583             r20              = _mm256_andnot_ps(dummy_mask,r20);
1584
1585             /* Compute parameters for interactions between i and j atoms */
1586             qq20             = _mm256_mul_ps(iq2,jq0);
1587
1588             /* EWALD ELECTROSTATICS */
1589             
1590             /* Analytical PME correction */
1591             zeta2            = _mm256_mul_ps(beta2,rsq20);
1592             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1593             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1594             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1595             felec            = _mm256_mul_ps(qq20,felec);
1596             
1597             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1598
1599             fscal            = felec;
1600
1601             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1602
1603             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1604
1605             /* Calculate temporary vectorial force */
1606             tx               = _mm256_mul_ps(fscal,dx20);
1607             ty               = _mm256_mul_ps(fscal,dy20);
1608             tz               = _mm256_mul_ps(fscal,dz20);
1609
1610             /* Update vectorial force */
1611             fix2             = _mm256_add_ps(fix2,tx);
1612             fiy2             = _mm256_add_ps(fiy2,ty);
1613             fiz2             = _mm256_add_ps(fiz2,tz);
1614
1615             fjx0             = _mm256_add_ps(fjx0,tx);
1616             fjy0             = _mm256_add_ps(fjy0,ty);
1617             fjz0             = _mm256_add_ps(fjz0,tz);
1618
1619             }
1620
1621             /**************************
1622              * CALCULATE INTERACTIONS *
1623              **************************/
1624
1625             if (gmx_mm256_any_lt(rsq30,rcutoff2))
1626             {
1627
1628             r30              = _mm256_mul_ps(rsq30,rinv30);
1629             r30              = _mm256_andnot_ps(dummy_mask,r30);
1630
1631             /* Compute parameters for interactions between i and j atoms */
1632             qq30             = _mm256_mul_ps(iq3,jq0);
1633
1634             /* EWALD ELECTROSTATICS */
1635             
1636             /* Analytical PME correction */
1637             zeta2            = _mm256_mul_ps(beta2,rsq30);
1638             rinv3            = _mm256_mul_ps(rinvsq30,rinv30);
1639             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1640             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1641             felec            = _mm256_mul_ps(qq30,felec);
1642             
1643             cutoff_mask      = _mm256_cmp_ps(rsq30,rcutoff2,_CMP_LT_OQ);
1644
1645             fscal            = felec;
1646
1647             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1648
1649             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1650
1651             /* Calculate temporary vectorial force */
1652             tx               = _mm256_mul_ps(fscal,dx30);
1653             ty               = _mm256_mul_ps(fscal,dy30);
1654             tz               = _mm256_mul_ps(fscal,dz30);
1655
1656             /* Update vectorial force */
1657             fix3             = _mm256_add_ps(fix3,tx);
1658             fiy3             = _mm256_add_ps(fiy3,ty);
1659             fiz3             = _mm256_add_ps(fiz3,tz);
1660
1661             fjx0             = _mm256_add_ps(fjx0,tx);
1662             fjy0             = _mm256_add_ps(fjy0,ty);
1663             fjz0             = _mm256_add_ps(fjz0,tz);
1664
1665             }
1666
1667             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1668             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1669             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1670             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1671             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1672             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1673             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1674             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1675
1676             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1677
1678             /* Inner loop uses 233 flops */
1679         }
1680
1681         /* End of innermost loop */
1682
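         /* Reduce the twelve i-force accumulators (four water sites times x/y/z)
          * across the eight SIMD lanes, add them to the force array for this
          * i atom, and accumulate the same sums into fshift for the shift-force
          * (virial) bookkeeping.
          */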
1683         gmx_mm256_update_iforce_4atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,fix3,fiy3,fiz3,
1684                                                  f+i_coord_offset,fshift+i_shift_offset);
1685
1686         /* Increment number of inner iterations */
1687         inneriter                  += j_index_end - j_index_start;
1688
1689         /* Outer loop uses 24 flops */
1690     }
1691
1692     /* Increment number of outer iterations */
1693     outeriter        += nri;
1694
1695     /* Update outer/inner flops */
1696
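     /* The iteration counts are converted to a flop estimate (24 per outer and
      * 233 per inner iteration) and accumulated in the eNR_NBKERNEL_ELEC_VDW_W4_F
      * counter used for the performance summary.
      */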
1697     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W4_F,outeriter*24 + inneriter*233);
1698 }