1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013,2014, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_single kernel generator.
37  */
38 #include "config.h"
39
40 #include <math.h>
41
42 #include "../nb_kernel.h"
43 #include "gromacs/legacyheaders/types/simple.h"
44 #include "gromacs/math/vec.h"
45 #include "gromacs/legacyheaders/nrnb.h"
46
47 #include "gromacs/simd/math_x86_avx_256_single.h"
48 #include "kernelutil_x86_avx_256_single.h"
49
50 /*
51  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_single
52  * Electrostatics interaction: Ewald
53  * VdW interaction:            LennardJones
54  * Geometry:                   Water3-Particle
55  * Calculate force/pot:        PotentialAndForce
56  */
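/*
 * Roughly speaking, each inner-loop pass below evaluates, for the three water
 * i-sites against eight j atoms at once, the shift-corrected real-space Ewald
 * electrostatics
 *   velec = qq*( erfc(beta*r)/r - sh_ewald ),     qq = epsfac*qi*qj
 * plus, for i-site 0 only, a potential-shifted 12-6 Lennard-Jones term, and the
 * corresponding scalar forces. The detailed forms are noted at the point of use
 * inside the kernel body.
 */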
57 void
58 nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_single
59                     (t_nblist                    * gmx_restrict       nlist,
60                      rvec                        * gmx_restrict          xx,
61                      rvec                        * gmx_restrict          ff,
62                      t_forcerec                  * gmx_restrict          fr,
63                      t_mdatoms                   * gmx_restrict     mdatoms,
64                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
65                      t_nrnb                      * gmx_restrict        nrnb)
66 {
67     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
68      * just 0 for non-waters.
69      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
70      * jnr indices corresponding to data put in the eight positions in the SIMD register.
71      */
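    /* Concretely, one inner-loop pass gathers eight j atoms so that e.g.
     *   jx0 = { x[jnrA], x[jnrB], x[jnrC], x[jnrD], x[jnrE], x[jnrF], x[jnrG], x[jnrH] }
     * while each i-site coordinate (ix0, ix1, ix2, ...) is broadcast across all
     * eight lanes, giving eight i-j pairs per SIMD operation.
     */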
72     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
73     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
74     int              jnrA,jnrB,jnrC,jnrD;
75     int              jnrE,jnrF,jnrG,jnrH;
76     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
77     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
78     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
79     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
80     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
81     real             rcutoff_scalar;
82     real             *shiftvec,*fshift,*x,*f;
83     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
84     real             scratch[4*DIM];
85     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
86     real *           vdwioffsetptr0;
87     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
88     real *           vdwioffsetptr1;
89     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
90     real *           vdwioffsetptr2;
91     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
92     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
93     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
94     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
95     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
96     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
97     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
98     real             *charge;
99     int              nvdwtype;
100     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
101     int              *vdwtype;
102     real             *vdwparam;
103     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
104     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
105     __m256i          ewitab;
106     __m128i          ewitab_lo,ewitab_hi;
107     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
108     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
109     real             *ewtab;
110     __m256           dummy_mask,cutoff_mask;
111     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
112     __m256           one     = _mm256_set1_ps(1.0);
113     __m256           two     = _mm256_set1_ps(2.0);
114     x                = xx[0];
115     f                = ff[0];
116
117     nri              = nlist->nri;
118     iinr             = nlist->iinr;
119     jindex           = nlist->jindex;
120     jjnr             = nlist->jjnr;
121     shiftidx         = nlist->shift;
122     gid              = nlist->gid;
123     shiftvec         = fr->shift_vec[0];
124     fshift           = fr->fshift[0];
125     facel            = _mm256_set1_ps(fr->epsfac);
126     charge           = mdatoms->chargeA;
127     nvdwtype         = fr->ntype;
128     vdwparam         = fr->nbfp;
129     vdwtype          = mdatoms->typeA;
130
131     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
132     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
133     beta2            = _mm256_mul_ps(beta,beta);
134     beta3            = _mm256_mul_ps(beta,beta2);
135
136     ewtab            = fr->ic->tabq_coul_FDV0;
137     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
138     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
139
140     /* Setup water-specific parameters */
141     inr              = nlist->iinr[0];
142     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
143     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
144     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
145     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
146
147     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
148     rcutoff_scalar   = fr->rcoulomb;
149     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
150     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
151
152     sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
153     rvdw             = _mm256_set1_ps(fr->rvdw);
154
155     /* Avoid spurious compiler warnings about possibly uninitialized variables */
156     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
157     j_coord_offsetA = 0;
158     j_coord_offsetB = 0;
159     j_coord_offsetC = 0;
160     j_coord_offsetD = 0;
161     j_coord_offsetE = 0;
162     j_coord_offsetF = 0;
163     j_coord_offsetG = 0;
164     j_coord_offsetH = 0;
165
166     outeriter        = 0;
167     inneriter        = 0;
168
169     for(iidx=0;iidx<4*DIM;iidx++)
170     {
171         scratch[iidx] = 0.0;
172     }
173
174     /* Start outer loop over neighborlists */
175     for(iidx=0; iidx<nri; iidx++)
176     {
177         /* Load shift vector for this list */
178         i_shift_offset   = DIM*shiftidx[iidx];
179
180         /* Load limits for loop over neighbors */
181         j_index_start    = jindex[iidx];
182         j_index_end      = jindex[iidx+1];
183
184         /* Get outer coordinate index */
185         inr              = iinr[iidx];
186         i_coord_offset   = DIM*inr;
187
188         /* Load i particle coords and add shift vector */
189         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
190                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
191
192         fix0             = _mm256_setzero_ps();
193         fiy0             = _mm256_setzero_ps();
194         fiz0             = _mm256_setzero_ps();
195         fix1             = _mm256_setzero_ps();
196         fiy1             = _mm256_setzero_ps();
197         fiz1             = _mm256_setzero_ps();
198         fix2             = _mm256_setzero_ps();
199         fiy2             = _mm256_setzero_ps();
200         fiz2             = _mm256_setzero_ps();
201
202         /* Reset potential sums */
203         velecsum         = _mm256_setzero_ps();
204         vvdwsum          = _mm256_setzero_ps();
205
206         /* Start inner kernel loop */
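        /* Full 8-wide passes are taken as long as all eight jjnr entries are real
         * (non-negative); the final, possibly padded, chunk is handled by the
         * masked epilogue in the if(jidx<j_index_end) block further down.
         */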
207         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
208         {
209
210             /* Get j neighbor index, and coordinate index */
211             jnrA             = jjnr[jidx];
212             jnrB             = jjnr[jidx+1];
213             jnrC             = jjnr[jidx+2];
214             jnrD             = jjnr[jidx+3];
215             jnrE             = jjnr[jidx+4];
216             jnrF             = jjnr[jidx+5];
217             jnrG             = jjnr[jidx+6];
218             jnrH             = jjnr[jidx+7];
219             j_coord_offsetA  = DIM*jnrA;
220             j_coord_offsetB  = DIM*jnrB;
221             j_coord_offsetC  = DIM*jnrC;
222             j_coord_offsetD  = DIM*jnrD;
223             j_coord_offsetE  = DIM*jnrE;
224             j_coord_offsetF  = DIM*jnrF;
225             j_coord_offsetG  = DIM*jnrG;
226             j_coord_offsetH  = DIM*jnrH;
227
228             /* load j atom coordinates */
229             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
230                                                  x+j_coord_offsetC,x+j_coord_offsetD,
231                                                  x+j_coord_offsetE,x+j_coord_offsetF,
232                                                  x+j_coord_offsetG,x+j_coord_offsetH,
233                                                  &jx0,&jy0,&jz0);
234
235             /* Calculate displacement vector */
236             dx00             = _mm256_sub_ps(ix0,jx0);
237             dy00             = _mm256_sub_ps(iy0,jy0);
238             dz00             = _mm256_sub_ps(iz0,jz0);
239             dx10             = _mm256_sub_ps(ix1,jx0);
240             dy10             = _mm256_sub_ps(iy1,jy0);
241             dz10             = _mm256_sub_ps(iz1,jz0);
242             dx20             = _mm256_sub_ps(ix2,jx0);
243             dy20             = _mm256_sub_ps(iy2,jy0);
244             dz20             = _mm256_sub_ps(iz2,jz0);
245
246             /* Calculate squared distance and things based on it */
247             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
248             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
249             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
250
251             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
252             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
253             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
254
255             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
256             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
257             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
258
259             /* Load parameters for j particles */
260             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
261                                                                  charge+jnrC+0,charge+jnrD+0,
262                                                                  charge+jnrE+0,charge+jnrF+0,
263                                                                  charge+jnrG+0,charge+jnrH+0);
264             vdwjidx0A        = 2*vdwtype[jnrA+0];
265             vdwjidx0B        = 2*vdwtype[jnrB+0];
266             vdwjidx0C        = 2*vdwtype[jnrC+0];
267             vdwjidx0D        = 2*vdwtype[jnrD+0];
268             vdwjidx0E        = 2*vdwtype[jnrE+0];
269             vdwjidx0F        = 2*vdwtype[jnrF+0];
270             vdwjidx0G        = 2*vdwtype[jnrG+0];
271             vdwjidx0H        = 2*vdwtype[jnrH+0];
272
273             fjx0             = _mm256_setzero_ps();
274             fjy0             = _mm256_setzero_ps();
275             fjz0             = _mm256_setzero_ps();
276
277             /**************************
278              * CALCULATE INTERACTIONS *
279              **************************/
280
281             if (gmx_mm256_any_lt(rsq00,rcutoff2))
282             {
283
284             r00              = _mm256_mul_ps(rsq00,rinv00);
285
286             /* Compute parameters for interactions between i and j atoms */
287             qq00             = _mm256_mul_ps(iq0,jq0);
288             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
289                                             vdwioffsetptr0+vdwjidx0B,
290                                             vdwioffsetptr0+vdwjidx0C,
291                                             vdwioffsetptr0+vdwjidx0D,
292                                             vdwioffsetptr0+vdwjidx0E,
293                                             vdwioffsetptr0+vdwjidx0F,
294                                             vdwioffsetptr0+vdwjidx0G,
295                                             vdwioffsetptr0+vdwjidx0H,
296                                             &c6_00,&c12_00);
297
298             /* EWALD ELECTROSTATICS */
299             
300             /* Analytical PME correction */
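            /* gmx_mm256_pmecorrF_ps/gmx_mm256_pmecorrV_ps evaluate (presumably via a
             * polynomial fit in zeta2 = (beta*r)^2) correction terms such that
             *   felec ~ qq*( erfc(beta*r) + 2*beta*r/sqrt(pi)*exp(-(beta*r)^2) )/r^3
             *   velec ~ qq*( erfc(beta*r)/r - sh_ewald )
             * without a table lookup. felec is the force divided by r, so multiplying
             * by dx/dy/dz below yields the Cartesian force components.
             */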
301             zeta2            = _mm256_mul_ps(beta2,rsq00);
302             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
303             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
304             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
305             felec            = _mm256_mul_ps(qq00,felec);
306             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
307             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
308             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
309             velec            = _mm256_mul_ps(qq00,velec);
310             
311             /* LENNARD-JONES DISPERSION/REPULSION */
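            /* With the 1/6 and 1/12 factors written out, this block computes the
             * potential-shifted Lennard-Jones terms
             *   vvdw = c12*( r^-12 - s*s )/12 - c6*( r^-6 - s )/6,   s = sh_vdw_invrcut6
             *   fvdw = ( c12*r^-12 - c6*r^-6 )/r^2
             * where s is presumably 1/rvdw^6, so vvdw vanishes at the cutoff while
             * fvdw (again force over r) is left unshifted.
             */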
312
313             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
314             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
315             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
316             vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
317                                           _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
318             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
319
320             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
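            /* Branchless cutoff handling: lanes with rsq00 >= rcutoff2 get a zero
             * mask, so their energy and force contributions are wiped by the
             * _mm256_and_ps operations below rather than by per-lane branching.
             */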
321
322             /* Update potential sum for this i atom from the interaction with this j atom. */
323             velec            = _mm256_and_ps(velec,cutoff_mask);
324             velecsum         = _mm256_add_ps(velecsum,velec);
325             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
326             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
327
328             fscal            = _mm256_add_ps(felec,fvdw);
329
330             fscal            = _mm256_and_ps(fscal,cutoff_mask);
331
332             /* Calculate temporary vectorial force */
333             tx               = _mm256_mul_ps(fscal,dx00);
334             ty               = _mm256_mul_ps(fscal,dy00);
335             tz               = _mm256_mul_ps(fscal,dz00);
336
337             /* Update vectorial force */
338             fix0             = _mm256_add_ps(fix0,tx);
339             fiy0             = _mm256_add_ps(fiy0,ty);
340             fiz0             = _mm256_add_ps(fiz0,tz);
341
342             fjx0             = _mm256_add_ps(fjx0,tx);
343             fjy0             = _mm256_add_ps(fjy0,ty);
344             fjz0             = _mm256_add_ps(fjz0,tz);
345
346             }
347
348             /**************************
349              * CALCULATE INTERACTIONS *
350              **************************/
351
352             if (gmx_mm256_any_lt(rsq10,rcutoff2))
353             {
354
355             r10              = _mm256_mul_ps(rsq10,rinv10);
356
357             /* Compute parameters for interactions between i and j atoms */
358             qq10             = _mm256_mul_ps(iq1,jq0);
359
360             /* EWALD ELECTROSTATICS */
361             
362             /* Analytical PME correction */
363             zeta2            = _mm256_mul_ps(beta2,rsq10);
364             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
365             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
366             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
367             felec            = _mm256_mul_ps(qq10,felec);
368             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
369             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
370             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
371             velec            = _mm256_mul_ps(qq10,velec);
372             
373             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
374
375             /* Update potential sum for this i atom from the interaction with this j atom. */
376             velec            = _mm256_and_ps(velec,cutoff_mask);
377             velecsum         = _mm256_add_ps(velecsum,velec);
378
379             fscal            = felec;
380
381             fscal            = _mm256_and_ps(fscal,cutoff_mask);
382
383             /* Calculate temporary vectorial force */
384             tx               = _mm256_mul_ps(fscal,dx10);
385             ty               = _mm256_mul_ps(fscal,dy10);
386             tz               = _mm256_mul_ps(fscal,dz10);
387
388             /* Update vectorial force */
389             fix1             = _mm256_add_ps(fix1,tx);
390             fiy1             = _mm256_add_ps(fiy1,ty);
391             fiz1             = _mm256_add_ps(fiz1,tz);
392
393             fjx0             = _mm256_add_ps(fjx0,tx);
394             fjy0             = _mm256_add_ps(fjy0,ty);
395             fjz0             = _mm256_add_ps(fjz0,tz);
396
397             }
398
399             /**************************
400              * CALCULATE INTERACTIONS *
401              **************************/
402
403             if (gmx_mm256_any_lt(rsq20,rcutoff2))
404             {
405
406             r20              = _mm256_mul_ps(rsq20,rinv20);
407
408             /* Compute parameters for interactions between i and j atoms */
409             qq20             = _mm256_mul_ps(iq2,jq0);
410
411             /* EWALD ELECTROSTATICS */
412             
413             /* Analytical PME correction */
414             zeta2            = _mm256_mul_ps(beta2,rsq20);
415             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
416             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
417             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
418             felec            = _mm256_mul_ps(qq20,felec);
419             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
420             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
421             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
422             velec            = _mm256_mul_ps(qq20,velec);
423             
424             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
425
426             /* Update potential sum for this i atom from the interaction with this j atom. */
427             velec            = _mm256_and_ps(velec,cutoff_mask);
428             velecsum         = _mm256_add_ps(velecsum,velec);
429
430             fscal            = felec;
431
432             fscal            = _mm256_and_ps(fscal,cutoff_mask);
433
434             /* Calculate temporary vectorial force */
435             tx               = _mm256_mul_ps(fscal,dx20);
436             ty               = _mm256_mul_ps(fscal,dy20);
437             tz               = _mm256_mul_ps(fscal,dz20);
438
439             /* Update vectorial force */
440             fix2             = _mm256_add_ps(fix2,tx);
441             fiy2             = _mm256_add_ps(fiy2,ty);
442             fiz2             = _mm256_add_ps(fiz2,tz);
443
444             fjx0             = _mm256_add_ps(fjx0,tx);
445             fjy0             = _mm256_add_ps(fjy0,ty);
446             fjz0             = _mm256_add_ps(fjz0,tz);
447
448             }
449
450             fjptrA             = f+j_coord_offsetA;
451             fjptrB             = f+j_coord_offsetB;
452             fjptrC             = f+j_coord_offsetC;
453             fjptrD             = f+j_coord_offsetD;
454             fjptrE             = f+j_coord_offsetE;
455             fjptrF             = f+j_coord_offsetF;
456             fjptrG             = f+j_coord_offsetG;
457             fjptrH             = f+j_coord_offsetH;
458
459             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
460
461             /* Inner loop uses 348 flops */
462         }
463
464         if(jidx<j_index_end)
465         {
466
467             /* Get j neighbor index, and coordinate index */
468             jnrlistA         = jjnr[jidx];
469             jnrlistB         = jjnr[jidx+1];
470             jnrlistC         = jjnr[jidx+2];
471             jnrlistD         = jjnr[jidx+3];
472             jnrlistE         = jjnr[jidx+4];
473             jnrlistF         = jjnr[jidx+5];
474             jnrlistG         = jjnr[jidx+6];
475             jnrlistH         = jjnr[jidx+7];
476             /* Sign of each element will be negative for non-real atoms.
477              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
478              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
479              */
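            /* For example, if only five of the eight slots hold real atoms, the list
             * is padded with negative indices; those lanes get an all-ones dummy_mask
             * here and their r, energy and fscal values are zeroed with
             * _mm256_andnot_ps further down, so the padding never contributes.
             */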
480             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
481                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
482                                             
483             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
484             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
485             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
486             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
487             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
488             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
489             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
490             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
491             j_coord_offsetA  = DIM*jnrA;
492             j_coord_offsetB  = DIM*jnrB;
493             j_coord_offsetC  = DIM*jnrC;
494             j_coord_offsetD  = DIM*jnrD;
495             j_coord_offsetE  = DIM*jnrE;
496             j_coord_offsetF  = DIM*jnrF;
497             j_coord_offsetG  = DIM*jnrG;
498             j_coord_offsetH  = DIM*jnrH;
499
500             /* load j atom coordinates */
501             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
502                                                  x+j_coord_offsetC,x+j_coord_offsetD,
503                                                  x+j_coord_offsetE,x+j_coord_offsetF,
504                                                  x+j_coord_offsetG,x+j_coord_offsetH,
505                                                  &jx0,&jy0,&jz0);
506
507             /* Calculate displacement vector */
508             dx00             = _mm256_sub_ps(ix0,jx0);
509             dy00             = _mm256_sub_ps(iy0,jy0);
510             dz00             = _mm256_sub_ps(iz0,jz0);
511             dx10             = _mm256_sub_ps(ix1,jx0);
512             dy10             = _mm256_sub_ps(iy1,jy0);
513             dz10             = _mm256_sub_ps(iz1,jz0);
514             dx20             = _mm256_sub_ps(ix2,jx0);
515             dy20             = _mm256_sub_ps(iy2,jy0);
516             dz20             = _mm256_sub_ps(iz2,jz0);
517
518             /* Calculate squared distance and things based on it */
519             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
520             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
521             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
522
523             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
524             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
525             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
526
527             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
528             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
529             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
530
531             /* Load parameters for j particles */
532             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
533                                                                  charge+jnrC+0,charge+jnrD+0,
534                                                                  charge+jnrE+0,charge+jnrF+0,
535                                                                  charge+jnrG+0,charge+jnrH+0);
536             vdwjidx0A        = 2*vdwtype[jnrA+0];
537             vdwjidx0B        = 2*vdwtype[jnrB+0];
538             vdwjidx0C        = 2*vdwtype[jnrC+0];
539             vdwjidx0D        = 2*vdwtype[jnrD+0];
540             vdwjidx0E        = 2*vdwtype[jnrE+0];
541             vdwjidx0F        = 2*vdwtype[jnrF+0];
542             vdwjidx0G        = 2*vdwtype[jnrG+0];
543             vdwjidx0H        = 2*vdwtype[jnrH+0];
544
545             fjx0             = _mm256_setzero_ps();
546             fjy0             = _mm256_setzero_ps();
547             fjz0             = _mm256_setzero_ps();
548
549             /**************************
550              * CALCULATE INTERACTIONS *
551              **************************/
552
553             if (gmx_mm256_any_lt(rsq00,rcutoff2))
554             {
555
556             r00              = _mm256_mul_ps(rsq00,rinv00);
557             r00              = _mm256_andnot_ps(dummy_mask,r00);
558
559             /* Compute parameters for interactions between i and j atoms */
560             qq00             = _mm256_mul_ps(iq0,jq0);
561             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
562                                             vdwioffsetptr0+vdwjidx0B,
563                                             vdwioffsetptr0+vdwjidx0C,
564                                             vdwioffsetptr0+vdwjidx0D,
565                                             vdwioffsetptr0+vdwjidx0E,
566                                             vdwioffsetptr0+vdwjidx0F,
567                                             vdwioffsetptr0+vdwjidx0G,
568                                             vdwioffsetptr0+vdwjidx0H,
569                                             &c6_00,&c12_00);
570
571             /* EWALD ELECTROSTATICS */
572             
573             /* Analytical PME correction */
574             zeta2            = _mm256_mul_ps(beta2,rsq00);
575             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
576             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
577             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
578             felec            = _mm256_mul_ps(qq00,felec);
579             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
580             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
581             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
582             velec            = _mm256_mul_ps(qq00,velec);
583             
584             /* LENNARD-JONES DISPERSION/REPULSION */
585
586             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
587             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
588             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
589             vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
590                                           _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
591             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
592
593             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
594
595             /* Update potential sum for this i atom from the interaction with this j atom. */
596             velec            = _mm256_and_ps(velec,cutoff_mask);
597             velec            = _mm256_andnot_ps(dummy_mask,velec);
598             velecsum         = _mm256_add_ps(velecsum,velec);
599             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
600             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
601             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
602
603             fscal            = _mm256_add_ps(felec,fvdw);
604
605             fscal            = _mm256_and_ps(fscal,cutoff_mask);
606
607             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
608
609             /* Calculate temporary vectorial force */
610             tx               = _mm256_mul_ps(fscal,dx00);
611             ty               = _mm256_mul_ps(fscal,dy00);
612             tz               = _mm256_mul_ps(fscal,dz00);
613
614             /* Update vectorial force */
615             fix0             = _mm256_add_ps(fix0,tx);
616             fiy0             = _mm256_add_ps(fiy0,ty);
617             fiz0             = _mm256_add_ps(fiz0,tz);
618
619             fjx0             = _mm256_add_ps(fjx0,tx);
620             fjy0             = _mm256_add_ps(fjy0,ty);
621             fjz0             = _mm256_add_ps(fjz0,tz);
622
623             }
624
625             /**************************
626              * CALCULATE INTERACTIONS *
627              **************************/
628
629             if (gmx_mm256_any_lt(rsq10,rcutoff2))
630             {
631
632             r10              = _mm256_mul_ps(rsq10,rinv10);
633             r10              = _mm256_andnot_ps(dummy_mask,r10);
634
635             /* Compute parameters for interactions between i and j atoms */
636             qq10             = _mm256_mul_ps(iq1,jq0);
637
638             /* EWALD ELECTROSTATICS */
639             
640             /* Analytical PME correction */
641             zeta2            = _mm256_mul_ps(beta2,rsq10);
642             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
643             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
644             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
645             felec            = _mm256_mul_ps(qq10,felec);
646             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
647             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
648             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
649             velec            = _mm256_mul_ps(qq10,velec);
650             
651             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
652
653             /* Update potential sum for this i atom from the interaction with this j atom. */
654             velec            = _mm256_and_ps(velec,cutoff_mask);
655             velec            = _mm256_andnot_ps(dummy_mask,velec);
656             velecsum         = _mm256_add_ps(velecsum,velec);
657
658             fscal            = felec;
659
660             fscal            = _mm256_and_ps(fscal,cutoff_mask);
661
662             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
663
664             /* Calculate temporary vectorial force */
665             tx               = _mm256_mul_ps(fscal,dx10);
666             ty               = _mm256_mul_ps(fscal,dy10);
667             tz               = _mm256_mul_ps(fscal,dz10);
668
669             /* Update vectorial force */
670             fix1             = _mm256_add_ps(fix1,tx);
671             fiy1             = _mm256_add_ps(fiy1,ty);
672             fiz1             = _mm256_add_ps(fiz1,tz);
673
674             fjx0             = _mm256_add_ps(fjx0,tx);
675             fjy0             = _mm256_add_ps(fjy0,ty);
676             fjz0             = _mm256_add_ps(fjz0,tz);
677
678             }
679
680             /**************************
681              * CALCULATE INTERACTIONS *
682              **************************/
683
684             if (gmx_mm256_any_lt(rsq20,rcutoff2))
685             {
686
687             r20              = _mm256_mul_ps(rsq20,rinv20);
688             r20              = _mm256_andnot_ps(dummy_mask,r20);
689
690             /* Compute parameters for interactions between i and j atoms */
691             qq20             = _mm256_mul_ps(iq2,jq0);
692
693             /* EWALD ELECTROSTATICS */
694             
695             /* Analytical PME correction */
696             zeta2            = _mm256_mul_ps(beta2,rsq20);
697             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
698             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
699             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
700             felec            = _mm256_mul_ps(qq20,felec);
701             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
702             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
703             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
704             velec            = _mm256_mul_ps(qq20,velec);
705             
706             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
707
708             /* Update potential sum for this i atom from the interaction with this j atom. */
709             velec            = _mm256_and_ps(velec,cutoff_mask);
710             velec            = _mm256_andnot_ps(dummy_mask,velec);
711             velecsum         = _mm256_add_ps(velecsum,velec);
712
713             fscal            = felec;
714
715             fscal            = _mm256_and_ps(fscal,cutoff_mask);
716
717             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
718
719             /* Calculate temporary vectorial force */
720             tx               = _mm256_mul_ps(fscal,dx20);
721             ty               = _mm256_mul_ps(fscal,dy20);
722             tz               = _mm256_mul_ps(fscal,dz20);
723
724             /* Update vectorial force */
725             fix2             = _mm256_add_ps(fix2,tx);
726             fiy2             = _mm256_add_ps(fiy2,ty);
727             fiz2             = _mm256_add_ps(fiz2,tz);
728
729             fjx0             = _mm256_add_ps(fjx0,tx);
730             fjy0             = _mm256_add_ps(fjy0,ty);
731             fjz0             = _mm256_add_ps(fjz0,tz);
732
733             }
734
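            /* Padded lanes must not write into the real force array, so their j-force
             * pointers are redirected to the local scratch buffer, which simply
             * absorbs the (already zeroed) contributions.
             */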
735             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
736             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
737             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
738             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
739             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
740             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
741             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
742             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
743
744             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
745
746             /* Inner loop uses 351 flops */
747         }
748
749         /* End of innermost loop */
750
751         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
752                                                  f+i_coord_offset,fshift+i_shift_offset);
753
754         ggid                        = gid[iidx];
755         /* Update potential energies */
756         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
757         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
758
759         /* Increment number of inner iterations */
760         inneriter                  += j_index_end - j_index_start;
761
762         /* Outer loop uses 20 flops */
763     }
764
765     /* Increment number of outer iterations */
766     outeriter        += nri;
767
768     /* Update outer/inner flops */
769
770     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*351);
771 }
772 /*
773  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_single
774  * Electrostatics interaction: Ewald
775  * VdW interaction:            LennardJones
776  * Geometry:                   Water3-Particle
777  * Calculate force/pot:        Force
778  */
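/*
 * This variant is generated from the same template as the VF kernel above but, as a
 * sketch of the difference, it skips everything that only feeds the energies: no
 * velecsum/vvdwsum accumulation and no sh_ewald/potential-shift terms. The
 * Lennard-Jones contribution is folded directly into the scalar force as
 *   fvdw ~ ( c12*r^-6 - c6 )*r^-6/r^2
 * which equals the ( c12*r^-12 - c6*r^-6 )/r^2 used in the VF kernel.
 */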
779 void
780 nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_single
781                     (t_nblist                    * gmx_restrict       nlist,
782                      rvec                        * gmx_restrict          xx,
783                      rvec                        * gmx_restrict          ff,
784                      t_forcerec                  * gmx_restrict          fr,
785                      t_mdatoms                   * gmx_restrict     mdatoms,
786                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
787                      t_nrnb                      * gmx_restrict        nrnb)
788 {
789     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
790      * just 0 for non-waters.
791      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, e.g. for the eight different
792      * jnr indices corresponding to data put in the eight positions in the SIMD register.
793      */
794     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
795     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
796     int              jnrA,jnrB,jnrC,jnrD;
797     int              jnrE,jnrF,jnrG,jnrH;
798     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
799     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
800     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
801     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
802     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
803     real             rcutoff_scalar;
804     real             *shiftvec,*fshift,*x,*f;
805     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
806     real             scratch[4*DIM];
807     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
808     real *           vdwioffsetptr0;
809     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
810     real *           vdwioffsetptr1;
811     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
812     real *           vdwioffsetptr2;
813     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
814     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
815     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
816     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
817     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
818     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
819     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
820     real             *charge;
821     int              nvdwtype;
822     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
823     int              *vdwtype;
824     real             *vdwparam;
825     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
826     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
827     __m256i          ewitab;
828     __m128i          ewitab_lo,ewitab_hi;
829     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
830     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
831     real             *ewtab;
832     __m256           dummy_mask,cutoff_mask;
833     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
834     __m256           one     = _mm256_set1_ps(1.0);
835     __m256           two     = _mm256_set1_ps(2.0);
836     x                = xx[0];
837     f                = ff[0];
838
839     nri              = nlist->nri;
840     iinr             = nlist->iinr;
841     jindex           = nlist->jindex;
842     jjnr             = nlist->jjnr;
843     shiftidx         = nlist->shift;
844     gid              = nlist->gid;
845     shiftvec         = fr->shift_vec[0];
846     fshift           = fr->fshift[0];
847     facel            = _mm256_set1_ps(fr->epsfac);
848     charge           = mdatoms->chargeA;
849     nvdwtype         = fr->ntype;
850     vdwparam         = fr->nbfp;
851     vdwtype          = mdatoms->typeA;
852
853     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
854     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
855     beta2            = _mm256_mul_ps(beta,beta);
856     beta3            = _mm256_mul_ps(beta,beta2);
857
858     ewtab            = fr->ic->tabq_coul_F;
859     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
860     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
861
862     /* Setup water-specific parameters */
863     inr              = nlist->iinr[0];
864     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
865     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
866     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
867     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
868
869     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
870     rcutoff_scalar   = fr->rcoulomb;
871     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
872     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
873
874     sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
875     rvdw             = _mm256_set1_ps(fr->rvdw);
876
877     /* Avoid spurious compiler warnings about possibly uninitialized variables */
878     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
879     j_coord_offsetA = 0;
880     j_coord_offsetB = 0;
881     j_coord_offsetC = 0;
882     j_coord_offsetD = 0;
883     j_coord_offsetE = 0;
884     j_coord_offsetF = 0;
885     j_coord_offsetG = 0;
886     j_coord_offsetH = 0;
887
888     outeriter        = 0;
889     inneriter        = 0;
890
891     for(iidx=0;iidx<4*DIM;iidx++)
892     {
893         scratch[iidx] = 0.0;
894     }
895
896     /* Start outer loop over neighborlists */
897     for(iidx=0; iidx<nri; iidx++)
898     {
899         /* Load shift vector for this list */
900         i_shift_offset   = DIM*shiftidx[iidx];
901
902         /* Load limits for loop over neighbors */
903         j_index_start    = jindex[iidx];
904         j_index_end      = jindex[iidx+1];
905
906         /* Get outer coordinate index */
907         inr              = iinr[iidx];
908         i_coord_offset   = DIM*inr;
909
910         /* Load i particle coords and add shift vector */
911         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
912                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
913
914         fix0             = _mm256_setzero_ps();
915         fiy0             = _mm256_setzero_ps();
916         fiz0             = _mm256_setzero_ps();
917         fix1             = _mm256_setzero_ps();
918         fiy1             = _mm256_setzero_ps();
919         fiz1             = _mm256_setzero_ps();
920         fix2             = _mm256_setzero_ps();
921         fiy2             = _mm256_setzero_ps();
922         fiz2             = _mm256_setzero_ps();
923
924         /* Start inner kernel loop */
925         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
926         {
927
928             /* Get j neighbor index, and coordinate index */
929             jnrA             = jjnr[jidx];
930             jnrB             = jjnr[jidx+1];
931             jnrC             = jjnr[jidx+2];
932             jnrD             = jjnr[jidx+3];
933             jnrE             = jjnr[jidx+4];
934             jnrF             = jjnr[jidx+5];
935             jnrG             = jjnr[jidx+6];
936             jnrH             = jjnr[jidx+7];
937             j_coord_offsetA  = DIM*jnrA;
938             j_coord_offsetB  = DIM*jnrB;
939             j_coord_offsetC  = DIM*jnrC;
940             j_coord_offsetD  = DIM*jnrD;
941             j_coord_offsetE  = DIM*jnrE;
942             j_coord_offsetF  = DIM*jnrF;
943             j_coord_offsetG  = DIM*jnrG;
944             j_coord_offsetH  = DIM*jnrH;
945
946             /* load j atom coordinates */
947             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
948                                                  x+j_coord_offsetC,x+j_coord_offsetD,
949                                                  x+j_coord_offsetE,x+j_coord_offsetF,
950                                                  x+j_coord_offsetG,x+j_coord_offsetH,
951                                                  &jx0,&jy0,&jz0);
952
953             /* Calculate displacement vector */
954             dx00             = _mm256_sub_ps(ix0,jx0);
955             dy00             = _mm256_sub_ps(iy0,jy0);
956             dz00             = _mm256_sub_ps(iz0,jz0);
957             dx10             = _mm256_sub_ps(ix1,jx0);
958             dy10             = _mm256_sub_ps(iy1,jy0);
959             dz10             = _mm256_sub_ps(iz1,jz0);
960             dx20             = _mm256_sub_ps(ix2,jx0);
961             dy20             = _mm256_sub_ps(iy2,jy0);
962             dz20             = _mm256_sub_ps(iz2,jz0);
963
964             /* Calculate squared distance and things based on it */
965             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
966             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
967             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
968
969             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
970             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
971             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
972
973             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
974             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
975             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
976
977             /* Load parameters for j particles */
978             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
979                                                                  charge+jnrC+0,charge+jnrD+0,
980                                                                  charge+jnrE+0,charge+jnrF+0,
981                                                                  charge+jnrG+0,charge+jnrH+0);
982             vdwjidx0A        = 2*vdwtype[jnrA+0];
983             vdwjidx0B        = 2*vdwtype[jnrB+0];
984             vdwjidx0C        = 2*vdwtype[jnrC+0];
985             vdwjidx0D        = 2*vdwtype[jnrD+0];
986             vdwjidx0E        = 2*vdwtype[jnrE+0];
987             vdwjidx0F        = 2*vdwtype[jnrF+0];
988             vdwjidx0G        = 2*vdwtype[jnrG+0];
989             vdwjidx0H        = 2*vdwtype[jnrH+0];
990
991             fjx0             = _mm256_setzero_ps();
992             fjy0             = _mm256_setzero_ps();
993             fjz0             = _mm256_setzero_ps();
994
995             /**************************
996              * CALCULATE INTERACTIONS *
997              **************************/
998
999             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1000             {
1001
1002             r00              = _mm256_mul_ps(rsq00,rinv00);
1003
1004             /* Compute parameters for interactions between i and j atoms */
1005             qq00             = _mm256_mul_ps(iq0,jq0);
1006             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1007                                             vdwioffsetptr0+vdwjidx0B,
1008                                             vdwioffsetptr0+vdwjidx0C,
1009                                             vdwioffsetptr0+vdwjidx0D,
1010                                             vdwioffsetptr0+vdwjidx0E,
1011                                             vdwioffsetptr0+vdwjidx0F,
1012                                             vdwioffsetptr0+vdwjidx0G,
1013                                             vdwioffsetptr0+vdwjidx0H,
1014                                             &c6_00,&c12_00);
1015
1016             /* EWALD ELECTROSTATICS */
1017             
1018             /* Analytical PME correction */
1019             zeta2            = _mm256_mul_ps(beta2,rsq00);
1020             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1021             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1022             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1023             felec            = _mm256_mul_ps(qq00,felec);
1024             
1025             /* LENNARD-JONES DISPERSION/REPULSION */
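            /* Force-only LJ: the potential is not needed, so the shift terms are
             * dropped and fvdw is formed in a single expression; see the note
             * preceding this kernel.
             */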
1026
1027             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1028             fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
1029
1030             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1031
1032             fscal            = _mm256_add_ps(felec,fvdw);
1033
1034             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1035
1036             /* Calculate temporary vectorial force */
1037             tx               = _mm256_mul_ps(fscal,dx00);
1038             ty               = _mm256_mul_ps(fscal,dy00);
1039             tz               = _mm256_mul_ps(fscal,dz00);
1040
1041             /* Update vectorial force */
1042             fix0             = _mm256_add_ps(fix0,tx);
1043             fiy0             = _mm256_add_ps(fiy0,ty);
1044             fiz0             = _mm256_add_ps(fiz0,tz);
1045
1046             fjx0             = _mm256_add_ps(fjx0,tx);
1047             fjy0             = _mm256_add_ps(fjy0,ty);
1048             fjz0             = _mm256_add_ps(fjz0,tz);
1049
1050             }
1051
1052             /**************************
1053              * CALCULATE INTERACTIONS *
1054              **************************/
1055
1056             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1057             {
1058
1059             r10              = _mm256_mul_ps(rsq10,rinv10);
1060
1061             /* Compute parameters for interactions between i and j atoms */
1062             qq10             = _mm256_mul_ps(iq1,jq0);
1063
1064             /* EWALD ELECTROSTATICS */
1065             
1066             /* Analytical PME correction */
1067             zeta2            = _mm256_mul_ps(beta2,rsq10);
1068             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1069             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1070             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1071             felec            = _mm256_mul_ps(qq10,felec);
1072             
1073             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1074
1075             fscal            = felec;
1076
1077             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1078
1079             /* Calculate temporary vectorial force */
1080             tx               = _mm256_mul_ps(fscal,dx10);
1081             ty               = _mm256_mul_ps(fscal,dy10);
1082             tz               = _mm256_mul_ps(fscal,dz10);
1083
1084             /* Update vectorial force */
1085             fix1             = _mm256_add_ps(fix1,tx);
1086             fiy1             = _mm256_add_ps(fiy1,ty);
1087             fiz1             = _mm256_add_ps(fiz1,tz);
1088
1089             fjx0             = _mm256_add_ps(fjx0,tx);
1090             fjy0             = _mm256_add_ps(fjy0,ty);
1091             fjz0             = _mm256_add_ps(fjz0,tz);
1092
1093             }
1094
1095             /**************************
1096              * CALCULATE INTERACTIONS *
1097              **************************/
1098
1099             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1100             {
1101
1102             r20              = _mm256_mul_ps(rsq20,rinv20);
1103
1104             /* Compute parameters for interactions between i and j atoms */
1105             qq20             = _mm256_mul_ps(iq2,jq0);
1106
1107             /* EWALD ELECTROSTATICS */
1108             
1109             /* Analytical PME correction */
1110             zeta2            = _mm256_mul_ps(beta2,rsq20);
1111             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1112             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1113             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1114             felec            = _mm256_mul_ps(qq20,felec);
1115             
1116             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1117
1118             fscal            = felec;
1119
1120             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1121
1122             /* Calculate temporary vectorial force */
1123             tx               = _mm256_mul_ps(fscal,dx20);
1124             ty               = _mm256_mul_ps(fscal,dy20);
1125             tz               = _mm256_mul_ps(fscal,dz20);
1126
1127             /* Update vectorial force */
1128             fix2             = _mm256_add_ps(fix2,tx);
1129             fiy2             = _mm256_add_ps(fiy2,ty);
1130             fiz2             = _mm256_add_ps(fiz2,tz);
1131
1132             fjx0             = _mm256_add_ps(fjx0,tx);
1133             fjy0             = _mm256_add_ps(fjy0,ty);
1134             fjz0             = _mm256_add_ps(fjz0,tz);
1135
1136             }
1137
1138             fjptrA             = f+j_coord_offsetA;
1139             fjptrB             = f+j_coord_offsetB;
1140             fjptrC             = f+j_coord_offsetC;
1141             fjptrD             = f+j_coord_offsetD;
1142             fjptrE             = f+j_coord_offsetE;
1143             fjptrF             = f+j_coord_offsetF;
1144             fjptrG             = f+j_coord_offsetG;
1145             fjptrH             = f+j_coord_offsetH;
1146
1147             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);

            /* Inner loop uses 187 flops */
        }

        if(jidx<j_index_end)
        {

            /* Get j neighbor index, and coordinate index */
            jnrlistA         = jjnr[jidx];
            jnrlistB         = jjnr[jidx+1];
            jnrlistC         = jjnr[jidx+2];
            jnrlistD         = jjnr[jidx+3];
            jnrlistE         = jjnr[jidx+4];
            jnrlistF         = jjnr[jidx+5];
            jnrlistG         = jjnr[jidx+6];
            jnrlistH         = jjnr[jidx+7];
            /* Sign of each element will be negative for non-real atoms.
             * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
             * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
             */
            dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
                                            gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));

            jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
            jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
            jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
            jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
            jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
            jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
            jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
            jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
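            /* Negative jnr entries are padding at the end of the neighbor list. They are
             * clamped to index 0 so the gathers below stay in bounds; their contributions
             * are removed again via dummy_mask, and their forces are later routed to the
             * scratch buffer instead of f[].
             */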
            j_coord_offsetA  = DIM*jnrA;
            j_coord_offsetB  = DIM*jnrB;
            j_coord_offsetC  = DIM*jnrC;
            j_coord_offsetD  = DIM*jnrD;
            j_coord_offsetE  = DIM*jnrE;
            j_coord_offsetF  = DIM*jnrF;
            j_coord_offsetG  = DIM*jnrG;
            j_coord_offsetH  = DIM*jnrH;

            /* load j atom coordinates */
            gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
                                                 x+j_coord_offsetC,x+j_coord_offsetD,
                                                 x+j_coord_offsetE,x+j_coord_offsetF,
                                                 x+j_coord_offsetG,x+j_coord_offsetH,
                                                 &jx0,&jy0,&jz0);

            /* Calculate displacement vector */
            dx00             = _mm256_sub_ps(ix0,jx0);
            dy00             = _mm256_sub_ps(iy0,jy0);
            dz00             = _mm256_sub_ps(iz0,jz0);
            dx10             = _mm256_sub_ps(ix1,jx0);
            dy10             = _mm256_sub_ps(iy1,jy0);
            dz10             = _mm256_sub_ps(iz1,jz0);
            dx20             = _mm256_sub_ps(ix2,jx0);
            dy20             = _mm256_sub_ps(iy2,jy0);
            dz20             = _mm256_sub_ps(iz2,jz0);

            /* Calculate squared distance and things based on it */
            rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
            rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
            rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);

            rinv00           = gmx_mm256_invsqrt_ps(rsq00);
            rinv10           = gmx_mm256_invsqrt_ps(rsq10);
            rinv20           = gmx_mm256_invsqrt_ps(rsq20);

            rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
            rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
            rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
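            /* gmx_mm256_invsqrt_ps() presumably uses the hardware rsqrt estimate refined by a
             * Newton-Raphson step, which is sufficient in single precision; rinvsq is then
             * obtained with one extra multiply instead of a division.
             */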

            /* Load parameters for j particles */
            jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
                                                                 charge+jnrC+0,charge+jnrD+0,
                                                                 charge+jnrE+0,charge+jnrF+0,
                                                                 charge+jnrG+0,charge+jnrH+0);
            vdwjidx0A        = 2*vdwtype[jnrA+0];
            vdwjidx0B        = 2*vdwtype[jnrB+0];
            vdwjidx0C        = 2*vdwtype[jnrC+0];
            vdwjidx0D        = 2*vdwtype[jnrD+0];
            vdwjidx0E        = 2*vdwtype[jnrE+0];
            vdwjidx0F        = 2*vdwtype[jnrF+0];
            vdwjidx0G        = 2*vdwtype[jnrG+0];
            vdwjidx0H        = 2*vdwtype[jnrH+0];

            fjx0             = _mm256_setzero_ps();
            fjy0             = _mm256_setzero_ps();
            fjz0             = _mm256_setzero_ps();

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq00,rcutoff2))
            {

            r00              = _mm256_mul_ps(rsq00,rinv00);
            r00              = _mm256_andnot_ps(dummy_mask,r00);

            /* Compute parameters for interactions between i and j atoms */
            qq00             = _mm256_mul_ps(iq0,jq0);
            gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
                                            vdwioffsetptr0+vdwjidx0B,
                                            vdwioffsetptr0+vdwjidx0C,
                                            vdwioffsetptr0+vdwjidx0D,
                                            vdwioffsetptr0+vdwjidx0E,
                                            vdwioffsetptr0+vdwjidx0F,
                                            vdwioffsetptr0+vdwjidx0G,
                                            vdwioffsetptr0+vdwjidx0H,
                                            &c6_00,&c12_00);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq00);
            rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq00,felec);

            /* LENNARD-JONES DISPERSION/REPULSION */

            rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
            fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
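            /* If the stored c6_00/c12_00 pair parameters already include the factors 6 and 12
             * (as the GROMACS group kernels assume), this expression equals
             * (12*C12/r^12 - 6*C6/r^6)/r^2, i.e. the Lennard-Jones force divided by r,
             * ready to be scaled onto the displacement vector.
             */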

            cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);

            fscal            = _mm256_add_ps(felec,fvdw);

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx00);
            ty               = _mm256_mul_ps(fscal,dy00);
            tz               = _mm256_mul_ps(fscal,dz00);

            /* Update vectorial force */
            fix0             = _mm256_add_ps(fix0,tx);
            fiy0             = _mm256_add_ps(fiy0,ty);
            fiz0             = _mm256_add_ps(fiz0,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq10,rcutoff2))
            {

            r10              = _mm256_mul_ps(rsq10,rinv10);
            r10              = _mm256_andnot_ps(dummy_mask,r10);

            /* Compute parameters for interactions between i and j atoms */
            qq10             = _mm256_mul_ps(iq1,jq0);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq10);
            rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq10,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx10);
            ty               = _mm256_mul_ps(fscal,dy10);
            tz               = _mm256_mul_ps(fscal,dz10);

            /* Update vectorial force */
            fix1             = _mm256_add_ps(fix1,tx);
            fiy1             = _mm256_add_ps(fiy1,ty);
            fiz1             = _mm256_add_ps(fiz1,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            /**************************
             * CALCULATE INTERACTIONS *
             **************************/

            if (gmx_mm256_any_lt(rsq20,rcutoff2))
            {

            r20              = _mm256_mul_ps(rsq20,rinv20);
            r20              = _mm256_andnot_ps(dummy_mask,r20);

            /* Compute parameters for interactions between i and j atoms */
            qq20             = _mm256_mul_ps(iq2,jq0);

            /* EWALD ELECTROSTATICS */

            /* Analytical PME correction */
            zeta2            = _mm256_mul_ps(beta2,rsq20);
            rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
            pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
            felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
            felec            = _mm256_mul_ps(qq20,felec);

            cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);

            fscal            = felec;

            fscal            = _mm256_and_ps(fscal,cutoff_mask);

            fscal            = _mm256_andnot_ps(dummy_mask,fscal);

            /* Calculate temporary vectorial force */
            tx               = _mm256_mul_ps(fscal,dx20);
            ty               = _mm256_mul_ps(fscal,dy20);
            tz               = _mm256_mul_ps(fscal,dz20);

            /* Update vectorial force */
            fix2             = _mm256_add_ps(fix2,tx);
            fiy2             = _mm256_add_ps(fiy2,ty);
            fiz2             = _mm256_add_ps(fiz2,tz);

            fjx0             = _mm256_add_ps(fjx0,tx);
            fjy0             = _mm256_add_ps(fjy0,ty);
            fjz0             = _mm256_add_ps(fjz0,tz);

            }

            fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
            fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
            fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
            fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
            fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
            fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
            fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
            fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;

            gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);

            /* Inner loop uses 190 flops */
        }

        /* End of innermost loop */

        gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
                                                 f+i_coord_offset,fshift+i_shift_offset);
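        /* The three i-atom force accumulators are reduced across the 8 SIMD lanes and added
         * to f[] for the water atoms; the same sums are added to fshift[] so that the virial
         * can later be computed from the shift forces.
         */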

        /* Increment number of inner iterations */
        inneriter                  += j_index_end - j_index_start;

        /* Outer loop uses 18 flops */
    }

    /* Increment number of outer iterations */
    outeriter        += nri;

    /* Update outer/inner flops */

    inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*190);
}