1 /*
2  * This file is part of the GROMACS molecular simulation package.
3  *
4  * Copyright (c) 2012,2013, by the GROMACS development team, led by
5  * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
6  * and including many others, as listed in the AUTHORS file in the
7  * top-level source directory and at http://www.gromacs.org.
8  *
9  * GROMACS is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU Lesser General Public License
11  * as published by the Free Software Foundation; either version 2.1
12  * of the License, or (at your option) any later version.
13  *
14  * GROMACS is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
17  * Lesser General Public License for more details.
18  *
19  * You should have received a copy of the GNU Lesser General Public
20  * License along with GROMACS; if not, see
21  * http://www.gnu.org/licenses, or write to the Free Software Foundation,
22  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
23  *
24  * If you want to redistribute modifications to GROMACS, please
25  * consider that scientific software is very special. Version
26  * control is crucial - bugs must be traceable. We will be happy to
27  * consider code for inclusion in the official distribution, but
28  * derived work must not be called official GROMACS. Details are found
29  * in the README & COPYING files - if they are missing, get the
30  * official version at http://www.gromacs.org.
31  *
32  * To help us fund GROMACS development, we humbly ask that you cite
33  * the research papers on the package. Check out http://www.gromacs.org.
34  */
35 /*
36  * Note: this file was generated by the GROMACS avx_256_single kernel generator.
37  */
38 #ifdef HAVE_CONFIG_H
39 #include <config.h>
40 #endif
41
42 #include <math.h>
43
44 #include "../nb_kernel.h"
45 #include "types/simple.h"
46 #include "vec.h"
47 #include "nrnb.h"
48
49 #include "gromacs/simd/math_x86_avx_256_single.h"
50 #include "kernelutil_x86_avx_256_single.h"
51
52 /*
53  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_single
54  * Electrostatics interaction: Ewald
55  * VdW interaction:            LennardJones
56  * Geometry:                   Water3-Particle
57  * Calculate force/pot:        PotentialAndForce
58  */
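/*
 * Schematically, for each i/j pair inside the cutoff the loops below
 * accumulate
 *
 *     velec = qq * ( erfc(beta*r)/r - sh_ewald )
 *     vvdw  = ( c12/r^12 - c12*sh_vdw_invrcut6^2 )/12
 *           - ( c6/r^6   - c6*sh_vdw_invrcut6    )/6
 *
 * (the Lennard-Jones term only for the first water atom), where sh_ewald and
 * sh_vdw_invrcut6 are the potential-shift constants that bring both terms to
 * zero at the cutoff. The scalar force factors are defined so that the force
 * on atom i is fscal*(dx,dy,dz), i.e. fscal = -(dV/dr)/r, and the erf/erfc
 * parts are evaluated with the polynomial approximations
 * gmx_mm256_pmecorrV_ps/gmx_mm256_pmecorrF_ps rather than a table.
 */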
59 void
60 nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_VF_avx_256_single
61                     (t_nblist                    * gmx_restrict       nlist,
62                      rvec                        * gmx_restrict          xx,
63                      rvec                        * gmx_restrict          ff,
64                      t_forcerec                  * gmx_restrict          fr,
65                      t_mdatoms                   * gmx_restrict     mdatoms,
66                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
67                      t_nrnb                      * gmx_restrict        nrnb)
68 {
69     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
70      * just 0 for non-waters.
71      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
72      * jnr indices corresponding to data placed in the eight positions of the SIMD register.
73      */
74     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
75     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
76     int              jnrA,jnrB,jnrC,jnrD;
77     int              jnrE,jnrF,jnrG,jnrH;
78     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
79     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
80     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
81     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
82     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
83     real             rcutoff_scalar;
84     real             *shiftvec,*fshift,*x,*f;
85     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
86     real             scratch[4*DIM];
87     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
88     real *           vdwioffsetptr0;
89     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
90     real *           vdwioffsetptr1;
91     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
92     real *           vdwioffsetptr2;
93     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
94     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
95     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
96     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
97     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
98     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
99     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
100     real             *charge;
101     int              nvdwtype;
102     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
103     int              *vdwtype;
104     real             *vdwparam;
105     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
106     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
107     __m256i          ewitab;
108     __m128i          ewitab_lo,ewitab_hi;
109     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
110     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
111     real             *ewtab;
112     __m256           dummy_mask,cutoff_mask;
113     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
114     __m256           one     = _mm256_set1_ps(1.0);
115     __m256           two     = _mm256_set1_ps(2.0);
116     x                = xx[0];
117     f                = ff[0];
118
119     nri              = nlist->nri;
120     iinr             = nlist->iinr;
121     jindex           = nlist->jindex;
122     jjnr             = nlist->jjnr;
123     shiftidx         = nlist->shift;
124     gid              = nlist->gid;
125     shiftvec         = fr->shift_vec[0];
126     fshift           = fr->fshift[0];
127     facel            = _mm256_set1_ps(fr->epsfac);
128     charge           = mdatoms->chargeA;
129     nvdwtype         = fr->ntype;
130     vdwparam         = fr->nbfp;
131     vdwtype          = mdatoms->typeA;
132
133     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
134     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
135     beta2            = _mm256_mul_ps(beta,beta);
136     beta3            = _mm256_mul_ps(beta,beta2);
137
138     ewtab            = fr->ic->tabq_coul_FDV0;
139     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
140     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
141
142     /* Setup water-specific parameters */
143     inr              = nlist->iinr[0];
144     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
145     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
146     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
147     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
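    /* GeomW3P1: the i entry is a three-site water whose atoms are inr+0,
     * inr+1 and inr+2 (typically O, H, H), while each j entry is a single
     * particle; only the first water atom carries Lennard-Jones parameters,
     * so a vdw offset pointer is set up for atom 0 only. */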
148
149     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
150     rcutoff_scalar   = fr->rcoulomb;
151     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
152     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
153
154     sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
155     rvdw             = _mm256_set1_ps(fr->rvdw);
156
157     /* Avoid compiler warnings about possibly uninitialized variables */
158     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
159     j_coord_offsetA = 0;
160     j_coord_offsetB = 0;
161     j_coord_offsetC = 0;
162     j_coord_offsetD = 0;
163     j_coord_offsetE = 0;
164     j_coord_offsetF = 0;
165     j_coord_offsetG = 0;
166     j_coord_offsetH = 0;
167
168     outeriter        = 0;
169     inneriter        = 0;
170
171     for(iidx=0;iidx<4*DIM;iidx++)
172     {
173         scratch[iidx] = 0.0;
174     }
175
176     /* Start outer loop over neighborlists */
177     for(iidx=0; iidx<nri; iidx++)
178     {
179         /* Load shift vector for this list */
180         i_shift_offset   = DIM*shiftidx[iidx];
181
182         /* Load limits for loop over neighbors */
183         j_index_start    = jindex[iidx];
184         j_index_end      = jindex[iidx+1];
185
186         /* Get outer coordinate index */
187         inr              = iinr[iidx];
188         i_coord_offset   = DIM*inr;
189
190         /* Load i particle coords and add shift vector */
191         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
192                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
193
194         fix0             = _mm256_setzero_ps();
195         fiy0             = _mm256_setzero_ps();
196         fiz0             = _mm256_setzero_ps();
197         fix1             = _mm256_setzero_ps();
198         fiy1             = _mm256_setzero_ps();
199         fiz1             = _mm256_setzero_ps();
200         fix2             = _mm256_setzero_ps();
201         fiy2             = _mm256_setzero_ps();
202         fiz2             = _mm256_setzero_ps();
203
204         /* Reset potential sums */
205         velecsum         = _mm256_setzero_ps();
206         vvdwsum          = _mm256_setzero_ps();
207
208         /* Start inner kernel loop */
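        /* The j list is processed eight entries at a time; the last, partially
         * filled group is padded with negative indices, so the test on
         * jjnr[jidx+7] keeps this unrolled loop on full groups and leaves the
         * padded group to the masked epilogue below. */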
209         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
210         {
211
212             /* Get j neighbor index, and coordinate index */
213             jnrA             = jjnr[jidx];
214             jnrB             = jjnr[jidx+1];
215             jnrC             = jjnr[jidx+2];
216             jnrD             = jjnr[jidx+3];
217             jnrE             = jjnr[jidx+4];
218             jnrF             = jjnr[jidx+5];
219             jnrG             = jjnr[jidx+6];
220             jnrH             = jjnr[jidx+7];
221             j_coord_offsetA  = DIM*jnrA;
222             j_coord_offsetB  = DIM*jnrB;
223             j_coord_offsetC  = DIM*jnrC;
224             j_coord_offsetD  = DIM*jnrD;
225             j_coord_offsetE  = DIM*jnrE;
226             j_coord_offsetF  = DIM*jnrF;
227             j_coord_offsetG  = DIM*jnrG;
228             j_coord_offsetH  = DIM*jnrH;
229
230             /* load j atom coordinates */
231             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
232                                                  x+j_coord_offsetC,x+j_coord_offsetD,
233                                                  x+j_coord_offsetE,x+j_coord_offsetF,
234                                                  x+j_coord_offsetG,x+j_coord_offsetH,
235                                                  &jx0,&jy0,&jz0);
236
237             /* Calculate displacement vector */
238             dx00             = _mm256_sub_ps(ix0,jx0);
239             dy00             = _mm256_sub_ps(iy0,jy0);
240             dz00             = _mm256_sub_ps(iz0,jz0);
241             dx10             = _mm256_sub_ps(ix1,jx0);
242             dy10             = _mm256_sub_ps(iy1,jy0);
243             dz10             = _mm256_sub_ps(iz1,jz0);
244             dx20             = _mm256_sub_ps(ix2,jx0);
245             dy20             = _mm256_sub_ps(iy2,jy0);
246             dz20             = _mm256_sub_ps(iz2,jz0);
247
248             /* Calculate squared distance and things based on it */
249             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
250             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
251             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
252
253             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
254             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
255             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
256
257             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
258             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
259             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
260
261             /* Load parameters for j particles */
262             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
263                                                                  charge+jnrC+0,charge+jnrD+0,
264                                                                  charge+jnrE+0,charge+jnrF+0,
265                                                                  charge+jnrG+0,charge+jnrH+0);
266             vdwjidx0A        = 2*vdwtype[jnrA+0];
267             vdwjidx0B        = 2*vdwtype[jnrB+0];
268             vdwjidx0C        = 2*vdwtype[jnrC+0];
269             vdwjidx0D        = 2*vdwtype[jnrD+0];
270             vdwjidx0E        = 2*vdwtype[jnrE+0];
271             vdwjidx0F        = 2*vdwtype[jnrF+0];
272             vdwjidx0G        = 2*vdwtype[jnrG+0];
273             vdwjidx0H        = 2*vdwtype[jnrH+0];
274
275             fjx0             = _mm256_setzero_ps();
276             fjy0             = _mm256_setzero_ps();
277             fjz0             = _mm256_setzero_ps();
278
279             /**************************
280              * CALCULATE INTERACTIONS *
281              **************************/
282
283             if (gmx_mm256_any_lt(rsq00,rcutoff2))
284             {
285
286             r00              = _mm256_mul_ps(rsq00,rinv00);
287
288             /* Compute parameters for interactions between i and j atoms */
289             qq00             = _mm256_mul_ps(iq0,jq0);
290             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
291                                             vdwioffsetptr0+vdwjidx0B,
292                                             vdwioffsetptr0+vdwjidx0C,
293                                             vdwioffsetptr0+vdwjidx0D,
294                                             vdwioffsetptr0+vdwjidx0E,
295                                             vdwioffsetptr0+vdwjidx0F,
296                                             vdwioffsetptr0+vdwjidx0G,
297                                             vdwioffsetptr0+vdwjidx0H,
298                                             &c6_00,&c12_00);
299
300             /* EWALD ELECTROSTATICS */
301             
302             /* Analytical PME correction */
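            /* gmx_mm256_pmecorrV_ps/gmx_mm256_pmecorrF_ps return polynomial
             * approximations such that pmecorrV(beta^2*r^2)*beta is
             * effectively erf(beta*r)/r and pmecorrF(beta^2*r^2)*beta^3 is the
             * matching correction to the 1/r^3 force factor, yielding
             * velec = qq*(erfc(beta*r)/r - sh_ewald) without a table lookup. */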
303             zeta2            = _mm256_mul_ps(beta2,rsq00);
304             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
305             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
306             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
307             felec            = _mm256_mul_ps(qq00,felec);
308             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
309             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
310             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
311             velec            = _mm256_mul_ps(qq00,velec);
312             
313             /* LENNARD-JONES DISPERSION/REPULSION */
314
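            /* Potential-shifted LJ: vvdw is c12/(12 r^12) - c6/(6 r^6) minus its
             * value at the cutoff (sh_vdw_invrcut6 = 1/rc^6), and
             * fvdw = (c12/r^12 - c6/r^6)/r^2 is the corresponding -(dV/dr)/r. */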
315             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
316             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
317             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
318             vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
319                                           _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
320             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
321
322             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
323
324             /* Update potential sum for this i atom from the interaction with this j atom. */
325             velec            = _mm256_and_ps(velec,cutoff_mask);
326             velecsum         = _mm256_add_ps(velecsum,velec);
327             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
328             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
329
330             fscal            = _mm256_add_ps(felec,fvdw);
331
332             fscal            = _mm256_and_ps(fscal,cutoff_mask);
333
334             /* Calculate temporary vectorial force */
335             tx               = _mm256_mul_ps(fscal,dx00);
336             ty               = _mm256_mul_ps(fscal,dy00);
337             tz               = _mm256_mul_ps(fscal,dz00);
338
339             /* Update vectorial force */
340             fix0             = _mm256_add_ps(fix0,tx);
341             fiy0             = _mm256_add_ps(fiy0,ty);
342             fiz0             = _mm256_add_ps(fiz0,tz);
343
344             fjx0             = _mm256_add_ps(fjx0,tx);
345             fjy0             = _mm256_add_ps(fjy0,ty);
346             fjz0             = _mm256_add_ps(fjz0,tz);
347
348             }
349
350             /**************************
351              * CALCULATE INTERACTIONS *
352              **************************/
353
354             if (gmx_mm256_any_lt(rsq10,rcutoff2))
355             {
356
357             r10              = _mm256_mul_ps(rsq10,rinv10);
358
359             /* Compute parameters for interactions between i and j atoms */
360             qq10             = _mm256_mul_ps(iq1,jq0);
361
362             /* EWALD ELECTROSTATICS */
363             
364             /* Analytical PME correction */
365             zeta2            = _mm256_mul_ps(beta2,rsq10);
366             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
367             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
368             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
369             felec            = _mm256_mul_ps(qq10,felec);
370             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
371             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
372             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
373             velec            = _mm256_mul_ps(qq10,velec);
374             
375             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
376
377             /* Update potential sum for this i atom from the interaction with this j atom. */
378             velec            = _mm256_and_ps(velec,cutoff_mask);
379             velecsum         = _mm256_add_ps(velecsum,velec);
380
381             fscal            = felec;
382
383             fscal            = _mm256_and_ps(fscal,cutoff_mask);
384
385             /* Calculate temporary vectorial force */
386             tx               = _mm256_mul_ps(fscal,dx10);
387             ty               = _mm256_mul_ps(fscal,dy10);
388             tz               = _mm256_mul_ps(fscal,dz10);
389
390             /* Update vectorial force */
391             fix1             = _mm256_add_ps(fix1,tx);
392             fiy1             = _mm256_add_ps(fiy1,ty);
393             fiz1             = _mm256_add_ps(fiz1,tz);
394
395             fjx0             = _mm256_add_ps(fjx0,tx);
396             fjy0             = _mm256_add_ps(fjy0,ty);
397             fjz0             = _mm256_add_ps(fjz0,tz);
398
399             }
400
401             /**************************
402              * CALCULATE INTERACTIONS *
403              **************************/
404
405             if (gmx_mm256_any_lt(rsq20,rcutoff2))
406             {
407
408             r20              = _mm256_mul_ps(rsq20,rinv20);
409
410             /* Compute parameters for interactions between i and j atoms */
411             qq20             = _mm256_mul_ps(iq2,jq0);
412
413             /* EWALD ELECTROSTATICS */
414             
415             /* Analytical PME correction */
416             zeta2            = _mm256_mul_ps(beta2,rsq20);
417             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
418             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
419             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
420             felec            = _mm256_mul_ps(qq20,felec);
421             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
422             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
423             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
424             velec            = _mm256_mul_ps(qq20,velec);
425             
426             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
427
428             /* Update potential sum for this i atom from the interaction with this j atom. */
429             velec            = _mm256_and_ps(velec,cutoff_mask);
430             velecsum         = _mm256_add_ps(velecsum,velec);
431
432             fscal            = felec;
433
434             fscal            = _mm256_and_ps(fscal,cutoff_mask);
435
436             /* Calculate temporary vectorial force */
437             tx               = _mm256_mul_ps(fscal,dx20);
438             ty               = _mm256_mul_ps(fscal,dy20);
439             tz               = _mm256_mul_ps(fscal,dz20);
440
441             /* Update vectorial force */
442             fix2             = _mm256_add_ps(fix2,tx);
443             fiy2             = _mm256_add_ps(fiy2,ty);
444             fiz2             = _mm256_add_ps(fiz2,tz);
445
446             fjx0             = _mm256_add_ps(fjx0,tx);
447             fjy0             = _mm256_add_ps(fjy0,ty);
448             fjz0             = _mm256_add_ps(fjz0,tz);
449
450             }
451
452             fjptrA             = f+j_coord_offsetA;
453             fjptrB             = f+j_coord_offsetB;
454             fjptrC             = f+j_coord_offsetC;
455             fjptrD             = f+j_coord_offsetD;
456             fjptrE             = f+j_coord_offsetE;
457             fjptrF             = f+j_coord_offsetF;
458             fjptrG             = f+j_coord_offsetG;
459             fjptrH             = f+j_coord_offsetH;
460
461             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
462
463             /* Inner loop uses 348 flops */
464         }
465
466         if(jidx<j_index_end)
467         {
468
469             /* Get j neighbor index, and coordinate index */
470             jnrlistA         = jjnr[jidx];
471             jnrlistB         = jjnr[jidx+1];
472             jnrlistC         = jjnr[jidx+2];
473             jnrlistD         = jjnr[jidx+3];
474             jnrlistE         = jjnr[jidx+4];
475             jnrlistF         = jjnr[jidx+5];
476             jnrlistG         = jjnr[jidx+6];
477             jnrlistH         = jjnr[jidx+7];
478             /* Sign of each element will be negative for non-real atoms.
479              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
480              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
481              */
482             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
483                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
484                                             
485             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
486             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
487             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
488             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
489             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
490             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
491             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
492             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
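            /* Negative (padding) indices are clamped to 0 so the SIMD loads
             * below always read valid memory; their contributions are zeroed
             * with dummy_mask and their force output is redirected to the
             * local scratch buffer further down. */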
493             j_coord_offsetA  = DIM*jnrA;
494             j_coord_offsetB  = DIM*jnrB;
495             j_coord_offsetC  = DIM*jnrC;
496             j_coord_offsetD  = DIM*jnrD;
497             j_coord_offsetE  = DIM*jnrE;
498             j_coord_offsetF  = DIM*jnrF;
499             j_coord_offsetG  = DIM*jnrG;
500             j_coord_offsetH  = DIM*jnrH;
501
502             /* load j atom coordinates */
503             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
504                                                  x+j_coord_offsetC,x+j_coord_offsetD,
505                                                  x+j_coord_offsetE,x+j_coord_offsetF,
506                                                  x+j_coord_offsetG,x+j_coord_offsetH,
507                                                  &jx0,&jy0,&jz0);
508
509             /* Calculate displacement vector */
510             dx00             = _mm256_sub_ps(ix0,jx0);
511             dy00             = _mm256_sub_ps(iy0,jy0);
512             dz00             = _mm256_sub_ps(iz0,jz0);
513             dx10             = _mm256_sub_ps(ix1,jx0);
514             dy10             = _mm256_sub_ps(iy1,jy0);
515             dz10             = _mm256_sub_ps(iz1,jz0);
516             dx20             = _mm256_sub_ps(ix2,jx0);
517             dy20             = _mm256_sub_ps(iy2,jy0);
518             dz20             = _mm256_sub_ps(iz2,jz0);
519
520             /* Calculate squared distance and things based on it */
521             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
522             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
523             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
524
525             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
526             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
527             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
528
529             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
530             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
531             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
532
533             /* Load parameters for j particles */
534             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
535                                                                  charge+jnrC+0,charge+jnrD+0,
536                                                                  charge+jnrE+0,charge+jnrF+0,
537                                                                  charge+jnrG+0,charge+jnrH+0);
538             vdwjidx0A        = 2*vdwtype[jnrA+0];
539             vdwjidx0B        = 2*vdwtype[jnrB+0];
540             vdwjidx0C        = 2*vdwtype[jnrC+0];
541             vdwjidx0D        = 2*vdwtype[jnrD+0];
542             vdwjidx0E        = 2*vdwtype[jnrE+0];
543             vdwjidx0F        = 2*vdwtype[jnrF+0];
544             vdwjidx0G        = 2*vdwtype[jnrG+0];
545             vdwjidx0H        = 2*vdwtype[jnrH+0];
546
547             fjx0             = _mm256_setzero_ps();
548             fjy0             = _mm256_setzero_ps();
549             fjz0             = _mm256_setzero_ps();
550
551             /**************************
552              * CALCULATE INTERACTIONS *
553              **************************/
554
555             if (gmx_mm256_any_lt(rsq00,rcutoff2))
556             {
557
558             r00              = _mm256_mul_ps(rsq00,rinv00);
559             r00              = _mm256_andnot_ps(dummy_mask,r00);
560
561             /* Compute parameters for interactions between i and j atoms */
562             qq00             = _mm256_mul_ps(iq0,jq0);
563             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
564                                             vdwioffsetptr0+vdwjidx0B,
565                                             vdwioffsetptr0+vdwjidx0C,
566                                             vdwioffsetptr0+vdwjidx0D,
567                                             vdwioffsetptr0+vdwjidx0E,
568                                             vdwioffsetptr0+vdwjidx0F,
569                                             vdwioffsetptr0+vdwjidx0G,
570                                             vdwioffsetptr0+vdwjidx0H,
571                                             &c6_00,&c12_00);
572
573             /* EWALD ELECTROSTATICS */
574             
575             /* Analytical PME correction */
576             zeta2            = _mm256_mul_ps(beta2,rsq00);
577             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
578             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
579             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
580             felec            = _mm256_mul_ps(qq00,felec);
581             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
582             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
583             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv00,sh_ewald),pmecorrV);
584             velec            = _mm256_mul_ps(qq00,velec);
585             
586             /* LENNARD-JONES DISPERSION/REPULSION */
587
588             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
589             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
590             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
591             vvdw             = _mm256_sub_ps(_mm256_mul_ps( _mm256_sub_ps(vvdw12 , _mm256_mul_ps(c12_00,_mm256_mul_ps(sh_vdw_invrcut6,sh_vdw_invrcut6))), one_twelfth) ,
592                                           _mm256_mul_ps( _mm256_sub_ps(vvdw6,_mm256_mul_ps(c6_00,sh_vdw_invrcut6)),one_sixth));
593             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
594
595             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
596
597             /* Update potential sum for this i atom from the interaction with this j atom. */
598             velec            = _mm256_and_ps(velec,cutoff_mask);
599             velec            = _mm256_andnot_ps(dummy_mask,velec);
600             velecsum         = _mm256_add_ps(velecsum,velec);
601             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
602             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
603             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
604
605             fscal            = _mm256_add_ps(felec,fvdw);
606
607             fscal            = _mm256_and_ps(fscal,cutoff_mask);
608
609             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
610
611             /* Calculate temporary vectorial force */
612             tx               = _mm256_mul_ps(fscal,dx00);
613             ty               = _mm256_mul_ps(fscal,dy00);
614             tz               = _mm256_mul_ps(fscal,dz00);
615
616             /* Update vectorial force */
617             fix0             = _mm256_add_ps(fix0,tx);
618             fiy0             = _mm256_add_ps(fiy0,ty);
619             fiz0             = _mm256_add_ps(fiz0,tz);
620
621             fjx0             = _mm256_add_ps(fjx0,tx);
622             fjy0             = _mm256_add_ps(fjy0,ty);
623             fjz0             = _mm256_add_ps(fjz0,tz);
624
625             }
626
627             /**************************
628              * CALCULATE INTERACTIONS *
629              **************************/
630
631             if (gmx_mm256_any_lt(rsq10,rcutoff2))
632             {
633
634             r10              = _mm256_mul_ps(rsq10,rinv10);
635             r10              = _mm256_andnot_ps(dummy_mask,r10);
636
637             /* Compute parameters for interactions between i and j atoms */
638             qq10             = _mm256_mul_ps(iq1,jq0);
639
640             /* EWALD ELECTROSTATICS */
641             
642             /* Analytical PME correction */
643             zeta2            = _mm256_mul_ps(beta2,rsq10);
644             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
645             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
646             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
647             felec            = _mm256_mul_ps(qq10,felec);
648             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
649             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
650             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv10,sh_ewald),pmecorrV);
651             velec            = _mm256_mul_ps(qq10,velec);
652             
653             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
654
655             /* Update potential sum for this i atom from the interaction with this j atom. */
656             velec            = _mm256_and_ps(velec,cutoff_mask);
657             velec            = _mm256_andnot_ps(dummy_mask,velec);
658             velecsum         = _mm256_add_ps(velecsum,velec);
659
660             fscal            = felec;
661
662             fscal            = _mm256_and_ps(fscal,cutoff_mask);
663
664             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
665
666             /* Calculate temporary vectorial force */
667             tx               = _mm256_mul_ps(fscal,dx10);
668             ty               = _mm256_mul_ps(fscal,dy10);
669             tz               = _mm256_mul_ps(fscal,dz10);
670
671             /* Update vectorial force */
672             fix1             = _mm256_add_ps(fix1,tx);
673             fiy1             = _mm256_add_ps(fiy1,ty);
674             fiz1             = _mm256_add_ps(fiz1,tz);
675
676             fjx0             = _mm256_add_ps(fjx0,tx);
677             fjy0             = _mm256_add_ps(fjy0,ty);
678             fjz0             = _mm256_add_ps(fjz0,tz);
679
680             }
681
682             /**************************
683              * CALCULATE INTERACTIONS *
684              **************************/
685
686             if (gmx_mm256_any_lt(rsq20,rcutoff2))
687             {
688
689             r20              = _mm256_mul_ps(rsq20,rinv20);
690             r20              = _mm256_andnot_ps(dummy_mask,r20);
691
692             /* Compute parameters for interactions between i and j atoms */
693             qq20             = _mm256_mul_ps(iq2,jq0);
694
695             /* EWALD ELECTROSTATICS */
696             
697             /* Analytical PME correction */
698             zeta2            = _mm256_mul_ps(beta2,rsq20);
699             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
700             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
701             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
702             felec            = _mm256_mul_ps(qq20,felec);
703             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
704             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
705             velec            = _mm256_sub_ps(_mm256_sub_ps(rinv20,sh_ewald),pmecorrV);
706             velec            = _mm256_mul_ps(qq20,velec);
707             
708             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
709
710             /* Update potential sum for this i atom from the interaction with this j atom. */
711             velec            = _mm256_and_ps(velec,cutoff_mask);
712             velec            = _mm256_andnot_ps(dummy_mask,velec);
713             velecsum         = _mm256_add_ps(velecsum,velec);
714
715             fscal            = felec;
716
717             fscal            = _mm256_and_ps(fscal,cutoff_mask);
718
719             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
720
721             /* Calculate temporary vectorial force */
722             tx               = _mm256_mul_ps(fscal,dx20);
723             ty               = _mm256_mul_ps(fscal,dy20);
724             tz               = _mm256_mul_ps(fscal,dz20);
725
726             /* Update vectorial force */
727             fix2             = _mm256_add_ps(fix2,tx);
728             fiy2             = _mm256_add_ps(fiy2,ty);
729             fiz2             = _mm256_add_ps(fiz2,tz);
730
731             fjx0             = _mm256_add_ps(fjx0,tx);
732             fjy0             = _mm256_add_ps(fjy0,ty);
733             fjz0             = _mm256_add_ps(fjz0,tz);
734
735             }
736
737             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
738             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
739             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
740             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
741             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
742             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
743             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
744             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
745
746             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
747
748             /* Inner loop uses 351 flops */
749         }
750
751         /* End of innermost loop */
752
753         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
754                                                  f+i_coord_offset,fshift+i_shift_offset);
755
756         ggid                        = gid[iidx];
757         /* Update potential energies */
758         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
759         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
760
761         /* Increment number of inner iterations */
762         inneriter                  += j_index_end - j_index_start;
763
764         /* Outer loop uses 20 flops */
765     }
766
767     /* Increment number of outer iterations */
768     outeriter        += nri;
769
770     /* Update outer/inner flops */
771
772     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*351);
773 }
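
/*
 * Illustration only (not part of the generated kernels): a minimal scalar
 * sketch of the per-pair interaction that each SIMD lane of the kernels in
 * this file evaluates, assuming potential-shifted Ewald electrostatics and
 * potential-shifted Lennard-Jones. It uses erfc()/exp() from math.h instead
 * of the vectorized polynomial PME correction; the helper name, its signature
 * and the availability of M_PI are assumptions for illustration, and the
 * function is never called.
 */
static void
nb_reference_pair_scalar(double qq, double c6, double c12,
                         double beta, double sh_ewald, double sh_invrc6,
                         double dx, double dy, double dz,
                         double *velec, double *vvdw, double fvec[3])
{
    double rsq    = dx*dx + dy*dy + dz*dz;
    double rinv   = 1.0/sqrt(rsq);
    double rinvsq = rinv*rinv;
    double r      = rsq*rinv;
    double rinv6  = rinvsq*rinvsq*rinvsq;
    double felec,fvdw,fscal;

    /* Shifted Ewald short-range electrostatics: V = qq*(erfc(beta*r)/r - sh_ewald) */
    *velec  = qq*(erfc(beta*r)*rinv - sh_ewald);
    /* fscal convention: force = fscal*(dx,dy,dz), i.e. fscal = -(dV/dr)/r */
    felec   = qq*(erfc(beta*r)*rinv + (2.0*beta/sqrt(M_PI))*exp(-beta*beta*rsq))*rinvsq;

    /* Potential-shifted Lennard-Jones, written as in the kernels above */
    *vvdw   = (c12*rinv6*rinv6 - c12*sh_invrc6*sh_invrc6)/12.0
              - (c6*rinv6 - c6*sh_invrc6)/6.0;
    fvdw    = (c12*rinv6*rinv6 - c6*rinv6)*rinvsq;

    fscal   = felec + fvdw;
    fvec[0] = fscal*dx;
    fvec[1] = fscal*dy;
    fvec[2] = fscal*dz;
}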
774 /*
775  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_single
776  * Electrostatics interaction: Ewald
777  * VdW interaction:            LennardJones
778  * Geometry:                   Water3-Particle
779  * Calculate force/pot:        Force
780  */
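/*
 * Note: this force-only variant skips the potential accumulation. A constant
 * potential shift does not change the force, so sh_ewald and sh_vdw_invrcut6
 * do not affect the results here, and the LJ force is folded into the single
 * factor (c12/r^12 - c6/r^6)/r^2 used by the VF kernel above.
 */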
781 void
782 nb_kernel_ElecEwSh_VdwLJSh_GeomW3P1_F_avx_256_single
783                     (t_nblist                    * gmx_restrict       nlist,
784                      rvec                        * gmx_restrict          xx,
785                      rvec                        * gmx_restrict          ff,
786                      t_forcerec                  * gmx_restrict          fr,
787                      t_mdatoms                   * gmx_restrict     mdatoms,
788                      nb_kernel_data_t gmx_unused * gmx_restrict kernel_data,
789                      t_nrnb                      * gmx_restrict        nrnb)
790 {
791     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
792      * just 0 for non-waters.
793      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. the eight different
794      * jnr indices corresponding to data placed in the eight positions of the SIMD register.
795      */
796     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
797     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
798     int              jnrA,jnrB,jnrC,jnrD;
799     int              jnrE,jnrF,jnrG,jnrH;
800     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
801     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
802     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
803     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
804     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
805     real             rcutoff_scalar;
806     real             *shiftvec,*fshift,*x,*f;
807     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
808     real             scratch[4*DIM];
809     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
810     real *           vdwioffsetptr0;
811     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
812     real *           vdwioffsetptr1;
813     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
814     real *           vdwioffsetptr2;
815     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
816     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
817     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
818     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
819     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
820     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
821     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
822     real             *charge;
823     int              nvdwtype;
824     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
825     int              *vdwtype;
826     real             *vdwparam;
827     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
828     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
829     __m256i          ewitab;
830     __m128i          ewitab_lo,ewitab_hi;
831     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
832     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
833     real             *ewtab;
834     __m256           dummy_mask,cutoff_mask;
835     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
836     __m256           one     = _mm256_set1_ps(1.0);
837     __m256           two     = _mm256_set1_ps(2.0);
838     x                = xx[0];
839     f                = ff[0];
840
841     nri              = nlist->nri;
842     iinr             = nlist->iinr;
843     jindex           = nlist->jindex;
844     jjnr             = nlist->jjnr;
845     shiftidx         = nlist->shift;
846     gid              = nlist->gid;
847     shiftvec         = fr->shift_vec[0];
848     fshift           = fr->fshift[0];
849     facel            = _mm256_set1_ps(fr->epsfac);
850     charge           = mdatoms->chargeA;
851     nvdwtype         = fr->ntype;
852     vdwparam         = fr->nbfp;
853     vdwtype          = mdatoms->typeA;
854
855     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
856     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff_q);
857     beta2            = _mm256_mul_ps(beta,beta);
858     beta3            = _mm256_mul_ps(beta,beta2);
859
860     ewtab            = fr->ic->tabq_coul_F;
861     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
862     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
863
864     /* Setup water-specific parameters */
865     inr              = nlist->iinr[0];
866     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
867     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
868     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
869     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
870
871     /* When we use explicit cutoffs the value must be identical for elec and VdW, so use elec as an arbitrary choice */
872     rcutoff_scalar   = fr->rcoulomb;
873     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
874     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
875
876     sh_vdw_invrcut6  = _mm256_set1_ps(fr->ic->sh_invrc6);
877     rvdw             = _mm256_set1_ps(fr->rvdw);
878
879     /* Avoid compiler warnings about possibly uninitialized variables */
880     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
881     j_coord_offsetA = 0;
882     j_coord_offsetB = 0;
883     j_coord_offsetC = 0;
884     j_coord_offsetD = 0;
885     j_coord_offsetE = 0;
886     j_coord_offsetF = 0;
887     j_coord_offsetG = 0;
888     j_coord_offsetH = 0;
889
890     outeriter        = 0;
891     inneriter        = 0;
892
893     for(iidx=0;iidx<4*DIM;iidx++)
894     {
895         scratch[iidx] = 0.0;
896     }
897
898     /* Start outer loop over neighborlists */
899     for(iidx=0; iidx<nri; iidx++)
900     {
901         /* Load shift vector for this list */
902         i_shift_offset   = DIM*shiftidx[iidx];
903
904         /* Load limits for loop over neighbors */
905         j_index_start    = jindex[iidx];
906         j_index_end      = jindex[iidx+1];
907
908         /* Get outer coordinate index */
909         inr              = iinr[iidx];
910         i_coord_offset   = DIM*inr;
911
912         /* Load i particle coords and add shift vector */
913         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
914                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
915
916         fix0             = _mm256_setzero_ps();
917         fiy0             = _mm256_setzero_ps();
918         fiz0             = _mm256_setzero_ps();
919         fix1             = _mm256_setzero_ps();
920         fiy1             = _mm256_setzero_ps();
921         fiz1             = _mm256_setzero_ps();
922         fix2             = _mm256_setzero_ps();
923         fiy2             = _mm256_setzero_ps();
924         fiz2             = _mm256_setzero_ps();
925
926         /* Start inner kernel loop */
927         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
928         {
929
930             /* Get j neighbor index, and coordinate index */
931             jnrA             = jjnr[jidx];
932             jnrB             = jjnr[jidx+1];
933             jnrC             = jjnr[jidx+2];
934             jnrD             = jjnr[jidx+3];
935             jnrE             = jjnr[jidx+4];
936             jnrF             = jjnr[jidx+5];
937             jnrG             = jjnr[jidx+6];
938             jnrH             = jjnr[jidx+7];
939             j_coord_offsetA  = DIM*jnrA;
940             j_coord_offsetB  = DIM*jnrB;
941             j_coord_offsetC  = DIM*jnrC;
942             j_coord_offsetD  = DIM*jnrD;
943             j_coord_offsetE  = DIM*jnrE;
944             j_coord_offsetF  = DIM*jnrF;
945             j_coord_offsetG  = DIM*jnrG;
946             j_coord_offsetH  = DIM*jnrH;
947
948             /* load j atom coordinates */
949             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
950                                                  x+j_coord_offsetC,x+j_coord_offsetD,
951                                                  x+j_coord_offsetE,x+j_coord_offsetF,
952                                                  x+j_coord_offsetG,x+j_coord_offsetH,
953                                                  &jx0,&jy0,&jz0);
954
955             /* Calculate displacement vector */
956             dx00             = _mm256_sub_ps(ix0,jx0);
957             dy00             = _mm256_sub_ps(iy0,jy0);
958             dz00             = _mm256_sub_ps(iz0,jz0);
959             dx10             = _mm256_sub_ps(ix1,jx0);
960             dy10             = _mm256_sub_ps(iy1,jy0);
961             dz10             = _mm256_sub_ps(iz1,jz0);
962             dx20             = _mm256_sub_ps(ix2,jx0);
963             dy20             = _mm256_sub_ps(iy2,jy0);
964             dz20             = _mm256_sub_ps(iz2,jz0);
965
966             /* Calculate squared distance and things based on it */
967             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
968             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
969             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
970
971             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
972             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
973             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
974
975             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
976             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
977             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
978
979             /* Load parameters for j particles */
980             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
981                                                                  charge+jnrC+0,charge+jnrD+0,
982                                                                  charge+jnrE+0,charge+jnrF+0,
983                                                                  charge+jnrG+0,charge+jnrH+0);
984             vdwjidx0A        = 2*vdwtype[jnrA+0];
985             vdwjidx0B        = 2*vdwtype[jnrB+0];
986             vdwjidx0C        = 2*vdwtype[jnrC+0];
987             vdwjidx0D        = 2*vdwtype[jnrD+0];
988             vdwjidx0E        = 2*vdwtype[jnrE+0];
989             vdwjidx0F        = 2*vdwtype[jnrF+0];
990             vdwjidx0G        = 2*vdwtype[jnrG+0];
991             vdwjidx0H        = 2*vdwtype[jnrH+0];
992
993             fjx0             = _mm256_setzero_ps();
994             fjy0             = _mm256_setzero_ps();
995             fjz0             = _mm256_setzero_ps();
996
997             /**************************
998              * CALCULATE INTERACTIONS *
999              **************************/
1000
1001             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1002             {
1003
1004             r00              = _mm256_mul_ps(rsq00,rinv00);
1005
1006             /* Compute parameters for interactions between i and j atoms */
1007             qq00             = _mm256_mul_ps(iq0,jq0);
1008             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1009                                             vdwioffsetptr0+vdwjidx0B,
1010                                             vdwioffsetptr0+vdwjidx0C,
1011                                             vdwioffsetptr0+vdwjidx0D,
1012                                             vdwioffsetptr0+vdwjidx0E,
1013                                             vdwioffsetptr0+vdwjidx0F,
1014                                             vdwioffsetptr0+vdwjidx0G,
1015                                             vdwioffsetptr0+vdwjidx0H,
1016                                             &c6_00,&c12_00);
1017
1018             /* EWALD ELECTROSTATICS */
1019             
1020             /* Analytical PME correction */
1021             zeta2            = _mm256_mul_ps(beta2,rsq00);
1022             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1023             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1024             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1025             felec            = _mm256_mul_ps(qq00,felec);
1026             
1027             /* LENNARD-JONES DISPERSION/REPULSION */
1028
1029             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1030             fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
1031
1032             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1033
1034             fscal            = _mm256_add_ps(felec,fvdw);
1035
1036             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1037
1038             /* Calculate temporary vectorial force */
1039             tx               = _mm256_mul_ps(fscal,dx00);
1040             ty               = _mm256_mul_ps(fscal,dy00);
1041             tz               = _mm256_mul_ps(fscal,dz00);
1042
1043             /* Update vectorial force */
1044             fix0             = _mm256_add_ps(fix0,tx);
1045             fiy0             = _mm256_add_ps(fiy0,ty);
1046             fiz0             = _mm256_add_ps(fiz0,tz);
1047
1048             fjx0             = _mm256_add_ps(fjx0,tx);
1049             fjy0             = _mm256_add_ps(fjy0,ty);
1050             fjz0             = _mm256_add_ps(fjz0,tz);
1051
1052             }
1053
1054             /**************************
1055              * CALCULATE INTERACTIONS *
1056              **************************/
1057
1058             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1059             {
1060
1061             r10              = _mm256_mul_ps(rsq10,rinv10);
1062
1063             /* Compute parameters for interactions between i and j atoms */
1064             qq10             = _mm256_mul_ps(iq1,jq0);
1065
1066             /* EWALD ELECTROSTATICS */
1067             
1068             /* Analytical PME correction */
1069             zeta2            = _mm256_mul_ps(beta2,rsq10);
1070             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1071             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1072             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1073             felec            = _mm256_mul_ps(qq10,felec);
1074             
1075             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1076
1077             fscal            = felec;
1078
1079             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1080
1081             /* Calculate temporary vectorial force */
1082             tx               = _mm256_mul_ps(fscal,dx10);
1083             ty               = _mm256_mul_ps(fscal,dy10);
1084             tz               = _mm256_mul_ps(fscal,dz10);
1085
1086             /* Update vectorial force */
1087             fix1             = _mm256_add_ps(fix1,tx);
1088             fiy1             = _mm256_add_ps(fiy1,ty);
1089             fiz1             = _mm256_add_ps(fiz1,tz);
1090
1091             fjx0             = _mm256_add_ps(fjx0,tx);
1092             fjy0             = _mm256_add_ps(fjy0,ty);
1093             fjz0             = _mm256_add_ps(fjz0,tz);
1094
1095             }
1096
1097             /**************************
1098              * CALCULATE INTERACTIONS *
1099              **************************/
1100
1101             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1102             {
1103
1104             r20              = _mm256_mul_ps(rsq20,rinv20);
1105
1106             /* Compute parameters for interactions between i and j atoms */
1107             qq20             = _mm256_mul_ps(iq2,jq0);
1108
1109             /* EWALD ELECTROSTATICS */
1110             
1111             /* Analytical PME correction */
1112             zeta2            = _mm256_mul_ps(beta2,rsq20);
1113             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1114             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1115             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1116             felec            = _mm256_mul_ps(qq20,felec);
1117             
1118             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1119
1120             fscal            = felec;
1121
1122             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1123
1124             /* Calculate temporary vectorial force */
1125             tx               = _mm256_mul_ps(fscal,dx20);
1126             ty               = _mm256_mul_ps(fscal,dy20);
1127             tz               = _mm256_mul_ps(fscal,dz20);
1128
1129             /* Update vectorial force */
1130             fix2             = _mm256_add_ps(fix2,tx);
1131             fiy2             = _mm256_add_ps(fiy2,ty);
1132             fiz2             = _mm256_add_ps(fiz2,tz);
1133
1134             fjx0             = _mm256_add_ps(fjx0,tx);
1135             fjy0             = _mm256_add_ps(fjy0,ty);
1136             fjz0             = _mm256_add_ps(fjz0,tz);
1137
1138             }
1139
1140             fjptrA             = f+j_coord_offsetA;
1141             fjptrB             = f+j_coord_offsetB;
1142             fjptrC             = f+j_coord_offsetC;
1143             fjptrD             = f+j_coord_offsetD;
1144             fjptrE             = f+j_coord_offsetE;
1145             fjptrF             = f+j_coord_offsetF;
1146             fjptrG             = f+j_coord_offsetG;
1147             fjptrH             = f+j_coord_offsetH;
1148
1149             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1150
1151             /* Inner loop uses 187 flops */
1152         }
1153
1154         if(jidx<j_index_end)
1155         {
1156
1157             /* Get j neighbor index, and coordinate index */
1158             jnrlistA         = jjnr[jidx];
1159             jnrlistB         = jjnr[jidx+1];
1160             jnrlistC         = jjnr[jidx+2];
1161             jnrlistD         = jjnr[jidx+3];
1162             jnrlistE         = jjnr[jidx+4];
1163             jnrlistF         = jjnr[jidx+5];
1164             jnrlistG         = jjnr[jidx+6];
1165             jnrlistH         = jjnr[jidx+7];
1166             /* The sign of each jjnr entry is negative for padded (non-real) atoms.
1167              * The mask built below is 0xFFFFFFFF for such dummy entries and 0x0 for real ones,
1168              * so it is used as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
1169              */
1170             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1171                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
1172                                             
1173             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1174             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1175             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1176             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1177             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
1178             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
1179             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
1180             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
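            /* Dummy (negative) entries are clamped to index 0 so the coordinate and parameter
             * loads below stay within bounds; their contributions are removed later via dummy_mask.
             */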
1181             j_coord_offsetA  = DIM*jnrA;
1182             j_coord_offsetB  = DIM*jnrB;
1183             j_coord_offsetC  = DIM*jnrC;
1184             j_coord_offsetD  = DIM*jnrD;
1185             j_coord_offsetE  = DIM*jnrE;
1186             j_coord_offsetF  = DIM*jnrF;
1187             j_coord_offsetG  = DIM*jnrG;
1188             j_coord_offsetH  = DIM*jnrH;
1189
1190             /* load j atom coordinates */
1191             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1192                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1193                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1194                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1195                                                  &jx0,&jy0,&jz0);
1196
1197             /* Calculate displacement vector */
1198             dx00             = _mm256_sub_ps(ix0,jx0);
1199             dy00             = _mm256_sub_ps(iy0,jy0);
1200             dz00             = _mm256_sub_ps(iz0,jz0);
1201             dx10             = _mm256_sub_ps(ix1,jx0);
1202             dy10             = _mm256_sub_ps(iy1,jy0);
1203             dz10             = _mm256_sub_ps(iz1,jz0);
1204             dx20             = _mm256_sub_ps(ix2,jx0);
1205             dy20             = _mm256_sub_ps(iy2,jy0);
1206             dz20             = _mm256_sub_ps(iz2,jz0);
1207
1208             /* Calculate squared distance and things based on it */
1209             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1210             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1211             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1212
1213             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1214             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1215             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1216
1217             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1218             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1219             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1220
1221             /* Load parameters for j particles */
1222             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1223                                                                  charge+jnrC+0,charge+jnrD+0,
1224                                                                  charge+jnrE+0,charge+jnrF+0,
1225                                                                  charge+jnrG+0,charge+jnrH+0);
1226             vdwjidx0A        = 2*vdwtype[jnrA+0];
1227             vdwjidx0B        = 2*vdwtype[jnrB+0];
1228             vdwjidx0C        = 2*vdwtype[jnrC+0];
1229             vdwjidx0D        = 2*vdwtype[jnrD+0];
1230             vdwjidx0E        = 2*vdwtype[jnrE+0];
1231             vdwjidx0F        = 2*vdwtype[jnrF+0];
1232             vdwjidx0G        = 2*vdwtype[jnrG+0];
1233             vdwjidx0H        = 2*vdwtype[jnrH+0];
1234
1235             fjx0             = _mm256_setzero_ps();
1236             fjy0             = _mm256_setzero_ps();
1237             fjz0             = _mm256_setzero_ps();
1238
1239             /**************************
1240              * CALCULATE INTERACTIONS *
1241              **************************/
1242
1243             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1244             {
1245
1246             r00              = _mm256_mul_ps(rsq00,rinv00);
1247             r00              = _mm256_andnot_ps(dummy_mask,r00);
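            /* Clearing r00 in dummy lanes guards against meaningless or non-finite values
             * (the clamped loads can even give rsq = 0, hence rinv = inf) leaking into later math.
             */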
1248
1249             /* Compute parameters for interactions between i and j atoms */
1250             qq00             = _mm256_mul_ps(iq0,jq0);
1251             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1252                                             vdwioffsetptr0+vdwjidx0B,
1253                                             vdwioffsetptr0+vdwjidx0C,
1254                                             vdwioffsetptr0+vdwjidx0D,
1255                                             vdwioffsetptr0+vdwjidx0E,
1256                                             vdwioffsetptr0+vdwjidx0F,
1257                                             vdwioffsetptr0+vdwjidx0G,
1258                                             vdwioffsetptr0+vdwjidx0H,
1259                                             &c6_00,&c12_00);
1260
1261             /* EWALD ELECTROSTATICS */
1262             
1263             /* Analytical PME correction */
1264             zeta2            = _mm256_mul_ps(beta2,rsq00);
1265             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1266             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1267             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1268             felec            = _mm256_mul_ps(qq00,felec);
1269             
1270             /* LENNARD-JONES DISPERSION/REPULSION */
1271
1272             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1273             fvdw             = _mm256_mul_ps(_mm256_sub_ps(_mm256_mul_ps(c12_00,rinvsix),c6_00),_mm256_mul_ps(rinvsix,rinvsq00));
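            /* fvdw is the Lennard-Jones force divided by r, (12*C12/r^12 - 6*C6/r^6)/r^2;
             * no explicit factors of 6 and 12 appear because GROMACS stores c6 and c12 pre-scaled.
             */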
1274
1275             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1276
1277             fscal            = _mm256_add_ps(felec,fvdw);
1278
1279             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1280
1281             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
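            /* Zero fscal in dummy lanes so padded entries add nothing to the forces. */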
1282
1283             /* Calculate temporary vectorial force */
1284             tx               = _mm256_mul_ps(fscal,dx00);
1285             ty               = _mm256_mul_ps(fscal,dy00);
1286             tz               = _mm256_mul_ps(fscal,dz00);
1287
1288             /* Update vectorial force */
1289             fix0             = _mm256_add_ps(fix0,tx);
1290             fiy0             = _mm256_add_ps(fiy0,ty);
1291             fiz0             = _mm256_add_ps(fiz0,tz);
1292
1293             fjx0             = _mm256_add_ps(fjx0,tx);
1294             fjy0             = _mm256_add_ps(fjy0,ty);
1295             fjz0             = _mm256_add_ps(fjz0,tz);
1296
1297             }
1298
1299             /**************************
1300              * CALCULATE INTERACTIONS *
1301              **************************/
1302
1303             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1304             {
1305
1306             r10              = _mm256_mul_ps(rsq10,rinv10);
1307             r10              = _mm256_andnot_ps(dummy_mask,r10);
1308
1309             /* Compute parameters for interactions between i and j atoms */
1310             qq10             = _mm256_mul_ps(iq1,jq0);
1311
1312             /* EWALD ELECTROSTATICS */
1313             
1314             /* Analytical PME correction */
1315             zeta2            = _mm256_mul_ps(beta2,rsq10);
1316             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1317             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1318             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1319             felec            = _mm256_mul_ps(qq10,felec);
1320             
1321             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1322
1323             fscal            = felec;
1324
1325             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1326
1327             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1328
1329             /* Calculate temporary vectorial force */
1330             tx               = _mm256_mul_ps(fscal,dx10);
1331             ty               = _mm256_mul_ps(fscal,dy10);
1332             tz               = _mm256_mul_ps(fscal,dz10);
1333
1334             /* Update vectorial force */
1335             fix1             = _mm256_add_ps(fix1,tx);
1336             fiy1             = _mm256_add_ps(fiy1,ty);
1337             fiz1             = _mm256_add_ps(fiz1,tz);
1338
1339             fjx0             = _mm256_add_ps(fjx0,tx);
1340             fjy0             = _mm256_add_ps(fjy0,ty);
1341             fjz0             = _mm256_add_ps(fjz0,tz);
1342
1343             }
1344
1345             /**************************
1346              * CALCULATE INTERACTIONS *
1347              **************************/
1348
1349             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1350             {
1351
1352             r20              = _mm256_mul_ps(rsq20,rinv20);
1353             r20              = _mm256_andnot_ps(dummy_mask,r20);
1354
1355             /* Compute parameters for interactions between i and j atoms */
1356             qq20             = _mm256_mul_ps(iq2,jq0);
1357
1358             /* EWALD ELECTROSTATICS */
1359             
1360             /* Analytical PME correction */
1361             zeta2            = _mm256_mul_ps(beta2,rsq20);
1362             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1363             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1364             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1365             felec            = _mm256_mul_ps(qq20,felec);
1366             
1367             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1368
1369             fscal            = felec;
1370
1371             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1372
1373             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1374
1375             /* Calculate temporary vectorial force */
1376             tx               = _mm256_mul_ps(fscal,dx20);
1377             ty               = _mm256_mul_ps(fscal,dy20);
1378             tz               = _mm256_mul_ps(fscal,dz20);
1379
1380             /* Update vectorial force */
1381             fix2             = _mm256_add_ps(fix2,tx);
1382             fiy2             = _mm256_add_ps(fiy2,ty);
1383             fiz2             = _mm256_add_ps(fiz2,tz);
1384
1385             fjx0             = _mm256_add_ps(fjx0,tx);
1386             fjy0             = _mm256_add_ps(fjy0,ty);
1387             fjz0             = _mm256_add_ps(fjz0,tz);
1388
1389             }
1390
1391             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1392             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1393             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1394             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1395             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1396             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1397             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1398             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
1399
1400             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
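            /* Dummy lanes point at the scratch buffer, so their (already zeroed) forces are discarded. */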
1401
1402             /* Inner loop uses 190 flops */
1403         }
1404
1405         /* End of innermost loop */
1406
1407         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1408                                                  f+i_coord_offset,fshift+i_shift_offset);
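        /* Reduce the per-lane i-atom force accumulators for all three water atoms and add them
         * to the force array and to the shift-force array for this i entry.
         */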
1409
1410         /* Increment number of inner iterations */
1411         inneriter                  += j_index_end - j_index_start;
1412
1413         /* Outer loop uses 18 flops */
1414     }
1415
1416     /* Increment number of outer iterations */
1417     outeriter        += nri;
1418
1419     /* Update outer/inner flops */
1420
1421     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*190);
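    /* Register the approximate flop count (18 per outer and 190 per inner iteration)
     * with the nrnb performance counters under the water3 force-only kernel type.
     */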
1422 }
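
/*
 * Reference sketch (not part of the generated kernel): a plain scalar version of the
 * per-pair scalar force that the SIMD code above evaluates eight pairs at a time,
 * written directly in terms of erfc() and unscaled Lennard-Jones parameters C6/C12.
 * The function name and the unscaled-parameter convention are illustrative assumptions,
 * not GROMACS API; the sketch is only meant for checking a single lane by hand.
 */
static double
reference_pair_fscal(double qq, double c6, double c12, double beta,
                     double rsq, double rcutoff2)
{
    const double sqrt_pi = 1.7724538509055160;
    double       r, rinv, rinv2, rinv6, felec, fvdw;

    if (rsq >= rcutoff2)
    {
        return 0.0;          /* equivalent of cutoff_mask: no force beyond the cutoff */
    }
    r     = sqrt(rsq);
    rinv  = 1.0/r;
    rinv2 = rinv*rinv;
    rinv6 = rinv2*rinv2*rinv2;
    /* Ewald real-space force divided by r */
    felec = qq*(erfc(beta*r)*rinv2*rinv + 2.0*beta*exp(-beta*beta*rsq)*rinv2/sqrt_pi);
    /* Lennard-Jones force divided by r (explicit 6/12 factors, unscaled C6/C12) */
    fvdw  = (12.0*c12*rinv6 - 6.0*c6)*rinv6*rinv2;
    return felec + fvdw;
}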