src/gmxlib/nonbonded/nb_kernel_avx_256_single/nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_avx_256_single.c
1 /*
2  * Note: this file was generated by the Gromacs avx_256_single kernel generator.
3  *
4  *                This source code is part of
5  *
6  *                 G   R   O   M   A   C   S
7  *
8  * Copyright (c) 2001-2012, The GROMACS Development Team
9  *
10  * Gromacs is a library for molecular simulation and trajectory analysis,
11  * written by Erik Lindahl, David van der Spoel, Berk Hess, and others - for
12  * a full list of developers and information, check out http://www.gromacs.org
13  *
14  * This program is free software; you can redistribute it and/or modify it under
15  * the terms of the GNU Lesser General Public License as published by the Free
16  * Software Foundation; either version 2 of the License, or (at your option) any
17  * later version.
18  *
19  * To help fund GROMACS development, we humbly ask that you cite
20  * the papers people have written on it - you can find them on the website.
21  */
22 #ifdef HAVE_CONFIG_H
23 #include <config.h>
24 #endif
25
26 #include <math.h>
27
28 #include "../nb_kernel.h"
29 #include "types/simple.h"
30 #include "vec.h"
31 #include "nrnb.h"
32
33 #include "gmx_math_x86_avx_256_single.h"
34 #include "kernelutil_x86_avx_256_single.h"
35
36 /*
37  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_VF_avx_256_single
38  * Electrostatics interaction: Ewald
39  * VdW interaction:            LennardJones
40  * Geometry:                   Water3-Particle
41  * Calculate force/pot:        PotentialAndForce
42  */
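/*
 * Illustrative scalar reference (an editor's sketch, not part of the generated
 * kernel): the per-pair interaction this kernel vectorizes, i.e. real-space
 * Ewald electrostatics plus Lennard-Jones, both modified by a quintic
 * potential switch on [rswitch,rcutoff]. The helper name and the plain C6/C12
 * convention (V_LJ = c12/r^12 - c6/r^6) are assumptions for illustration only;
 * the SIMD kernel below reads its pair parameters from the nbfp table and uses
 * the analytical PME correction instead of calling erfc() directly.
 */
static void
reference_pair_ewsw_ljsw(real qq, real c6, real c12, real beta,
                         real rswitch, real rcutoff, real r,
                         real *vtot, real *fscal)
{
    const real two_over_sqrtpi = 1.12837916709551257;  /* 2/sqrt(pi) */
    real       rinv, rinv2, rinv6, velec, felec, vvdw, fvdw;
    real       width, d, xd, sw, dsw;

    if (r >= rcutoff)
    {
        *vtot  = 0.0;
        *fscal = 0.0;
        return;
    }
    rinv  = 1.0/r;
    rinv2 = rinv*rinv;
    rinv6 = rinv2*rinv2*rinv2;

    /* Real-space Ewald: V = qq*erfc(beta*r)/r, fscal = (-dV/dr)/r */
    velec = qq*erfc(beta*r)*rinv;
    felec = qq*(erfc(beta*r)*rinv + two_over_sqrtpi*beta*exp(-beta*beta*r*r))*rinv2;

    /* Lennard-Jones: V = c12/r^12 - c6/r^6, fscal = (-dV/dr)/r */
    vvdw  = c12*rinv6*rinv6 - c6*rinv6;
    fvdw  = (12.0*c12*rinv6*rinv6 - 6.0*c6*rinv6)*rinv2;

    /* Quintic switch S(x) = 1 - 10x^3 + 15x^4 - 6x^5, x = (r-rswitch)/(rcutoff-rswitch) */
    width = rcutoff - rswitch;
    d     = (r > rswitch) ? (r - rswitch) : 0.0;
    xd    = d/width;
    sw    = 1.0 + xd*xd*xd*(-10.0 + xd*(15.0 - 6.0*xd));
    dsw   = xd*xd*(-30.0 + xd*(60.0 - 30.0*xd))/width;

    /* (V*S)' = V'*S + V*S'  =>  switched fscal = fscal*S - V*S'/r */
    *fscal = (felec + fvdw)*sw - (velec + vvdw)*dsw*rinv;
    *vtot  = (velec + vvdw)*sw;
}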
43 void
44 nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_VF_avx_256_single
45                     (t_nblist * gmx_restrict                nlist,
46                      rvec * gmx_restrict                    xx,
47                      rvec * gmx_restrict                    ff,
48                      t_forcerec * gmx_restrict              fr,
49                      t_mdatoms * gmx_restrict               mdatoms,
50                      nb_kernel_data_t * gmx_restrict        kernel_data,
51                      t_nrnb * gmx_restrict                  nrnb)
52 {
53     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
54      * just 0 for non-waters.
55      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. for the eight different
56      * jnr indices corresponding to data put in the eight positions in the SIMD register.
57      */
58     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
59     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
60     int              jnrA,jnrB,jnrC,jnrD;
61     int              jnrE,jnrF,jnrG,jnrH;
62     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
63     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
64     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
65     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
66     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
67     real             rcutoff_scalar;
68     real             *shiftvec,*fshift,*x,*f;
69     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
70     real             scratch[4*DIM];
71     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
72     real *           vdwioffsetptr0;
73     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
74     real *           vdwioffsetptr1;
75     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
76     real *           vdwioffsetptr2;
77     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
78     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
79     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
80     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
81     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
82     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
83     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
84     real             *charge;
85     int              nvdwtype;
86     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
87     int              *vdwtype;
88     real             *vdwparam;
89     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
90     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
91     __m256i          ewitab;
92     __m128i          ewitab_lo,ewitab_hi;
93     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
94     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
95     real             *ewtab;
96     __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
97     real             rswitch_scalar,d_scalar;
98     __m256           dummy_mask,cutoff_mask;
99     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
100     __m256           one     = _mm256_set1_ps(1.0);
101     __m256           two     = _mm256_set1_ps(2.0);
102     x                = xx[0];
103     f                = ff[0];
104
105     nri              = nlist->nri;
106     iinr             = nlist->iinr;
107     jindex           = nlist->jindex;
108     jjnr             = nlist->jjnr;
109     shiftidx         = nlist->shift;
110     gid              = nlist->gid;
111     shiftvec         = fr->shift_vec[0];
112     fshift           = fr->fshift[0];
113     facel            = _mm256_set1_ps(fr->epsfac);
114     charge           = mdatoms->chargeA;
115     nvdwtype         = fr->ntype;
116     vdwparam         = fr->nbfp;
117     vdwtype          = mdatoms->typeA;
118
119     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
120     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff);
121     beta2            = _mm256_mul_ps(beta,beta);
122     beta3            = _mm256_mul_ps(beta,beta2);
123
124     ewtab            = fr->ic->tabq_coul_FDV0;
125     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
126     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
127
128     /* Setup water-specific parameters */
129     inr              = nlist->iinr[0];
130     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
131     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
132     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
133     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
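    /* For the Water3-Particle geometry every i entry in the list is an identical
     * three-site water molecule, so the three i charges and the VdW row pointer
     * for the first water site can be taken once from nlist->iinr[0] instead of
     * being reloaded in the outer loop.
     */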
134
135     /* When explicit cutoffs are used the value must be identical for electrostatics and VdW, so we use the electrostatics cutoff (an arbitrary choice) */
136     rcutoff_scalar   = fr->rcoulomb;
137     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
138     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
139
140     rswitch_scalar   = fr->rcoulomb_switch;
141     rswitch          = _mm256_set1_ps(rswitch_scalar);
142     /* Setup switch parameters */
143     d_scalar         = rcutoff_scalar-rswitch_scalar;
144     d                = _mm256_set1_ps(d_scalar);
145     swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
146     swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
147     swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
148     swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
149     swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
150     swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
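    /* The coefficients above implement the quintic potential switch
     *   sw(d)  = 1 + d^3*(swV3 + d*(swV4 + d*swV5)) = 1 - 10*x^3 + 15*x^4 - 6*x^5
     *   dsw(d) = d^2*(swF2 + d*(swF3 + d*swF4))     = (-30*x^2 + 60*x^3 - 30*x^4)/D
     * with d = r - rswitch clamped to [0,D], D = rcutoff - rswitch and x = d/D.
     * sw goes smoothly from 1 at rswitch to 0 at rcutoff with zero first and
     * second derivatives at both ends; dsw is d(sw)/dr, used in the force below.
     */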
151
152     /* Initialize to zero to avoid compiler warnings about possibly uninitialized variables */
153     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
154     j_coord_offsetA = 0;
155     j_coord_offsetB = 0;
156     j_coord_offsetC = 0;
157     j_coord_offsetD = 0;
158     j_coord_offsetE = 0;
159     j_coord_offsetF = 0;
160     j_coord_offsetG = 0;
161     j_coord_offsetH = 0;
162
163     outeriter        = 0;
164     inneriter        = 0;
165
166     for(iidx=0;iidx<4*DIM;iidx++)
167     {
168         scratch[iidx] = 0.0;
169     }
170
171     /* Start outer loop over neighborlists */
172     for(iidx=0; iidx<nri; iidx++)
173     {
174         /* Load shift vector for this list */
175         i_shift_offset   = DIM*shiftidx[iidx];
176
177         /* Load limits for loop over neighbors */
178         j_index_start    = jindex[iidx];
179         j_index_end      = jindex[iidx+1];
180
181         /* Get outer coordinate index */
182         inr              = iinr[iidx];
183         i_coord_offset   = DIM*inr;
184
185         /* Load i particle coords and add shift vector */
186         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
187                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
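        /* The three i-site coordinates (plus the shift vector) are broadcast into
         * all eight SIMD lanes here, while each lane of the j coordinates loaded
         * below holds a different neighbour, so one iteration covers 3 i-sites x 8 j-atoms.
         */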
188
189         fix0             = _mm256_setzero_ps();
190         fiy0             = _mm256_setzero_ps();
191         fiz0             = _mm256_setzero_ps();
192         fix1             = _mm256_setzero_ps();
193         fiy1             = _mm256_setzero_ps();
194         fiz1             = _mm256_setzero_ps();
195         fix2             = _mm256_setzero_ps();
196         fiy2             = _mm256_setzero_ps();
197         fiz2             = _mm256_setzero_ps();
198
199         /* Reset potential sums */
200         velecsum         = _mm256_setzero_ps();
201         vvdwsum          = _mm256_setzero_ps();
202
203         /* Start inner kernel loop */
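        /* The j list is padded so the main loop below runs over full groups of eight
         * real entries; padding entries carry negative indices, hence the
         * jjnr[jidx+7]>=0 test. Any partial final group is handled afterwards in the
         * masked epilogue under if(jidx<j_index_end).
         */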
204         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
205         {
206
207             /* Get j neighbor index, and coordinate index */
208             jnrA             = jjnr[jidx];
209             jnrB             = jjnr[jidx+1];
210             jnrC             = jjnr[jidx+2];
211             jnrD             = jjnr[jidx+3];
212             jnrE             = jjnr[jidx+4];
213             jnrF             = jjnr[jidx+5];
214             jnrG             = jjnr[jidx+6];
215             jnrH             = jjnr[jidx+7];
216             j_coord_offsetA  = DIM*jnrA;
217             j_coord_offsetB  = DIM*jnrB;
218             j_coord_offsetC  = DIM*jnrC;
219             j_coord_offsetD  = DIM*jnrD;
220             j_coord_offsetE  = DIM*jnrE;
221             j_coord_offsetF  = DIM*jnrF;
222             j_coord_offsetG  = DIM*jnrG;
223             j_coord_offsetH  = DIM*jnrH;
224
225             /* load j atom coordinates */
226             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
227                                                  x+j_coord_offsetC,x+j_coord_offsetD,
228                                                  x+j_coord_offsetE,x+j_coord_offsetF,
229                                                  x+j_coord_offsetG,x+j_coord_offsetH,
230                                                  &jx0,&jy0,&jz0);
231
232             /* Calculate displacement vector */
233             dx00             = _mm256_sub_ps(ix0,jx0);
234             dy00             = _mm256_sub_ps(iy0,jy0);
235             dz00             = _mm256_sub_ps(iz0,jz0);
236             dx10             = _mm256_sub_ps(ix1,jx0);
237             dy10             = _mm256_sub_ps(iy1,jy0);
238             dz10             = _mm256_sub_ps(iz1,jz0);
239             dx20             = _mm256_sub_ps(ix2,jx0);
240             dy20             = _mm256_sub_ps(iy2,jy0);
241             dz20             = _mm256_sub_ps(iz2,jz0);
242
243             /* Calculate squared distance and things based on it */
244             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
245             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
246             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
247
248             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
249             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
250             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
251
252             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
253             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
254             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
255
256             /* Load parameters for j particles */
257             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
258                                                                  charge+jnrC+0,charge+jnrD+0,
259                                                                  charge+jnrE+0,charge+jnrF+0,
260                                                                  charge+jnrG+0,charge+jnrH+0);
261             vdwjidx0A        = 2*vdwtype[jnrA+0];
262             vdwjidx0B        = 2*vdwtype[jnrB+0];
263             vdwjidx0C        = 2*vdwtype[jnrC+0];
264             vdwjidx0D        = 2*vdwtype[jnrD+0];
265             vdwjidx0E        = 2*vdwtype[jnrE+0];
266             vdwjidx0F        = 2*vdwtype[jnrF+0];
267             vdwjidx0G        = 2*vdwtype[jnrG+0];
268             vdwjidx0H        = 2*vdwtype[jnrH+0];
269
270             fjx0             = _mm256_setzero_ps();
271             fjy0             = _mm256_setzero_ps();
272             fjz0             = _mm256_setzero_ps();
273
274             /**************************
275              * CALCULATE INTERACTIONS *
276              **************************/
277
278             if (gmx_mm256_any_lt(rsq00,rcutoff2))
279             {
280
281             r00              = _mm256_mul_ps(rsq00,rinv00);
282
283             /* Compute parameters for interactions between i and j atoms */
284             qq00             = _mm256_mul_ps(iq0,jq0);
285             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
286                                             vdwioffsetptr0+vdwjidx0B,
287                                             vdwioffsetptr0+vdwjidx0C,
288                                             vdwioffsetptr0+vdwjidx0D,
289                                             vdwioffsetptr0+vdwjidx0E,
290                                             vdwioffsetptr0+vdwjidx0F,
291                                             vdwioffsetptr0+vdwjidx0G,
292                                             vdwioffsetptr0+vdwjidx0H,
293                                             &c6_00,&c12_00);
294
295             /* EWALD ELECTROSTATICS */
296             
297             /* Analytical PME correction */
298             zeta2            = _mm256_mul_ps(beta2,rsq00);
299             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
300             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
301             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
302             felec            = _mm256_mul_ps(qq00,felec);
303             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
304             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
305             velec            = _mm256_sub_ps(rinv00,pmecorrV);
306             velec            = _mm256_mul_ps(qq00,velec);
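            /* In scalar terms this evaluates velec = qq*erfc(beta*r)/r and
             * felec = (-d velec/dr)/r: pmecorrV and pmecorrF supply the erf-based
             * correction to the plain 1/r and 1/r^3 Coulomb terms, avoiding a
             * per-element erfc() call.
             */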
307             
308             /* LENNARD-JONES DISPERSION/REPULSION */
309
310             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
311             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
312             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
313             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
314             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
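            /* With the one_sixth/one_twelfth factors above, the stored pair parameters
             * correspond to 6*C6 and 12*C12, so that vvdw reduces to the usual
             * C12/r^12 - C6/r^6 while fvdw becomes (12*C12/r^12 - 6*C6/r^6)/r^2 as
             * required for the force.
             */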
315
316             d                = _mm256_sub_ps(r00,rswitch);
317             d                = _mm256_max_ps(d,_mm256_setzero_ps());
318             d2               = _mm256_mul_ps(d,d);
319             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
320
321             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
322
323             /* Evaluate switch function */
324             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
325             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
326             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
327             velec            = _mm256_mul_ps(velec,sw);
328             vvdw             = _mm256_mul_ps(vvdw,sw);
329             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
330
331             /* Update potential sum for this i atom from the interaction with this j atom. */
332             velec            = _mm256_and_ps(velec,cutoff_mask);
333             velecsum         = _mm256_add_ps(velecsum,velec);
334             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
335             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
336
337             fscal            = _mm256_add_ps(felec,fvdw);
338
339             fscal            = _mm256_and_ps(fscal,cutoff_mask);
340
341             /* Calculate temporary vectorial force */
342             tx               = _mm256_mul_ps(fscal,dx00);
343             ty               = _mm256_mul_ps(fscal,dy00);
344             tz               = _mm256_mul_ps(fscal,dz00);
345
346             /* Update vectorial force */
347             fix0             = _mm256_add_ps(fix0,tx);
348             fiy0             = _mm256_add_ps(fiy0,ty);
349             fiz0             = _mm256_add_ps(fiz0,tz);
350
351             fjx0             = _mm256_add_ps(fjx0,tx);
352             fjy0             = _mm256_add_ps(fjy0,ty);
353             fjz0             = _mm256_add_ps(fjz0,tz);
354
355             }
356
357             /**************************
358              * CALCULATE INTERACTIONS *
359              **************************/
360
361             if (gmx_mm256_any_lt(rsq10,rcutoff2))
362             {
363
364             r10              = _mm256_mul_ps(rsq10,rinv10);
365
366             /* Compute parameters for interactions between i and j atoms */
367             qq10             = _mm256_mul_ps(iq1,jq0);
368
369             /* EWALD ELECTROSTATICS */
370             
371             /* Analytical PME correction */
372             zeta2            = _mm256_mul_ps(beta2,rsq10);
373             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
374             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
375             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
376             felec            = _mm256_mul_ps(qq10,felec);
377             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
378             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
379             velec            = _mm256_sub_ps(rinv10,pmecorrV);
380             velec            = _mm256_mul_ps(qq10,velec);
381             
382             d                = _mm256_sub_ps(r10,rswitch);
383             d                = _mm256_max_ps(d,_mm256_setzero_ps());
384             d2               = _mm256_mul_ps(d,d);
385             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
386
387             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
388
389             /* Evaluate switch function */
390             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
391             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
392             velec            = _mm256_mul_ps(velec,sw);
393             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
394
395             /* Update potential sum for this i atom from the interaction with this j atom. */
396             velec            = _mm256_and_ps(velec,cutoff_mask);
397             velecsum         = _mm256_add_ps(velecsum,velec);
398
399             fscal            = felec;
400
401             fscal            = _mm256_and_ps(fscal,cutoff_mask);
402
403             /* Calculate temporary vectorial force */
404             tx               = _mm256_mul_ps(fscal,dx10);
405             ty               = _mm256_mul_ps(fscal,dy10);
406             tz               = _mm256_mul_ps(fscal,dz10);
407
408             /* Update vectorial force */
409             fix1             = _mm256_add_ps(fix1,tx);
410             fiy1             = _mm256_add_ps(fiy1,ty);
411             fiz1             = _mm256_add_ps(fiz1,tz);
412
413             fjx0             = _mm256_add_ps(fjx0,tx);
414             fjy0             = _mm256_add_ps(fjy0,ty);
415             fjz0             = _mm256_add_ps(fjz0,tz);
416
417             }
418
419             /**************************
420              * CALCULATE INTERACTIONS *
421              **************************/
422
423             if (gmx_mm256_any_lt(rsq20,rcutoff2))
424             {
425
426             r20              = _mm256_mul_ps(rsq20,rinv20);
427
428             /* Compute parameters for interactions between i and j atoms */
429             qq20             = _mm256_mul_ps(iq2,jq0);
430
431             /* EWALD ELECTROSTATICS */
432             
433             /* Analytical PME correction */
434             zeta2            = _mm256_mul_ps(beta2,rsq20);
435             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
436             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
437             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
438             felec            = _mm256_mul_ps(qq20,felec);
439             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
440             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
441             velec            = _mm256_sub_ps(rinv20,pmecorrV);
442             velec            = _mm256_mul_ps(qq20,velec);
443             
444             d                = _mm256_sub_ps(r20,rswitch);
445             d                = _mm256_max_ps(d,_mm256_setzero_ps());
446             d2               = _mm256_mul_ps(d,d);
447             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
448
449             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
450
451             /* Evaluate switch function */
452             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
453             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
454             velec            = _mm256_mul_ps(velec,sw);
455             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
456
457             /* Update potential sum for this i atom from the interaction with this j atom. */
458             velec            = _mm256_and_ps(velec,cutoff_mask);
459             velecsum         = _mm256_add_ps(velecsum,velec);
460
461             fscal            = felec;
462
463             fscal            = _mm256_and_ps(fscal,cutoff_mask);
464
465             /* Calculate temporary vectorial force */
466             tx               = _mm256_mul_ps(fscal,dx20);
467             ty               = _mm256_mul_ps(fscal,dy20);
468             tz               = _mm256_mul_ps(fscal,dz20);
469
470             /* Update vectorial force */
471             fix2             = _mm256_add_ps(fix2,tx);
472             fiy2             = _mm256_add_ps(fiy2,ty);
473             fiz2             = _mm256_add_ps(fiz2,tz);
474
475             fjx0             = _mm256_add_ps(fjx0,tx);
476             fjy0             = _mm256_add_ps(fjy0,ty);
477             fjz0             = _mm256_add_ps(fjz0,tz);
478
479             }
480
481             fjptrA             = f+j_coord_offsetA;
482             fjptrB             = f+j_coord_offsetB;
483             fjptrC             = f+j_coord_offsetC;
484             fjptrD             = f+j_coord_offsetD;
485             fjptrE             = f+j_coord_offsetE;
486             fjptrF             = f+j_coord_offsetF;
487             fjptrG             = f+j_coord_offsetG;
488             fjptrH             = f+j_coord_offsetH;
489
490             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
491
492             /* Inner loop uses 345 flops */
493         }
494
495         if(jidx<j_index_end)
496         {
497
498             /* Get j neighbor index, and coordinate index */
499             jnrlistA         = jjnr[jidx];
500             jnrlistB         = jjnr[jidx+1];
501             jnrlistC         = jjnr[jidx+2];
502             jnrlistD         = jjnr[jidx+3];
503             jnrlistE         = jjnr[jidx+4];
504             jnrlistF         = jjnr[jidx+5];
505             jnrlistG         = jjnr[jidx+6];
506             jnrlistH         = jjnr[jidx+7];
507             /* Sign of each element will be negative for non-real atoms.
508              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
509              * so use it as val = _mm_andnot_ps(mask,val) to clear dummy entries.
510              */
511             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
512                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
513                                             
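            /* Padding entries are clamped to index 0 below so the gather loads stay in
             * bounds; their contributions are zeroed with dummy_mask before the energy
             * and force accumulation, and their j forces are later written to the local
             * scratch buffer instead of f[].
             */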
514             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
515             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
516             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
517             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
518             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
519             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
520             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
521             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
522             j_coord_offsetA  = DIM*jnrA;
523             j_coord_offsetB  = DIM*jnrB;
524             j_coord_offsetC  = DIM*jnrC;
525             j_coord_offsetD  = DIM*jnrD;
526             j_coord_offsetE  = DIM*jnrE;
527             j_coord_offsetF  = DIM*jnrF;
528             j_coord_offsetG  = DIM*jnrG;
529             j_coord_offsetH  = DIM*jnrH;
530
531             /* load j atom coordinates */
532             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
533                                                  x+j_coord_offsetC,x+j_coord_offsetD,
534                                                  x+j_coord_offsetE,x+j_coord_offsetF,
535                                                  x+j_coord_offsetG,x+j_coord_offsetH,
536                                                  &jx0,&jy0,&jz0);
537
538             /* Calculate displacement vector */
539             dx00             = _mm256_sub_ps(ix0,jx0);
540             dy00             = _mm256_sub_ps(iy0,jy0);
541             dz00             = _mm256_sub_ps(iz0,jz0);
542             dx10             = _mm256_sub_ps(ix1,jx0);
543             dy10             = _mm256_sub_ps(iy1,jy0);
544             dz10             = _mm256_sub_ps(iz1,jz0);
545             dx20             = _mm256_sub_ps(ix2,jx0);
546             dy20             = _mm256_sub_ps(iy2,jy0);
547             dz20             = _mm256_sub_ps(iz2,jz0);
548
549             /* Calculate squared distance and things based on it */
550             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
551             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
552             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
553
554             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
555             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
556             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
557
558             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
559             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
560             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
561
562             /* Load parameters for j particles */
563             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
564                                                                  charge+jnrC+0,charge+jnrD+0,
565                                                                  charge+jnrE+0,charge+jnrF+0,
566                                                                  charge+jnrG+0,charge+jnrH+0);
567             vdwjidx0A        = 2*vdwtype[jnrA+0];
568             vdwjidx0B        = 2*vdwtype[jnrB+0];
569             vdwjidx0C        = 2*vdwtype[jnrC+0];
570             vdwjidx0D        = 2*vdwtype[jnrD+0];
571             vdwjidx0E        = 2*vdwtype[jnrE+0];
572             vdwjidx0F        = 2*vdwtype[jnrF+0];
573             vdwjidx0G        = 2*vdwtype[jnrG+0];
574             vdwjidx0H        = 2*vdwtype[jnrH+0];
575
576             fjx0             = _mm256_setzero_ps();
577             fjy0             = _mm256_setzero_ps();
578             fjz0             = _mm256_setzero_ps();
579
580             /**************************
581              * CALCULATE INTERACTIONS *
582              **************************/
583
584             if (gmx_mm256_any_lt(rsq00,rcutoff2))
585             {
586
587             r00              = _mm256_mul_ps(rsq00,rinv00);
588             r00              = _mm256_andnot_ps(dummy_mask,r00);
589
590             /* Compute parameters for interactions between i and j atoms */
591             qq00             = _mm256_mul_ps(iq0,jq0);
592             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
593                                             vdwioffsetptr0+vdwjidx0B,
594                                             vdwioffsetptr0+vdwjidx0C,
595                                             vdwioffsetptr0+vdwjidx0D,
596                                             vdwioffsetptr0+vdwjidx0E,
597                                             vdwioffsetptr0+vdwjidx0F,
598                                             vdwioffsetptr0+vdwjidx0G,
599                                             vdwioffsetptr0+vdwjidx0H,
600                                             &c6_00,&c12_00);
601
602             /* EWALD ELECTROSTATICS */
603             
604             /* Analytical PME correction */
605             zeta2            = _mm256_mul_ps(beta2,rsq00);
606             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
607             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
608             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
609             felec            = _mm256_mul_ps(qq00,felec);
610             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
611             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
612             velec            = _mm256_sub_ps(rinv00,pmecorrV);
613             velec            = _mm256_mul_ps(qq00,velec);
614             
615             /* LENNARD-JONES DISPERSION/REPULSION */
616
617             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
618             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
619             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
620             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
621             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
622
623             d                = _mm256_sub_ps(r00,rswitch);
624             d                = _mm256_max_ps(d,_mm256_setzero_ps());
625             d2               = _mm256_mul_ps(d,d);
626             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
627
628             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
629
630             /* Evaluate switch function */
631             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
632             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
633             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
634             velec            = _mm256_mul_ps(velec,sw);
635             vvdw             = _mm256_mul_ps(vvdw,sw);
636             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
637
638             /* Update potential sum for this i atom from the interaction with this j atom. */
639             velec            = _mm256_and_ps(velec,cutoff_mask);
640             velec            = _mm256_andnot_ps(dummy_mask,velec);
641             velecsum         = _mm256_add_ps(velecsum,velec);
642             vvdw             = _mm256_and_ps(vvdw,cutoff_mask);
643             vvdw             = _mm256_andnot_ps(dummy_mask,vvdw);
644             vvdwsum          = _mm256_add_ps(vvdwsum,vvdw);
645
646             fscal            = _mm256_add_ps(felec,fvdw);
647
648             fscal            = _mm256_and_ps(fscal,cutoff_mask);
649
650             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
651
652             /* Calculate temporary vectorial force */
653             tx               = _mm256_mul_ps(fscal,dx00);
654             ty               = _mm256_mul_ps(fscal,dy00);
655             tz               = _mm256_mul_ps(fscal,dz00);
656
657             /* Update vectorial force */
658             fix0             = _mm256_add_ps(fix0,tx);
659             fiy0             = _mm256_add_ps(fiy0,ty);
660             fiz0             = _mm256_add_ps(fiz0,tz);
661
662             fjx0             = _mm256_add_ps(fjx0,tx);
663             fjy0             = _mm256_add_ps(fjy0,ty);
664             fjz0             = _mm256_add_ps(fjz0,tz);
665
666             }
667
668             /**************************
669              * CALCULATE INTERACTIONS *
670              **************************/
671
672             if (gmx_mm256_any_lt(rsq10,rcutoff2))
673             {
674
675             r10              = _mm256_mul_ps(rsq10,rinv10);
676             r10              = _mm256_andnot_ps(dummy_mask,r10);
677
678             /* Compute parameters for interactions between i and j atoms */
679             qq10             = _mm256_mul_ps(iq1,jq0);
680
681             /* EWALD ELECTROSTATICS */
682             
683             /* Analytical PME correction */
684             zeta2            = _mm256_mul_ps(beta2,rsq10);
685             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
686             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
687             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
688             felec            = _mm256_mul_ps(qq10,felec);
689             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
690             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
691             velec            = _mm256_sub_ps(rinv10,pmecorrV);
692             velec            = _mm256_mul_ps(qq10,velec);
693             
694             d                = _mm256_sub_ps(r10,rswitch);
695             d                = _mm256_max_ps(d,_mm256_setzero_ps());
696             d2               = _mm256_mul_ps(d,d);
697             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
698
699             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
700
701             /* Evaluate switch function */
702             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
703             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
704             velec            = _mm256_mul_ps(velec,sw);
705             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
706
707             /* Update potential sum for this i atom from the interaction with this j atom. */
708             velec            = _mm256_and_ps(velec,cutoff_mask);
709             velec            = _mm256_andnot_ps(dummy_mask,velec);
710             velecsum         = _mm256_add_ps(velecsum,velec);
711
712             fscal            = felec;
713
714             fscal            = _mm256_and_ps(fscal,cutoff_mask);
715
716             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
717
718             /* Calculate temporary vectorial force */
719             tx               = _mm256_mul_ps(fscal,dx10);
720             ty               = _mm256_mul_ps(fscal,dy10);
721             tz               = _mm256_mul_ps(fscal,dz10);
722
723             /* Update vectorial force */
724             fix1             = _mm256_add_ps(fix1,tx);
725             fiy1             = _mm256_add_ps(fiy1,ty);
726             fiz1             = _mm256_add_ps(fiz1,tz);
727
728             fjx0             = _mm256_add_ps(fjx0,tx);
729             fjy0             = _mm256_add_ps(fjy0,ty);
730             fjz0             = _mm256_add_ps(fjz0,tz);
731
732             }
733
734             /**************************
735              * CALCULATE INTERACTIONS *
736              **************************/
737
738             if (gmx_mm256_any_lt(rsq20,rcutoff2))
739             {
740
741             r20              = _mm256_mul_ps(rsq20,rinv20);
742             r20              = _mm256_andnot_ps(dummy_mask,r20);
743
744             /* Compute parameters for interactions between i and j atoms */
745             qq20             = _mm256_mul_ps(iq2,jq0);
746
747             /* EWALD ELECTROSTATICS */
748             
749             /* Analytical PME correction */
750             zeta2            = _mm256_mul_ps(beta2,rsq20);
751             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
752             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
753             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
754             felec            = _mm256_mul_ps(qq20,felec);
755             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
756             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
757             velec            = _mm256_sub_ps(rinv20,pmecorrV);
758             velec            = _mm256_mul_ps(qq20,velec);
759             
760             d                = _mm256_sub_ps(r20,rswitch);
761             d                = _mm256_max_ps(d,_mm256_setzero_ps());
762             d2               = _mm256_mul_ps(d,d);
763             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
764
765             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
766
767             /* Evaluate switch function */
768             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
769             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
770             velec            = _mm256_mul_ps(velec,sw);
771             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
772
773             /* Update potential sum for this i atom from the interaction with this j atom. */
774             velec            = _mm256_and_ps(velec,cutoff_mask);
775             velec            = _mm256_andnot_ps(dummy_mask,velec);
776             velecsum         = _mm256_add_ps(velecsum,velec);
777
778             fscal            = felec;
779
780             fscal            = _mm256_and_ps(fscal,cutoff_mask);
781
782             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
783
784             /* Calculate temporary vectorial force */
785             tx               = _mm256_mul_ps(fscal,dx20);
786             ty               = _mm256_mul_ps(fscal,dy20);
787             tz               = _mm256_mul_ps(fscal,dz20);
788
789             /* Update vectorial force */
790             fix2             = _mm256_add_ps(fix2,tx);
791             fiy2             = _mm256_add_ps(fiy2,ty);
792             fiz2             = _mm256_add_ps(fiz2,tz);
793
794             fjx0             = _mm256_add_ps(fjx0,tx);
795             fjy0             = _mm256_add_ps(fjy0,ty);
796             fjz0             = _mm256_add_ps(fjz0,tz);
797
798             }
799
800             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
801             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
802             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
803             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
804             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
805             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
806             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
807             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
808
809             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
810
811             /* Inner loop uses 348 flops */
812         }
813
814         /* End of innermost loop */
815
816         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
817                                                  f+i_coord_offset,fshift+i_shift_offset);
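        /* This reduces the eight per-lane partial forces for each of the three i
         * sites and accumulates the results into f[] and into the shift-force
         * array fshift[] used for the virial.
         */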
818
819         ggid                        = gid[iidx];
820         /* Update potential energies */
821         gmx_mm256_update_1pot_ps(velecsum,kernel_data->energygrp_elec+ggid);
822         gmx_mm256_update_1pot_ps(vvdwsum,kernel_data->energygrp_vdw+ggid);
823
824         /* Increment number of inner iterations */
825         inneriter                  += j_index_end - j_index_start;
826
827         /* Outer loop uses 20 flops */
828     }
829
830     /* Increment number of outer iterations */
831     outeriter        += nri;
832
833     /* Update outer/inner flops */
834
835     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_VF,outeriter*20 + inneriter*348);
836 }
837 /*
838  * Gromacs nonbonded kernel:   nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_F_avx_256_single
839  * Electrostatics interaction: Ewald
840  * VdW interaction:            LennardJones
841  * Geometry:                   Water3-Particle
842  * Calculate force/pot:        Force
843  */
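/*
 * This force-only variant repeats the same per-pair math as the VF kernel above
 * (see the scalar reference sketch near the top of the file) but skips the
 * potential accumulation and the energy-group update.
 */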
844 void
845 nb_kernel_ElecEwSw_VdwLJSw_GeomW3P1_F_avx_256_single
846                     (t_nblist * gmx_restrict                nlist,
847                      rvec * gmx_restrict                    xx,
848                      rvec * gmx_restrict                    ff,
849                      t_forcerec * gmx_restrict              fr,
850                      t_mdatoms * gmx_restrict               mdatoms,
851                      nb_kernel_data_t * gmx_restrict        kernel_data,
852                      t_nrnb * gmx_restrict                  nrnb)
853 {
854     /* Suffixes 0,1,2,3 refer to particle indices for waters in the inner or outer loop, or 
855      * just 0 for non-waters.
856      * Suffixes A,B,C,D,E,F,G,H refer to j loop unrolling done with AVX, i.e. for the eight different
857      * jnr indices corresponding to data put in the eight positions in the SIMD register.
858      */
859     int              i_shift_offset,i_coord_offset,outeriter,inneriter;
860     int              j_index_start,j_index_end,jidx,nri,inr,ggid,iidx;
861     int              jnrA,jnrB,jnrC,jnrD;
862     int              jnrE,jnrF,jnrG,jnrH;
863     int              jnrlistA,jnrlistB,jnrlistC,jnrlistD;
864     int              jnrlistE,jnrlistF,jnrlistG,jnrlistH;
865     int              j_coord_offsetA,j_coord_offsetB,j_coord_offsetC,j_coord_offsetD;
866     int              j_coord_offsetE,j_coord_offsetF,j_coord_offsetG,j_coord_offsetH;
867     int              *iinr,*jindex,*jjnr,*shiftidx,*gid;
868     real             rcutoff_scalar;
869     real             *shiftvec,*fshift,*x,*f;
870     real             *fjptrA,*fjptrB,*fjptrC,*fjptrD,*fjptrE,*fjptrF,*fjptrG,*fjptrH;
871     real             scratch[4*DIM];
872     __m256           tx,ty,tz,fscal,rcutoff,rcutoff2,jidxall;
873     real *           vdwioffsetptr0;
874     __m256           ix0,iy0,iz0,fix0,fiy0,fiz0,iq0,isai0;
875     real *           vdwioffsetptr1;
876     __m256           ix1,iy1,iz1,fix1,fiy1,fiz1,iq1,isai1;
877     real *           vdwioffsetptr2;
878     __m256           ix2,iy2,iz2,fix2,fiy2,fiz2,iq2,isai2;
879     int              vdwjidx0A,vdwjidx0B,vdwjidx0C,vdwjidx0D,vdwjidx0E,vdwjidx0F,vdwjidx0G,vdwjidx0H;
880     __m256           jx0,jy0,jz0,fjx0,fjy0,fjz0,jq0,isaj0;
881     __m256           dx00,dy00,dz00,rsq00,rinv00,rinvsq00,r00,qq00,c6_00,c12_00;
882     __m256           dx10,dy10,dz10,rsq10,rinv10,rinvsq10,r10,qq10,c6_10,c12_10;
883     __m256           dx20,dy20,dz20,rsq20,rinv20,rinvsq20,r20,qq20,c6_20,c12_20;
884     __m256           velec,felec,velecsum,facel,crf,krf,krf2;
885     real             *charge;
886     int              nvdwtype;
887     __m256           rinvsix,rvdw,vvdw,vvdw6,vvdw12,fvdw,fvdw6,fvdw12,vvdwsum,sh_vdw_invrcut6;
888     int              *vdwtype;
889     real             *vdwparam;
890     __m256           one_sixth   = _mm256_set1_ps(1.0/6.0);
891     __m256           one_twelfth = _mm256_set1_ps(1.0/12.0);
892     __m256i          ewitab;
893     __m128i          ewitab_lo,ewitab_hi;
894     __m256           ewtabscale,eweps,sh_ewald,ewrt,ewtabhalfspace,ewtabF,ewtabFn,ewtabD,ewtabV;
895     __m256           beta,beta2,beta3,zeta2,pmecorrF,pmecorrV,rinv3;
896     real             *ewtab;
897     __m256           rswitch,swV3,swV4,swV5,swF2,swF3,swF4,d,d2,sw,dsw;
898     real             rswitch_scalar,d_scalar;
899     __m256           dummy_mask,cutoff_mask;
900     __m256           signbit = _mm256_castsi256_ps( _mm256_set1_epi32(0x80000000) );
901     __m256           one     = _mm256_set1_ps(1.0);
902     __m256           two     = _mm256_set1_ps(2.0);
903     x                = xx[0];
904     f                = ff[0];
905
906     nri              = nlist->nri;
907     iinr             = nlist->iinr;
908     jindex           = nlist->jindex;
909     jjnr             = nlist->jjnr;
910     shiftidx         = nlist->shift;
911     gid              = nlist->gid;
912     shiftvec         = fr->shift_vec[0];
913     fshift           = fr->fshift[0];
914     facel            = _mm256_set1_ps(fr->epsfac);
915     charge           = mdatoms->chargeA;
916     nvdwtype         = fr->ntype;
917     vdwparam         = fr->nbfp;
918     vdwtype          = mdatoms->typeA;
919
920     sh_ewald         = _mm256_set1_ps(fr->ic->sh_ewald);
921     beta             = _mm256_set1_ps(fr->ic->ewaldcoeff);
922     beta2            = _mm256_mul_ps(beta,beta);
923     beta3            = _mm256_mul_ps(beta,beta2);
924
925     ewtab            = fr->ic->tabq_coul_FDV0;
926     ewtabscale       = _mm256_set1_ps(fr->ic->tabq_scale);
927     ewtabhalfspace   = _mm256_set1_ps(0.5/fr->ic->tabq_scale);
928
929     /* Setup water-specific parameters */
930     inr              = nlist->iinr[0];
931     iq0              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+0]));
932     iq1              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+1]));
933     iq2              = _mm256_mul_ps(facel,_mm256_set1_ps(charge[inr+2]));
934     vdwioffsetptr0   = vdwparam+2*nvdwtype*vdwtype[inr+0];
935
936     /* When explicit cutoffs are used the value must be identical for electrostatics and VdW, so we use the electrostatics cutoff (an arbitrary choice) */
937     rcutoff_scalar   = fr->rcoulomb;
938     rcutoff          = _mm256_set1_ps(rcutoff_scalar);
939     rcutoff2         = _mm256_mul_ps(rcutoff,rcutoff);
940
941     rswitch_scalar   = fr->rcoulomb_switch;
942     rswitch          = _mm256_set1_ps(rswitch_scalar);
943     /* Setup switch parameters */
944     d_scalar         = rcutoff_scalar-rswitch_scalar;
945     d                = _mm256_set1_ps(d_scalar);
946     swV3             = _mm256_set1_ps(-10.0/(d_scalar*d_scalar*d_scalar));
947     swV4             = _mm256_set1_ps( 15.0/(d_scalar*d_scalar*d_scalar*d_scalar));
948     swV5             = _mm256_set1_ps( -6.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
949     swF2             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar));
950     swF3             = _mm256_set1_ps( 60.0/(d_scalar*d_scalar*d_scalar*d_scalar));
951     swF4             = _mm256_set1_ps(-30.0/(d_scalar*d_scalar*d_scalar*d_scalar*d_scalar));
952
953     /* Initialize to zero to avoid compiler warnings about possibly uninitialized variables */
954     jnrA = jnrB = jnrC = jnrD = jnrE = jnrF = jnrG = jnrH = 0;
955     j_coord_offsetA = 0;
956     j_coord_offsetB = 0;
957     j_coord_offsetC = 0;
958     j_coord_offsetD = 0;
959     j_coord_offsetE = 0;
960     j_coord_offsetF = 0;
961     j_coord_offsetG = 0;
962     j_coord_offsetH = 0;
963
964     outeriter        = 0;
965     inneriter        = 0;
966
967     for(iidx=0;iidx<4*DIM;iidx++)
968     {
969         scratch[iidx] = 0.0;
970     }
971
972     /* Start outer loop over neighborlists */
973     for(iidx=0; iidx<nri; iidx++)
974     {
975         /* Load shift vector for this list */
976         i_shift_offset   = DIM*shiftidx[iidx];
977
978         /* Load limits for loop over neighbors */
979         j_index_start    = jindex[iidx];
980         j_index_end      = jindex[iidx+1];
981
982         /* Get outer coordinate index */
983         inr              = iinr[iidx];
984         i_coord_offset   = DIM*inr;
985
986         /* Load i particle coords and add shift vector */
987         gmx_mm256_load_shift_and_3rvec_broadcast_ps(shiftvec+i_shift_offset,x+i_coord_offset,
988                                                     &ix0,&iy0,&iz0,&ix1,&iy1,&iz1,&ix2,&iy2,&iz2);
989
990         fix0             = _mm256_setzero_ps();
991         fiy0             = _mm256_setzero_ps();
992         fiz0             = _mm256_setzero_ps();
993         fix1             = _mm256_setzero_ps();
994         fiy1             = _mm256_setzero_ps();
995         fiz1             = _mm256_setzero_ps();
996         fix2             = _mm256_setzero_ps();
997         fiy2             = _mm256_setzero_ps();
998         fiz2             = _mm256_setzero_ps();
999
1000         /* Start inner kernel loop */
1001         for(jidx=j_index_start; jidx<j_index_end && jjnr[jidx+7]>=0; jidx+=8)
1002         {
1003
1004             /* Get j neighbor index, and coordinate index */
1005             jnrA             = jjnr[jidx];
1006             jnrB             = jjnr[jidx+1];
1007             jnrC             = jjnr[jidx+2];
1008             jnrD             = jjnr[jidx+3];
1009             jnrE             = jjnr[jidx+4];
1010             jnrF             = jjnr[jidx+5];
1011             jnrG             = jjnr[jidx+6];
1012             jnrH             = jjnr[jidx+7];
1013             j_coord_offsetA  = DIM*jnrA;
1014             j_coord_offsetB  = DIM*jnrB;
1015             j_coord_offsetC  = DIM*jnrC;
1016             j_coord_offsetD  = DIM*jnrD;
1017             j_coord_offsetE  = DIM*jnrE;
1018             j_coord_offsetF  = DIM*jnrF;
1019             j_coord_offsetG  = DIM*jnrG;
1020             j_coord_offsetH  = DIM*jnrH;
1021
1022             /* load j atom coordinates */
1023             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1024                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1025                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1026                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1027                                                  &jx0,&jy0,&jz0);
1028
1029             /* Calculate displacement vector */
1030             dx00             = _mm256_sub_ps(ix0,jx0);
1031             dy00             = _mm256_sub_ps(iy0,jy0);
1032             dz00             = _mm256_sub_ps(iz0,jz0);
1033             dx10             = _mm256_sub_ps(ix1,jx0);
1034             dy10             = _mm256_sub_ps(iy1,jy0);
1035             dz10             = _mm256_sub_ps(iz1,jz0);
1036             dx20             = _mm256_sub_ps(ix2,jx0);
1037             dy20             = _mm256_sub_ps(iy2,jy0);
1038             dz20             = _mm256_sub_ps(iz2,jz0);
1039
1040             /* Calculate squared distance and things based on it */
1041             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1042             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1043             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1044
1045             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1046             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1047             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1048
1049             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1050             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1051             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1052
1053             /* Load parameters for j particles */
1054             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1055                                                                  charge+jnrC+0,charge+jnrD+0,
1056                                                                  charge+jnrE+0,charge+jnrF+0,
1057                                                                  charge+jnrG+0,charge+jnrH+0);
1058             vdwjidx0A        = 2*vdwtype[jnrA+0];
1059             vdwjidx0B        = 2*vdwtype[jnrB+0];
1060             vdwjidx0C        = 2*vdwtype[jnrC+0];
1061             vdwjidx0D        = 2*vdwtype[jnrD+0];
1062             vdwjidx0E        = 2*vdwtype[jnrE+0];
1063             vdwjidx0F        = 2*vdwtype[jnrF+0];
1064             vdwjidx0G        = 2*vdwtype[jnrG+0];
1065             vdwjidx0H        = 2*vdwtype[jnrH+0];
1066
1067             fjx0             = _mm256_setzero_ps();
1068             fjy0             = _mm256_setzero_ps();
1069             fjz0             = _mm256_setzero_ps();
1070
1071             /**************************
1072              * CALCULATE INTERACTIONS *
1073              **************************/
1074
1075             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1076             {
1077
1078             r00              = _mm256_mul_ps(rsq00,rinv00);
1079
1080             /* Compute parameters for interactions between i and j atoms */
1081             qq00             = _mm256_mul_ps(iq0,jq0);
1082             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1083                                             vdwioffsetptr0+vdwjidx0B,
1084                                             vdwioffsetptr0+vdwjidx0C,
1085                                             vdwioffsetptr0+vdwjidx0D,
1086                                             vdwioffsetptr0+vdwjidx0E,
1087                                             vdwioffsetptr0+vdwjidx0F,
1088                                             vdwioffsetptr0+vdwjidx0G,
1089                                             vdwioffsetptr0+vdwjidx0H,
1090                                             &c6_00,&c12_00);
1091
1092             /* EWALD ELECTROSTATICS */
1093             
1094             /* Analytical PME correction */
1095             zeta2            = _mm256_mul_ps(beta2,rsq00);
1096             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1097             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1098             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1099             felec            = _mm256_mul_ps(qq00,felec);
1100             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1101             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1102             velec            = _mm256_sub_ps(rinv00,pmecorrV);
1103             velec            = _mm256_mul_ps(qq00,velec);
1104             
1105             /* LENNARD-JONES DISPERSION/REPULSION */
1106
1107             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1108             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
1109             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1110             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
1111             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
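            /* With vvdw6 = c6/r^6 and vvdw12 = c12/r^12 this gives
             * vvdw = vvdw12/12 - vvdw6/6 and fvdw = (vvdw12 - vvdw6)/r^2,
             * i.e. fvdw = -d(vvdw)/dr * 1/r, consistent with the switching applied below.
             */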
1112
1113             d                = _mm256_sub_ps(r00,rswitch);
1114             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1115             d2               = _mm256_mul_ps(d,d);
1116             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1117
1118             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
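            /* Potential-switch polynomials in d = max(r - rswitch, 0):
             *   sw  = 1 + swV3*d^3 + swV4*d^4 + swV5*d^5
             *   dsw =     swF2*d^2 + swF3*d^3 + swF4*d^4   (d sw / d r)
             * applied to force and potential via the chain rule below.
             */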
1119
1120             /* Evaluate switch function */
1121             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1122             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
1123             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
1124             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
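            /* The any_lt() test above only guarantees that at least one SIMD lane is inside
             * the cut-off; this mask is used below to zero fscal in the lanes that are not.
             */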
1125
1126             fscal            = _mm256_add_ps(felec,fvdw);
1127
1128             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1129
1130             /* Calculate temporary vectorial force */
1131             tx               = _mm256_mul_ps(fscal,dx00);
1132             ty               = _mm256_mul_ps(fscal,dy00);
1133             tz               = _mm256_mul_ps(fscal,dz00);
1134
1135             /* Update vectorial force */
1136             fix0             = _mm256_add_ps(fix0,tx);
1137             fiy0             = _mm256_add_ps(fiy0,ty);
1138             fiz0             = _mm256_add_ps(fiz0,tz);
1139
1140             fjx0             = _mm256_add_ps(fjx0,tx);
1141             fjy0             = _mm256_add_ps(fjy0,ty);
1142             fjz0             = _mm256_add_ps(fjz0,tz);
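            /* The same tx/ty/tz are added to the i-atom accumulators and to the j-atom
             * accumulators; the j side is subtracted from f[] in a single scatter once all
             * three water atoms have been processed (Newton's third law).
             */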
1143
1144             }
1145
1146             /**************************
1147              * CALCULATE INTERACTIONS *
1148              **************************/
1149
1150             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1151             {
1152
1153             r10              = _mm256_mul_ps(rsq10,rinv10);
1154
1155             /* Compute parameters for interactions between i and j atoms */
1156             qq10             = _mm256_mul_ps(iq1,jq0);
1157
1158             /* EWALD ELECTROSTATICS */
1159             
1160             /* Analytical PME correction */
1161             zeta2            = _mm256_mul_ps(beta2,rsq10);
1162             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1163             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1164             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1165             felec            = _mm256_mul_ps(qq10,felec);
1166             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1167             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1168             velec            = _mm256_sub_ps(rinv10,pmecorrV);
1169             velec            = _mm256_mul_ps(qq10,velec);
1170             
1171             d                = _mm256_sub_ps(r10,rswitch);
1172             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1173             d2               = _mm256_mul_ps(d,d);
1174             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1175
1176             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1177
1178             /* Evaluate switch function */
1179             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1180             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1181             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1182
1183             fscal            = felec;
1184
1185             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1186
1187             /* Calculate temporary vectorial force */
1188             tx               = _mm256_mul_ps(fscal,dx10);
1189             ty               = _mm256_mul_ps(fscal,dy10);
1190             tz               = _mm256_mul_ps(fscal,dz10);
1191
1192             /* Update vectorial force */
1193             fix1             = _mm256_add_ps(fix1,tx);
1194             fiy1             = _mm256_add_ps(fiy1,ty);
1195             fiz1             = _mm256_add_ps(fiz1,tz);
1196
1197             fjx0             = _mm256_add_ps(fjx0,tx);
1198             fjy0             = _mm256_add_ps(fjy0,ty);
1199             fjz0             = _mm256_add_ps(fjz0,tz);
1200
1201             }
1202
1203             /**************************
1204              * CALCULATE INTERACTIONS *
1205              **************************/
1206
1207             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1208             {
1209
1210             r20              = _mm256_mul_ps(rsq20,rinv20);
1211
1212             /* Compute parameters for interactions between i and j atoms */
1213             qq20             = _mm256_mul_ps(iq2,jq0);
1214
1215             /* EWALD ELECTROSTATICS */
1216             
1217             /* Analytical PME correction */
1218             zeta2            = _mm256_mul_ps(beta2,rsq20);
1219             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1220             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1221             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1222             felec            = _mm256_mul_ps(qq20,felec);
1223             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1224             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1225             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1226             velec            = _mm256_mul_ps(qq20,velec);
1227             
1228             d                = _mm256_sub_ps(r20,rswitch);
1229             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1230             d2               = _mm256_mul_ps(d,d);
1231             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1232
1233             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1234
1235             /* Evaluate switch function */
1236             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1237             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1238             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1239
1240             fscal            = felec;
1241
1242             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1243
1244             /* Calculate temporary vectorial force */
1245             tx               = _mm256_mul_ps(fscal,dx20);
1246             ty               = _mm256_mul_ps(fscal,dy20);
1247             tz               = _mm256_mul_ps(fscal,dz20);
1248
1249             /* Update vectorial force */
1250             fix2             = _mm256_add_ps(fix2,tx);
1251             fiy2             = _mm256_add_ps(fiy2,ty);
1252             fiz2             = _mm256_add_ps(fiz2,tz);
1253
1254             fjx0             = _mm256_add_ps(fjx0,tx);
1255             fjy0             = _mm256_add_ps(fjy0,ty);
1256             fjz0             = _mm256_add_ps(fjz0,tz);
1257
1258             }
1259
1260             fjptrA             = f+j_coord_offsetA;
1261             fjptrB             = f+j_coord_offsetB;
1262             fjptrC             = f+j_coord_offsetC;
1263             fjptrD             = f+j_coord_offsetD;
1264             fjptrE             = f+j_coord_offsetE;
1265             fjptrF             = f+j_coord_offsetF;
1266             fjptrG             = f+j_coord_offsetG;
1267             fjptrH             = f+j_coord_offsetH;
1268
1269             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
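            /* The swizzle routine transposes the SIMD accumulators back to per-atom (x,y,z)
             * layout and subtracts them from the eight j-atom force entries. */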
1270
1271             /* Inner loop uses 333 flops */
1272         }
1273
1274         if(jidx<j_index_end)
1275         {
1276
1277             /* Get j neighbor index, and coordinate index */
1278             jnrlistA         = jjnr[jidx];
1279             jnrlistB         = jjnr[jidx+1];
1280             jnrlistC         = jjnr[jidx+2];
1281             jnrlistD         = jjnr[jidx+3];
1282             jnrlistE         = jjnr[jidx+4];
1283             jnrlistF         = jjnr[jidx+5];
1284             jnrlistG         = jjnr[jidx+6];
1285             jnrlistH         = jjnr[jidx+7];
1286             /* Sign of each element will be negative for non-real atoms.
1287              * This mask will be 0xFFFFFFFF for dummy entries and 0x0 for real ones,
1288              * so use it as val = _mm256_andnot_ps(mask,val) to clear dummy entries.
1289              */
1290             dummy_mask = gmx_mm256_set_m128(gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx+4)),_mm_setzero_si128())),
1291                                             gmx_mm_castsi128_ps(_mm_cmplt_epi32(_mm_loadu_si128((const __m128i *)(jjnr+jidx)),_mm_setzero_si128())));
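            /* Two SSE integer compares (one per 128-bit half of the eight jjnr entries)
             * test for negative indices; the results are merged into a single 256-bit
             * float mask that is all ones in padding lanes and zero for real atoms.
             */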
1292                                             
1293             jnrA       = (jnrlistA>=0) ? jnrlistA : 0;
1294             jnrB       = (jnrlistB>=0) ? jnrlistB : 0;
1295             jnrC       = (jnrlistC>=0) ? jnrlistC : 0;
1296             jnrD       = (jnrlistD>=0) ? jnrlistD : 0;
1297             jnrE       = (jnrlistE>=0) ? jnrlistE : 0;
1298             jnrF       = (jnrlistF>=0) ? jnrlistF : 0;
1299             jnrG       = (jnrlistG>=0) ? jnrlistG : 0;
1300             jnrH       = (jnrlistH>=0) ? jnrlistH : 0;
1301             j_coord_offsetA  = DIM*jnrA;
1302             j_coord_offsetB  = DIM*jnrB;
1303             j_coord_offsetC  = DIM*jnrC;
1304             j_coord_offsetD  = DIM*jnrD;
1305             j_coord_offsetE  = DIM*jnrE;
1306             j_coord_offsetF  = DIM*jnrF;
1307             j_coord_offsetG  = DIM*jnrG;
1308             j_coord_offsetH  = DIM*jnrH;
1309
1310             /* load j atom coordinates */
1311             gmx_mm256_load_1rvec_8ptr_swizzle_ps(x+j_coord_offsetA,x+j_coord_offsetB,
1312                                                  x+j_coord_offsetC,x+j_coord_offsetD,
1313                                                  x+j_coord_offsetE,x+j_coord_offsetF,
1314                                                  x+j_coord_offsetG,x+j_coord_offsetH,
1315                                                  &jx0,&jy0,&jz0);
1316
1317             /* Calculate displacement vector */
1318             dx00             = _mm256_sub_ps(ix0,jx0);
1319             dy00             = _mm256_sub_ps(iy0,jy0);
1320             dz00             = _mm256_sub_ps(iz0,jz0);
1321             dx10             = _mm256_sub_ps(ix1,jx0);
1322             dy10             = _mm256_sub_ps(iy1,jy0);
1323             dz10             = _mm256_sub_ps(iz1,jz0);
1324             dx20             = _mm256_sub_ps(ix2,jx0);
1325             dy20             = _mm256_sub_ps(iy2,jy0);
1326             dz20             = _mm256_sub_ps(iz2,jz0);
1327
1328             /* Calculate squared distance and things based on it */
1329             rsq00            = gmx_mm256_calc_rsq_ps(dx00,dy00,dz00);
1330             rsq10            = gmx_mm256_calc_rsq_ps(dx10,dy10,dz10);
1331             rsq20            = gmx_mm256_calc_rsq_ps(dx20,dy20,dz20);
1332
1333             rinv00           = gmx_mm256_invsqrt_ps(rsq00);
1334             rinv10           = gmx_mm256_invsqrt_ps(rsq10);
1335             rinv20           = gmx_mm256_invsqrt_ps(rsq20);
1336
1337             rinvsq00         = _mm256_mul_ps(rinv00,rinv00);
1338             rinvsq10         = _mm256_mul_ps(rinv10,rinv10);
1339             rinvsq20         = _mm256_mul_ps(rinv20,rinv20);
1340
1341             /* Load parameters for j particles */
1342             jq0              = gmx_mm256_load_8real_swizzle_ps(charge+jnrA+0,charge+jnrB+0,
1343                                                                  charge+jnrC+0,charge+jnrD+0,
1344                                                                  charge+jnrE+0,charge+jnrF+0,
1345                                                                  charge+jnrG+0,charge+jnrH+0);
1346             vdwjidx0A        = 2*vdwtype[jnrA+0];
1347             vdwjidx0B        = 2*vdwtype[jnrB+0];
1348             vdwjidx0C        = 2*vdwtype[jnrC+0];
1349             vdwjidx0D        = 2*vdwtype[jnrD+0];
1350             vdwjidx0E        = 2*vdwtype[jnrE+0];
1351             vdwjidx0F        = 2*vdwtype[jnrF+0];
1352             vdwjidx0G        = 2*vdwtype[jnrG+0];
1353             vdwjidx0H        = 2*vdwtype[jnrH+0];
1354
1355             fjx0             = _mm256_setzero_ps();
1356             fjy0             = _mm256_setzero_ps();
1357             fjz0             = _mm256_setzero_ps();
1358
1359             /**************************
1360              * CALCULATE INTERACTIONS *
1361              **************************/
1362
1363             if (gmx_mm256_any_lt(rsq00,rcutoff2))
1364             {
1365
1366             r00              = _mm256_mul_ps(rsq00,rinv00);
1367             r00              = _mm256_andnot_ps(dummy_mask,r00);
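            /* Clear r in padding lanes (their jnr was redirected to atom 0 above), so the
             * switch polynomial below is evaluated on harmless values; any force computed
             * in those lanes is zeroed again with dummy_mask before it is accumulated.
             */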
1368
1369             /* Compute parameters for interactions between i and j atoms */
1370             qq00             = _mm256_mul_ps(iq0,jq0);
1371             gmx_mm256_load_8pair_swizzle_ps(vdwioffsetptr0+vdwjidx0A,
1372                                             vdwioffsetptr0+vdwjidx0B,
1373                                             vdwioffsetptr0+vdwjidx0C,
1374                                             vdwioffsetptr0+vdwjidx0D,
1375                                             vdwioffsetptr0+vdwjidx0E,
1376                                             vdwioffsetptr0+vdwjidx0F,
1377                                             vdwioffsetptr0+vdwjidx0G,
1378                                             vdwioffsetptr0+vdwjidx0H,
1379                                             &c6_00,&c12_00);
1380
1381             /* EWALD ELECTROSTATICS */
1382             
1383             /* Analytical PME correction */
1384             zeta2            = _mm256_mul_ps(beta2,rsq00);
1385             rinv3            = _mm256_mul_ps(rinvsq00,rinv00);
1386             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1387             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1388             felec            = _mm256_mul_ps(qq00,felec);
1389             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1390             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1391             velec            = _mm256_sub_ps(rinv00,pmecorrV);
1392             velec            = _mm256_mul_ps(qq00,velec);
1393             
1394             /* LENNARD-JONES DISPERSION/REPULSION */
1395
1396             rinvsix          = _mm256_mul_ps(_mm256_mul_ps(rinvsq00,rinvsq00),rinvsq00);
1397             vvdw6            = _mm256_mul_ps(c6_00,rinvsix);
1398             vvdw12           = _mm256_mul_ps(c12_00,_mm256_mul_ps(rinvsix,rinvsix));
1399             vvdw             = _mm256_sub_ps( _mm256_mul_ps(vvdw12,one_twelfth) , _mm256_mul_ps(vvdw6,one_sixth) );
1400             fvdw             = _mm256_mul_ps(_mm256_sub_ps(vvdw12,vvdw6),rinvsq00);
1401
1402             d                = _mm256_sub_ps(r00,rswitch);
1403             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1404             d2               = _mm256_mul_ps(d,d);
1405             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1406
1407             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1408
1409             /* Evaluate switch function */
1410             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1411             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(velec,dsw)) );
1412             fvdw             = _mm256_sub_ps( _mm256_mul_ps(fvdw,sw) , _mm256_mul_ps(rinv00,_mm256_mul_ps(vvdw,dsw)) );
1413             cutoff_mask      = _mm256_cmp_ps(rsq00,rcutoff2,_CMP_LT_OQ);
1414
1415             fscal            = _mm256_add_ps(felec,fvdw);
1416
1417             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1418
1419             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1420
1421             /* Calculate temporary vectorial force */
1422             tx               = _mm256_mul_ps(fscal,dx00);
1423             ty               = _mm256_mul_ps(fscal,dy00);
1424             tz               = _mm256_mul_ps(fscal,dz00);
1425
1426             /* Update vectorial force */
1427             fix0             = _mm256_add_ps(fix0,tx);
1428             fiy0             = _mm256_add_ps(fiy0,ty);
1429             fiz0             = _mm256_add_ps(fiz0,tz);
1430
1431             fjx0             = _mm256_add_ps(fjx0,tx);
1432             fjy0             = _mm256_add_ps(fjy0,ty);
1433             fjz0             = _mm256_add_ps(fjz0,tz);
1434
1435             }
1436
1437             /**************************
1438              * CALCULATE INTERACTIONS *
1439              **************************/
1440
1441             if (gmx_mm256_any_lt(rsq10,rcutoff2))
1442             {
1443
1444             r10              = _mm256_mul_ps(rsq10,rinv10);
1445             r10              = _mm256_andnot_ps(dummy_mask,r10);
1446
1447             /* Compute parameters for interactions between i and j atoms */
1448             qq10             = _mm256_mul_ps(iq1,jq0);
1449
1450             /* EWALD ELECTROSTATICS */
1451             
1452             /* Analytical PME correction */
1453             zeta2            = _mm256_mul_ps(beta2,rsq10);
1454             rinv3            = _mm256_mul_ps(rinvsq10,rinv10);
1455             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1456             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1457             felec            = _mm256_mul_ps(qq10,felec);
1458             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1459             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1460             velec            = _mm256_sub_ps(rinv10,pmecorrV);
1461             velec            = _mm256_mul_ps(qq10,velec);
1462             
1463             d                = _mm256_sub_ps(r10,rswitch);
1464             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1465             d2               = _mm256_mul_ps(d,d);
1466             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1467
1468             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1469
1470             /* Evaluate switch function */
1471             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1472             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv10,_mm256_mul_ps(velec,dsw)) );
1473             cutoff_mask      = _mm256_cmp_ps(rsq10,rcutoff2,_CMP_LT_OQ);
1474
1475             fscal            = felec;
1476
1477             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1478
1479             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1480
1481             /* Calculate temporary vectorial force */
1482             tx               = _mm256_mul_ps(fscal,dx10);
1483             ty               = _mm256_mul_ps(fscal,dy10);
1484             tz               = _mm256_mul_ps(fscal,dz10);
1485
1486             /* Update vectorial force */
1487             fix1             = _mm256_add_ps(fix1,tx);
1488             fiy1             = _mm256_add_ps(fiy1,ty);
1489             fiz1             = _mm256_add_ps(fiz1,tz);
1490
1491             fjx0             = _mm256_add_ps(fjx0,tx);
1492             fjy0             = _mm256_add_ps(fjy0,ty);
1493             fjz0             = _mm256_add_ps(fjz0,tz);
1494
1495             }
1496
1497             /**************************
1498              * CALCULATE INTERACTIONS *
1499              **************************/
1500
1501             if (gmx_mm256_any_lt(rsq20,rcutoff2))
1502             {
1503
1504             r20              = _mm256_mul_ps(rsq20,rinv20);
1505             r20              = _mm256_andnot_ps(dummy_mask,r20);
1506
1507             /* Compute parameters for interactions between i and j atoms */
1508             qq20             = _mm256_mul_ps(iq2,jq0);
1509
1510             /* EWALD ELECTROSTATICS */
1511             
1512             /* Analytical PME correction */
1513             zeta2            = _mm256_mul_ps(beta2,rsq20);
1514             rinv3            = _mm256_mul_ps(rinvsq20,rinv20);
1515             pmecorrF         = gmx_mm256_pmecorrF_ps(zeta2);
1516             felec            = _mm256_add_ps( _mm256_mul_ps(pmecorrF,beta3), rinv3);
1517             felec            = _mm256_mul_ps(qq20,felec);
1518             pmecorrV         = gmx_mm256_pmecorrV_ps(zeta2);
1519             pmecorrV         = _mm256_mul_ps(pmecorrV,beta);
1520             velec            = _mm256_sub_ps(rinv20,pmecorrV);
1521             velec            = _mm256_mul_ps(qq20,velec);
1522             
1523             d                = _mm256_sub_ps(r20,rswitch);
1524             d                = _mm256_max_ps(d,_mm256_setzero_ps());
1525             d2               = _mm256_mul_ps(d,d);
1526             sw               = _mm256_add_ps(one,_mm256_mul_ps(d2,_mm256_mul_ps(d,_mm256_add_ps(swV3,_mm256_mul_ps(d,_mm256_add_ps(swV4,_mm256_mul_ps(d,swV5)))))));
1527
1528             dsw              = _mm256_mul_ps(d2,_mm256_add_ps(swF2,_mm256_mul_ps(d,_mm256_add_ps(swF3,_mm256_mul_ps(d,swF4)))));
1529
1530             /* Evaluate switch function */
1531             /* fscal'=f'/r=-(v*sw)'/r=-(v'*sw+v*dsw)/r=-v'*sw/r-v*dsw/r=fscal*sw-v*dsw/r */
1532             felec            = _mm256_sub_ps( _mm256_mul_ps(felec,sw) , _mm256_mul_ps(rinv20,_mm256_mul_ps(velec,dsw)) );
1533             cutoff_mask      = _mm256_cmp_ps(rsq20,rcutoff2,_CMP_LT_OQ);
1534
1535             fscal            = felec;
1536
1537             fscal            = _mm256_and_ps(fscal,cutoff_mask);
1538
1539             fscal            = _mm256_andnot_ps(dummy_mask,fscal);
1540
1541             /* Calculate temporary vectorial force */
1542             tx               = _mm256_mul_ps(fscal,dx20);
1543             ty               = _mm256_mul_ps(fscal,dy20);
1544             tz               = _mm256_mul_ps(fscal,dz20);
1545
1546             /* Update vectorial force */
1547             fix2             = _mm256_add_ps(fix2,tx);
1548             fiy2             = _mm256_add_ps(fiy2,ty);
1549             fiz2             = _mm256_add_ps(fiz2,tz);
1550
1551             fjx0             = _mm256_add_ps(fjx0,tx);
1552             fjy0             = _mm256_add_ps(fjy0,ty);
1553             fjz0             = _mm256_add_ps(fjz0,tz);
1554
1555             }
1556
1557             fjptrA             = (jnrlistA>=0) ? f+j_coord_offsetA : scratch;
1558             fjptrB             = (jnrlistB>=0) ? f+j_coord_offsetB : scratch;
1559             fjptrC             = (jnrlistC>=0) ? f+j_coord_offsetC : scratch;
1560             fjptrD             = (jnrlistD>=0) ? f+j_coord_offsetD : scratch;
1561             fjptrE             = (jnrlistE>=0) ? f+j_coord_offsetE : scratch;
1562             fjptrF             = (jnrlistF>=0) ? f+j_coord_offsetF : scratch;
1563             fjptrG             = (jnrlistG>=0) ? f+j_coord_offsetG : scratch;
1564             fjptrH             = (jnrlistH>=0) ? f+j_coord_offsetH : scratch;
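            /* Padding lanes carry zero force but still need a valid destination, so they
             * are pointed at the local scratch buffer; the unconditional eight-pointer
             * scatter below then never writes outside the force array.
             */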
1565
1566             gmx_mm256_decrement_1rvec_8ptr_swizzle_ps(fjptrA,fjptrB,fjptrC,fjptrD,fjptrE,fjptrF,fjptrG,fjptrH,fjx0,fjy0,fjz0);
1567
1568             /* Inner loop uses 336 flops */
1569         }
1570
1571         /* End of innermost loop */
1572
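        /* Reduce the i-atom force accumulators across SIMD lanes, add them to f[] for the
         * three water atoms, and accumulate the corresponding shift force (used for the virial).
         */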
1573         gmx_mm256_update_iforce_3atom_swizzle_ps(fix0,fiy0,fiz0,fix1,fiy1,fiz1,fix2,fiy2,fiz2,
1574                                                  f+i_coord_offset,fshift+i_shift_offset);
1575
1576         /* Increment number of inner iterations */
1577         inneriter                  += j_index_end - j_index_start;
1578
1579         /* Outer loop uses 18 flops */
1580     }
1581
1582     /* Increment number of outer iterations */
1583     outeriter        += nri;
1584
1585     /* Update outer/inner flops */
1586
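    /* Register the work done with the flop accounting used in the performance summary. */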
1587     inc_nrnb(nrnb,eNR_NBKERNEL_ELEC_VDW_W3_F,outeriter*18 + inneriter*336);
1588 }